/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
/*	$NetBSD: qat.c,v 1.6 2020/06/14 23:23:12 riastradh Exp $	*/

/*
 * Copyright (c) 2019 Internet Initiative Japan, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *   Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#if 0
__KERNEL_RCSID(0, "$NetBSD: qat.c,v 1.6 2020/06/14 23:23:12 riastradh Exp $");
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/counter.h>
#include <sys/firmware.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/bus.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include "cryptodev_if.h"

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "qatreg.h"
#include "qatvar.h"
#include "qat_aevar.h"

extern struct qat_hw qat_hw_c2xxx;
extern struct qat_hw qat_hw_c3xxx;
extern struct qat_hw qat_hw_c62x;
extern struct qat_hw qat_hw_d15xx;
extern struct qat_hw qat_hw_dh895xcc;

#define PCI_VENDOR_INTEL			0x8086
#define PCI_PRODUCT_INTEL_C2000_IQIA_PHYS	0x1f18
#define PCI_PRODUCT_INTEL_C3K_QAT		0x19e2
#define PCI_PRODUCT_INTEL_C3K_QAT_VF		0x19e3
#define PCI_PRODUCT_INTEL_C620_QAT		0x37c8
#define PCI_PRODUCT_INTEL_C620_QAT_VF		0x37c9
#define PCI_PRODUCT_INTEL_XEOND_QAT		0x6f54
#define PCI_PRODUCT_INTEL_XEOND_QAT_VF		0x6f55
#define PCI_PRODUCT_INTEL_DH895XCC_QAT		0x0435
#define PCI_PRODUCT_INTEL_DH895XCC_QAT_VF	0x0443

static const struct qat_product {
	uint16_t qatp_vendor;
	uint16_t qatp_product;
	const char *qatp_name;
	enum qat_chip_type qatp_chip;
	const struct qat_hw *qatp_hw;
} qat_products[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_IQIA_PHYS,
	  "Intel C2000 QuickAssist PF",
	  QAT_CHIP_C2XXX, &qat_hw_c2xxx },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C3K_QAT,
	  "Intel C3000 QuickAssist PF",
	  QAT_CHIP_C3XXX, &qat_hw_c3xxx },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C620_QAT,
	  "Intel C620/Xeon D-2100 QuickAssist PF",
	  QAT_CHIP_C62X, &qat_hw_c62x },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XEOND_QAT,
	  "Intel Xeon D-1500 QuickAssist PF",
	  QAT_CHIP_D15XX, &qat_hw_d15xx },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_DH895XCC_QAT,
	  "Intel 8950 QuickAssist PCIe Adapter PF",
	  QAT_CHIP_DH895XCC, &qat_hw_dh895xcc },
	{ 0, 0, NULL, 0, NULL },
};

/* Hash Algorithm specific structure */

/* SHA1 - 20 bytes - Initialiser state can be found in FIPS 180-2 */
static const uint8_t sha1_initial_state[QAT_HASH_SHA1_STATE_SIZE] = {
	0x67, 0x45, 0x23, 0x01,
	0xef, 0xcd, 0xab, 0x89,
	0x98, 0xba, 0xdc, 0xfe,
	0x10, 0x32, 0x54, 0x76,
	0xc3, 0xd2, 0xe1, 0xf0
};

/* SHA 256 - 32 bytes - Initialiser state can be found in FIPS 180-2 */
static const uint8_t sha256_initial_state[QAT_HASH_SHA256_STATE_SIZE] = {
	0x6a, 0x09, 0xe6, 0x67,
	0xbb, 0x67, 0xae, 0x85,
	0x3c, 0x6e, 0xf3, 0x72,
	0xa5, 0x4f, 0xf5, 0x3a,
	0x51, 0x0e, 0x52, 0x7f,
	0x9b, 0x05, 0x68, 0x8c,
	0x1f, 0x83, 0xd9, 0xab,
	0x5b, 0xe0, 0xcd, 0x19
};

/* SHA 384 - 64 bytes - Initialiser state can be found in FIPS 180-2 */
static const uint8_t sha384_initial_state[QAT_HASH_SHA384_STATE_SIZE] = {
	0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8,
	0x62, 0x9a, 0x29, 0x2a, 0x36, 0x7c, 0xd5, 0x07,
	0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17,
	0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39,
	0x67, 0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31,
	0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11,
	0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7,
	0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4
};

/* SHA 512 - 64 bytes - Initialiser state can be found in FIPS 180-2 */
static const uint8_t sha512_initial_state[QAT_HASH_SHA512_STATE_SIZE] = {
	0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08,
	0xbb, 0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b,
	0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94, 0xf8, 0x2b,
	0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1,
	0x51, 0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1,
	0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f,
	0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b,
	0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79
};

static const struct qat_sym_hash_alg_info sha1_info = {
	.qshai_digest_len = QAT_HASH_SHA1_DIGEST_SIZE,
	.qshai_block_len = QAT_HASH_SHA1_BLOCK_SIZE,
	.qshai_state_size = QAT_HASH_SHA1_STATE_SIZE,
	.qshai_init_state = sha1_initial_state,
	.qshai_sah = &auth_hash_hmac_sha1,
	.qshai_state_offset = 0,
	.qshai_state_word = 4,
};

static const struct qat_sym_hash_alg_info sha256_info = {
	.qshai_digest_len = QAT_HASH_SHA256_DIGEST_SIZE,
	.qshai_block_len = QAT_HASH_SHA256_BLOCK_SIZE,
	.qshai_state_size = QAT_HASH_SHA256_STATE_SIZE,
	.qshai_init_state = sha256_initial_state,
	.qshai_sah = &auth_hash_hmac_sha2_256,
	.qshai_state_offset = offsetof(SHA256_CTX, state),
	.qshai_state_word = 4,
};

static const struct qat_sym_hash_alg_info sha384_info = {
	.qshai_digest_len = QAT_HASH_SHA384_DIGEST_SIZE,
	.qshai_block_len = QAT_HASH_SHA384_BLOCK_SIZE,
	.qshai_state_size = QAT_HASH_SHA384_STATE_SIZE,
	.qshai_init_state = sha384_initial_state,
	.qshai_sah = &auth_hash_hmac_sha2_384,
	.qshai_state_offset = offsetof(SHA384_CTX, state),
	.qshai_state_word = 8,
};

static const struct qat_sym_hash_alg_info sha512_info = {
	.qshai_digest_len = QAT_HASH_SHA512_DIGEST_SIZE,
	.qshai_block_len = QAT_HASH_SHA512_BLOCK_SIZE,
	.qshai_state_size = QAT_HASH_SHA512_STATE_SIZE,
	.qshai_init_state = sha512_initial_state,
	.qshai_sah = &auth_hash_hmac_sha2_512,
	.qshai_state_offset = offsetof(SHA512_CTX, state),
	.qshai_state_word = 8,
};

static const struct qat_sym_hash_alg_info aes_gcm_info = {
	.qshai_digest_len = QAT_HASH_AES_GCM_DIGEST_SIZE,
	.qshai_block_len = QAT_HASH_AES_GCM_BLOCK_SIZE,
	.qshai_state_size = QAT_HASH_AES_GCM_STATE_SIZE,
	.qshai_sah = &auth_hash_nist_gmac_aes_128,
};

/* Hash QAT specific structures */

static const struct qat_sym_hash_qat_info sha1_config = {
	.qshqi_algo_enc = HW_AUTH_ALGO_SHA1,
	.qshqi_auth_counter = QAT_HASH_SHA1_BLOCK_SIZE,
	.qshqi_state1_len = HW_SHA1_STATE1_SZ,
	.qshqi_state2_len = HW_SHA1_STATE2_SZ,
};

static const struct qat_sym_hash_qat_info sha256_config = {
	.qshqi_algo_enc = HW_AUTH_ALGO_SHA256,
	.qshqi_auth_counter = QAT_HASH_SHA256_BLOCK_SIZE,
	.qshqi_state1_len = HW_SHA256_STATE1_SZ,
	.qshqi_state2_len = HW_SHA256_STATE2_SZ
};

static const struct qat_sym_hash_qat_info sha384_config = {
	.qshqi_algo_enc = HW_AUTH_ALGO_SHA384,
	.qshqi_auth_counter = QAT_HASH_SHA384_BLOCK_SIZE,
	.qshqi_state1_len = HW_SHA384_STATE1_SZ,
	.qshqi_state2_len = HW_SHA384_STATE2_SZ
};

static const struct qat_sym_hash_qat_info sha512_config = {
	.qshqi_algo_enc = HW_AUTH_ALGO_SHA512,
	.qshqi_auth_counter = QAT_HASH_SHA512_BLOCK_SIZE,
	.qshqi_state1_len = HW_SHA512_STATE1_SZ,
	.qshqi_state2_len = HW_SHA512_STATE2_SZ
};

static const struct qat_sym_hash_qat_info aes_gcm_config = {
	.qshqi_algo_enc = HW_AUTH_ALGO_GALOIS_128,
	.qshqi_auth_counter = QAT_HASH_AES_GCM_BLOCK_SIZE,
	.qshqi_state1_len = HW_GALOIS_128_STATE1_SZ,
	.qshqi_state2_len =
	    HW_GALOIS_H_SZ + HW_GALOIS_LEN_A_SZ + HW_GALOIS_E_CTR0_SZ,
};

static const struct qat_sym_hash_def qat_sym_hash_defs[] = {
	[QAT_SYM_HASH_SHA1] = { &sha1_info, &sha1_config },
	[QAT_SYM_HASH_SHA256] = { &sha256_info, &sha256_config },
	[QAT_SYM_HASH_SHA384] = { &sha384_info, &sha384_config },
	[QAT_SYM_HASH_SHA512] = { &sha512_info, &sha512_config },
	[QAT_SYM_HASH_AES_GCM] = { &aes_gcm_info, &aes_gcm_config },
};

static const struct qat_product *qat_lookup(device_t);
static int qat_probe(device_t);
static int qat_attach(device_t);
static int qat_init(device_t);
static int qat_start(device_t);
static int qat_detach(device_t);

static int qat_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp);
static void qat_freesession(device_t dev, crypto_session_t cses);

static int qat_setup_msix_intr(struct qat_softc *);

static void qat_etr_init(struct qat_softc *);
static void qat_etr_deinit(struct qat_softc *);
static void qat_etr_bank_init(struct qat_softc *, int);
static void qat_etr_bank_deinit(struct qat_softc *sc, int);

static void qat_etr_ap_bank_init(struct qat_softc *);
static void qat_etr_ap_bank_set_ring_mask(uint32_t *, uint32_t, int);
static void qat_etr_ap_bank_set_ring_dest(struct qat_softc *, uint32_t *,
    uint32_t, int);
static void qat_etr_ap_bank_setup_ring(struct qat_softc *,
    struct qat_ring *);
static int qat_etr_verify_ring_size(uint32_t, uint32_t);

static int qat_etr_ring_intr(struct qat_softc *, struct qat_bank *,
    struct qat_ring *);
static void qat_etr_bank_intr(void *);

static void qat_arb_update(struct qat_softc *, struct qat_bank *);

static struct qat_sym_cookie *qat_crypto_alloc_sym_cookie(
    struct qat_crypto_bank *);
static void qat_crypto_free_sym_cookie(struct qat_crypto_bank *,
    struct qat_sym_cookie *);
static int qat_crypto_setup_ring(struct qat_softc *,
    struct qat_crypto_bank *);
static int qat_crypto_bank_init(struct qat_softc *,
    struct qat_crypto_bank *);
static int qat_crypto_init(struct qat_softc *);
static void qat_crypto_deinit(struct qat_softc *);
static int qat_crypto_start(struct qat_softc *);
static void qat_crypto_stop(struct qat_softc *);
static int qat_crypto_sym_rxintr(struct qat_softc *, void *, void *);

static MALLOC_DEFINE(M_QAT, "qat", "Intel QAT driver");

static const struct qat_product *
qat_lookup(device_t dev)
{
	const struct qat_product *qatp;

	for (qatp = qat_products; qatp->qatp_name != NULL; qatp++) {
		if (pci_get_vendor(dev) == qatp->qatp_vendor &&
		    pci_get_device(dev) == qatp->qatp_product)
			return qatp;
	}
	return NULL;
}

static int
qat_probe(device_t dev)
{
	const struct qat_product *prod;

	prod = qat_lookup(dev);
	if (prod != NULL) {
		device_set_desc(dev, prod->qatp_name);
		return BUS_PROBE_DEFAULT;
	}
	return ENXIO;
}

static int
qat_attach(device_t dev)
{
	struct qat_softc *sc = device_get_softc(dev);
	const struct qat_product *qatp;
	bus_size_t msixtbl_offset;
	int bar, count, error, i, msixoff, msixtbl_bar;

	sc->sc_dev = dev;
	sc->sc_rev = pci_get_revid(dev);
	sc->sc_crypto.qcy_cid = -1;

	qatp = qat_lookup(dev);
	memcpy(&sc->sc_hw, qatp->qatp_hw, sizeof(struct qat_hw));

	/* Determine active accelerators and engines */
	sc->sc_accel_mask = sc->sc_hw.qhw_get_accel_mask(sc);
	sc->sc_ae_mask = sc->sc_hw.qhw_get_ae_mask(sc);

	sc->sc_accel_num = 0;
	for (i = 0; i < sc->sc_hw.qhw_num_accel; i++) {
		if (sc->sc_accel_mask & (1 << i))
			sc->sc_accel_num++;
	}
	sc->sc_ae_num = 0;
	for (i = 0; i < sc->sc_hw.qhw_num_engines; i++) {
		if (sc->sc_ae_mask & (1 << i))
			sc->sc_ae_num++;
	}

	if (!sc->sc_accel_mask || (sc->sc_ae_mask & 0x01) == 0) {
		device_printf(sc->sc_dev, "couldn't find acceleration\n");
		goto fail;
	}

	MPASS(sc->sc_accel_num <= MAX_NUM_ACCEL);
	MPASS(sc->sc_ae_num <= MAX_NUM_AE);

	/* Determine SKU and capabilities */
	sc->sc_sku = sc->sc_hw.qhw_get_sku(sc);
	sc->sc_accel_cap = sc->sc_hw.qhw_get_accel_cap(sc);
	sc->sc_fw_uof_name = sc->sc_hw.qhw_get_fw_uof_name(sc);

	/* Map BARs */
	msixtbl_bar = 0;
	msixtbl_offset = 0;
	if (pci_find_cap(dev, PCIY_MSIX, &msixoff) == 0) {
		uint32_t msixtbl;
		msixtbl = pci_read_config(dev, msixoff + PCIR_MSIX_TABLE, 4);
		msixtbl_offset = msixtbl & ~PCIM_MSIX_BIR_MASK;
		msixtbl_bar = PCIR_BAR(msixtbl & PCIM_MSIX_BIR_MASK);
	}

	i = 0;
	if (sc->sc_hw.qhw_sram_bar_id != NO_PCI_REG) {
		MPASS(sc->sc_hw.qhw_sram_bar_id == 0);
		uint32_t fusectl = pci_read_config(dev, FUSECTL_REG, 4);
		/* Skip the SRAM BAR when the SRAM is fused off. */
		i = (fusectl & FUSECTL_MASK) ? 1 : 0;
	}
	for (bar = 0; bar < PCIR_MAX_BAR_0; bar++) {
		uint32_t val = pci_read_config(dev, PCIR_BAR(bar), 4);
		if (val == 0 || !PCI_BAR_MEM(val))
			continue;

		sc->sc_rid[i] = PCIR_BAR(bar);
		sc->sc_res[i] = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &sc->sc_rid[i], RF_ACTIVE);
		if (sc->sc_res[i] == NULL) {
			device_printf(dev, "couldn't map BAR %d\n", bar);
			goto fail;
		}

		sc->sc_csrt[i] = rman_get_bustag(sc->sc_res[i]);
		sc->sc_csrh[i] = rman_get_bushandle(sc->sc_res[i]);

		i++;
		if ((val & PCIM_BAR_MEM_TYPE) == PCIM_BAR_MEM_64)
			bar++;
	}

	pci_enable_busmaster(dev);

	count = sc->sc_hw.qhw_num_banks + 1;
	if (pci_msix_count(dev) < count) {
		device_printf(dev, "insufficient MSI-X vectors (%d vs. %d)\n",
		    pci_msix_count(dev), count);
		goto fail;
	}
	error = pci_alloc_msix(dev, &count);
	if (error != 0) {
		device_printf(dev, "failed to allocate MSI-X vectors\n");
		goto fail;
	}

	error = qat_init(dev);
	if (error == 0)
		return 0;

fail:
	qat_detach(dev);
	return ENXIO;
}

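/*
 * Bring-up sequence, as implemented below: the ETR banks first, then
 * (where the hardware generation provides the hooks) admin comms and the
 * hardware arbiter, the acceleration engines and their firmware, MSI-X
 * interrupts, and finally the crypto service itself.
 */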
static int
qat_init(device_t dev)
{
	struct qat_softc *sc = device_get_softc(dev);
	int error;

	qat_etr_init(sc);

	if (sc->sc_hw.qhw_init_admin_comms != NULL &&
	    (error = sc->sc_hw.qhw_init_admin_comms(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "Could not initialize admin comms: %d\n", error);
		return error;
	}

	if (sc->sc_hw.qhw_init_arb != NULL &&
	    (error = sc->sc_hw.qhw_init_arb(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "Could not initialize hw arbiter: %d\n", error);
		return error;
	}

	error = qat_ae_init(sc);
	if (error) {
		device_printf(sc->sc_dev,
		    "Could not initialize Acceleration Engine: %d\n", error);
		return error;
	}

	error = qat_aefw_load(sc);
	if (error) {
		device_printf(sc->sc_dev,
		    "Could not load firmware: %d\n", error);
		return error;
	}

	error = qat_setup_msix_intr(sc);
	if (error) {
		device_printf(sc->sc_dev,
		    "Could not setup interrupts: %d\n", error);
		return error;
	}

	sc->sc_hw.qhw_enable_intr(sc);

	error = qat_crypto_init(sc);
	if (error) {
		device_printf(sc->sc_dev,
		    "Could not initialize service: %d\n", error);
		return error;
	}

	if (sc->sc_hw.qhw_enable_error_correction != NULL)
		sc->sc_hw.qhw_enable_error_correction(sc);

	if (sc->sc_hw.qhw_set_ssm_wdtimer != NULL &&
	    (error = sc->sc_hw.qhw_set_ssm_wdtimer(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "Could not initialize watchdog timer: %d\n", error);
		return error;
	}

	error = qat_start(dev);
	if (error) {
		device_printf(sc->sc_dev,
		    "Could not start: %d\n", error);
		return error;
	}

	return 0;
}

static int
qat_start(device_t dev)
{
	struct qat_softc *sc = device_get_softc(dev);
	int error;

	error = qat_ae_start(sc);
	if (error)
		return error;

	if (sc->sc_hw.qhw_send_admin_init != NULL &&
	    (error = sc->sc_hw.qhw_send_admin_init(sc)) != 0) {
		return error;
	}

	error = qat_crypto_start(sc);
	if (error)
		return error;

	return 0;
}

static int
qat_detach(device_t dev)
{
	struct qat_softc *sc;
	int bar, i;

	sc = device_get_softc(dev);

	qat_crypto_stop(sc);
	qat_crypto_deinit(sc);

	if (sc->sc_etr_banks != NULL) {
		for (i = 0; i < sc->sc_hw.qhw_num_banks; i++) {
			struct qat_bank *qb = &sc->sc_etr_banks[i];

			if (qb->qb_ih_cookie != NULL)
				(void)bus_teardown_intr(dev, qb->qb_ih,
				    qb->qb_ih_cookie);
			if (qb->qb_ih != NULL)
				(void)bus_release_resource(dev, SYS_RES_IRQ,
				    i + 1, qb->qb_ih);
		}
	}
	if (sc->sc_ih_cookie != NULL) {
		(void)bus_teardown_intr(dev, sc->sc_ih, sc->sc_ih_cookie);
		sc->sc_ih_cookie = NULL;
	}
	if (sc->sc_ih != NULL) {
		(void)bus_release_resource(dev, SYS_RES_IRQ,
		    sc->sc_hw.qhw_num_banks + 1, sc->sc_ih);
		sc->sc_ih = NULL;
	}
	pci_release_msi(dev);

	qat_etr_deinit(sc);

	for (bar = 0; bar < MAX_BARS; bar++) {
		if (sc->sc_res[bar] != NULL) {
			(void)bus_release_resource(dev, SYS_RES_MEMORY,
			    sc->sc_rid[bar], sc->sc_res[bar]);
			sc->sc_res[bar] = NULL;
		}
	}

	return 0;
}

void *
qat_alloc_mem(size_t size)
{
	return (malloc(size, M_QAT, M_WAITOK | M_ZERO));
}

void
qat_free_mem(void *ptr)
{
	free(ptr, M_QAT);
}

static void
qat_alloc_dmamem_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    int error)
{
	struct qat_dmamem *qdm;

	if (error != 0)
		return;

	KASSERT(nseg == 1, ("%s: nsegs is %d", __func__, nseg));
	qdm = arg;
	qdm->qdm_dma_seg = segs[0];
}

int
qat_alloc_dmamem(struct qat_softc *sc, struct qat_dmamem *qdm,
    int nseg, bus_size_t size, bus_size_t alignment)
{
	int error;

	KASSERT(qdm->qdm_dma_vaddr == NULL,
	    ("%s: DMA memory descriptor in use", __func__));

	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),
	    alignment, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    size,			/* maxsize */
	    nseg,			/* nsegments */
	    size,			/* maxsegsize */
	    BUS_DMA_COHERENT,		/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &qdm->qdm_dma_tag);
	if (error != 0)
		return error;

	error = bus_dmamem_alloc(qdm->qdm_dma_tag, &qdm->qdm_dma_vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &qdm->qdm_dma_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "couldn't allocate dmamem, error = %d\n", error);
		goto fail_0;
	}

	error = bus_dmamap_load(qdm->qdm_dma_tag, qdm->qdm_dma_map,
	    qdm->qdm_dma_vaddr, size, qat_alloc_dmamem_cb, qdm,
	    BUS_DMA_NOWAIT);
	if (error) {
		device_printf(sc->sc_dev,
		    "couldn't load dmamem map, error = %d\n", error);
		goto fail_1;
	}

	return 0;
fail_1:
	bus_dmamem_free(qdm->qdm_dma_tag, qdm->qdm_dma_vaddr, qdm->qdm_dma_map);
fail_0:
	bus_dma_tag_destroy(qdm->qdm_dma_tag);
	return error;
}

void
qat_free_dmamem(struct qat_softc *sc, struct qat_dmamem *qdm)
{
	if (qdm->qdm_dma_tag != NULL) {
		bus_dmamap_unload(qdm->qdm_dma_tag, qdm->qdm_dma_map);
		bus_dmamem_free(qdm->qdm_dma_tag, qdm->qdm_dma_vaddr,
		    qdm->qdm_dma_map);
		bus_dma_tag_destroy(qdm->qdm_dma_tag);
		explicit_bzero(qdm, sizeof(*qdm));
	}
}

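/*
 * MSI-X vector layout used below: vectors 1 through qhw_num_banks service
 * the transfer ring banks and are bound round-robin across CPUs; the last
 * vector is the acceleration engine cluster interrupt.
 */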
static int
qat_setup_msix_intr(struct qat_softc *sc)
{
	device_t dev = sc->sc_dev;
	int error, i, rid;

	for (i = 1; i <= sc->sc_hw.qhw_num_banks; i++) {
		struct qat_bank *qb = &sc->sc_etr_banks[i - 1];

		rid = i;
		qb->qb_ih = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_ACTIVE);
		if (qb->qb_ih == NULL) {
			device_printf(dev,
			    "failed to allocate bank intr resource\n");
			return ENXIO;
		}
		error = bus_setup_intr(dev, qb->qb_ih,
		    INTR_TYPE_NET | INTR_MPSAFE, NULL, qat_etr_bank_intr, qb,
		    &qb->qb_ih_cookie);
		if (error != 0) {
			device_printf(dev, "failed to set up bank intr\n");
			return error;
		}
		error = bus_bind_intr(dev, qb->qb_ih, (i - 1) % mp_ncpus);
		if (error != 0)
			device_printf(dev, "failed to bind intr %d\n", i);
	}

	rid = i;
	sc->sc_ih = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->sc_ih == NULL)
		return ENXIO;
	error = bus_setup_intr(dev, sc->sc_ih, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, qat_ae_cluster_intr, sc, &sc->sc_ih_cookie);

	return error;
}

static void
qat_etr_init(struct qat_softc *sc)
{
	int i;

	sc->sc_etr_banks = qat_alloc_mem(
	    sizeof(struct qat_bank) * sc->sc_hw.qhw_num_banks);

	for (i = 0; i < sc->sc_hw.qhw_num_banks; i++)
		qat_etr_bank_init(sc, i);

	if (sc->sc_hw.qhw_num_ap_banks) {
		sc->sc_etr_ap_banks = qat_alloc_mem(
		    sizeof(struct qat_ap_bank) * sc->sc_hw.qhw_num_ap_banks);
		qat_etr_ap_bank_init(sc);
	}
}

static void
qat_etr_deinit(struct qat_softc *sc)
{
	int i;

	if (sc->sc_etr_banks != NULL) {
		for (i = 0; i < sc->sc_hw.qhw_num_banks; i++)
			qat_etr_bank_deinit(sc, i);
		qat_free_mem(sc->sc_etr_banks);
		sc->sc_etr_banks = NULL;
	}
	if (sc->sc_etr_ap_banks != NULL) {
		qat_free_mem(sc->sc_etr_ap_banks);
		sc->sc_etr_ap_banks = NULL;
	}
}

static void
qat_etr_bank_init(struct qat_softc *sc, int bank)
{
	struct qat_bank *qb = &sc->sc_etr_banks[bank];
	int i, tx_rx_gap = sc->sc_hw.qhw_tx_rx_gap;

	MPASS(bank < sc->sc_hw.qhw_num_banks);

	mtx_init(&qb->qb_bank_mtx, "qb bank", NULL, MTX_DEF);

	qb->qb_sc = sc;
	qb->qb_bank = bank;
	qb->qb_coalescing_time = COALESCING_TIME_INTERVAL_DEFAULT;

	/* Clean CSRs for all rings within the bank */
	for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank; i++) {
		struct qat_ring *qr = &qb->qb_et_rings[i];

		qat_etr_bank_ring_write_4(sc, bank, i,
		    ETR_RING_CONFIG, 0);
		qat_etr_bank_ring_base_write_8(sc, bank, i, 0);

		if (sc->sc_hw.qhw_tx_rings_mask & (1 << i)) {
			qr->qr_inflight = qat_alloc_mem(sizeof(uint32_t));
		} else if (sc->sc_hw.qhw_tx_rings_mask &
		    (1 << (i - tx_rx_gap))) {
			/* Share inflight counter with rx and tx */
			qr->qr_inflight =
			    qb->qb_et_rings[i - tx_rx_gap].qr_inflight;
		}
	}

	if (sc->sc_hw.qhw_init_etr_intr != NULL) {
		sc->sc_hw.qhw_init_etr_intr(sc, bank);
	} else {
		/* common code in qat 1.7 */
		qat_etr_bank_write_4(sc, bank, ETR_INT_REG,
		    ETR_INT_REG_CLEAR_MASK);
		for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank /
		    ETR_RINGS_PER_INT_SRCSEL; i++) {
			qat_etr_bank_write_4(sc, bank, ETR_INT_SRCSEL +
			    (i * ETR_INT_SRCSEL_NEXT_OFFSET),
			    ETR_INT_SRCSEL_MASK);
		}
	}
}

static void
qat_etr_bank_deinit(struct qat_softc *sc, int bank)
{
	struct qat_bank *qb;
	struct qat_ring *qr;
	int i;

	qb = &sc->sc_etr_banks[bank];
	for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank; i++) {
		if (sc->sc_hw.qhw_tx_rings_mask & (1 << i)) {
			qr = &qb->qb_et_rings[i];
			qat_free_mem(qr->qr_inflight);
		}
	}
}

static void
qat_etr_ap_bank_init(struct qat_softc *sc)
{
	int ap_bank;

	for (ap_bank = 0; ap_bank < sc->sc_hw.qhw_num_ap_banks; ap_bank++) {
		struct qat_ap_bank *qab = &sc->sc_etr_ap_banks[ap_bank];

		qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_MASK,
		    ETR_AP_NF_MASK_INIT);
		qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_DEST, 0);
		qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_MASK,
		    ETR_AP_NE_MASK_INIT);
		qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_DEST, 0);

		memset(qab, 0, sizeof(*qab));
	}
}

static void
qat_etr_ap_bank_set_ring_mask(uint32_t *ap_mask, uint32_t ring, int set_mask)
{
	if (set_mask)
		*ap_mask |= (1 << ETR_RING_NUMBER_IN_AP_BANK(ring));
	else
		*ap_mask &= ~(1 << ETR_RING_NUMBER_IN_AP_BANK(ring));
}

static void
qat_etr_ap_bank_set_ring_dest(struct qat_softc *sc, uint32_t *ap_dest,
    uint32_t ring, int set_dest)
{
	uint32_t ae_mask;
	uint8_t mailbox, ae, nae;
	uint8_t *dest = (uint8_t *)ap_dest;

	mailbox = ETR_RING_AP_MAILBOX_NUMBER(ring);

	nae = 0;
	ae_mask = sc->sc_ae_mask;
	for (ae = 0; ae < sc->sc_hw.qhw_num_engines; ae++) {
		if ((ae_mask & (1 << ae)) == 0)
			continue;

		if (set_dest) {
			dest[nae] = __SHIFTIN(ae, ETR_AP_DEST_AE) |
			    __SHIFTIN(mailbox, ETR_AP_DEST_MAILBOX) |
			    ETR_AP_DEST_ENABLE;
		} else {
			dest[nae] = 0;
		}
		nae++;
		if (nae == ETR_MAX_AE_PER_MAILBOX)
			break;
	}
}

static void
qat_etr_ap_bank_setup_ring(struct qat_softc *sc, struct qat_ring *qr)
{
	struct qat_ap_bank *qab;
	int ap_bank;

	if (sc->sc_hw.qhw_num_ap_banks == 0)
		return;

	ap_bank = ETR_RING_AP_BANK_NUMBER(qr->qr_ring);
	MPASS(ap_bank < sc->sc_hw.qhw_num_ap_banks);
	qab = &sc->sc_etr_ap_banks[ap_bank];

	if (qr->qr_cb == NULL) {
		qat_etr_ap_bank_set_ring_mask(&qab->qab_ne_mask, qr->qr_ring, 1);
		if (!qab->qab_ne_dest) {
			qat_etr_ap_bank_set_ring_dest(sc, &qab->qab_ne_dest,
			    qr->qr_ring, 1);
			qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_DEST,
			    qab->qab_ne_dest);
		}
	} else {
		qat_etr_ap_bank_set_ring_mask(&qab->qab_nf_mask, qr->qr_ring, 1);
		if (!qab->qab_nf_dest) {
			qat_etr_ap_bank_set_ring_dest(sc, &qab->qab_nf_dest,
			    qr->qr_ring, 1);
			qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_DEST,
			    qab->qab_nf_dest);
		}
	}
}

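/*
 * Find the hardware ring-size encoding whose capacity in bytes matches
 * msg_size * num_msgs exactly.  A sketch of the intended arithmetic,
 * assuming the usual power-of-two encoding of
 * QAT_SIZE_TO_RING_SIZE_IN_BYTES in qatreg.h: 256 requests of 64 bytes
 * each need 16KB, so the encoding that maps to 16KB is returned; if no
 * encoding matches, QAT_DEFAULT_RING_SIZE is used instead.
 */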
static int
qat_etr_verify_ring_size(uint32_t msg_size, uint32_t num_msgs)
{
	int i = QAT_MIN_RING_SIZE;

	for (; i <= QAT_MAX_RING_SIZE; i++)
		if ((msg_size * num_msgs) == QAT_SIZE_TO_RING_SIZE_IN_BYTES(i))
			return i;

	return QAT_DEFAULT_RING_SIZE;
}

int
qat_etr_setup_ring(struct qat_softc *sc, int bank, uint32_t ring,
    uint32_t num_msgs, uint32_t msg_size, qat_cb_t cb, void *cb_arg,
    const char *name, struct qat_ring **rqr)
{
	struct qat_bank *qb;
	struct qat_ring *qr = NULL;
	int error;
	uint32_t ring_size_bytes, ring_config;
	uint64_t ring_base;
	uint32_t wm_nf = ETR_RING_CONFIG_NEAR_WM_512;
	uint32_t wm_ne = ETR_RING_CONFIG_NEAR_WM_0;

	MPASS(bank < sc->sc_hw.qhw_num_banks);

	/* Allocate a ring from specified bank */
	qb = &sc->sc_etr_banks[bank];

	if (ring >= sc->sc_hw.qhw_num_rings_per_bank)
		return EINVAL;
	if (qb->qb_allocated_rings & (1 << ring))
		return ENOENT;
	qr = &qb->qb_et_rings[ring];
	qb->qb_allocated_rings |= 1 << ring;

	/* Initialize allocated ring */
	qr->qr_ring = ring;
	qr->qr_bank = bank;
	qr->qr_name = name;
	qr->qr_ring_id = qr->qr_bank * sc->sc_hw.qhw_num_rings_per_bank + ring;
	qr->qr_ring_mask = (1 << ring);
	qr->qr_cb = cb;
	qr->qr_cb_arg = cb_arg;

	/* Setup the shadow variables */
	qr->qr_head = 0;
	qr->qr_tail = 0;
	qr->qr_msg_size = QAT_BYTES_TO_MSG_SIZE(msg_size);
	qr->qr_ring_size = qat_etr_verify_ring_size(msg_size, num_msgs);

	/*
	 * To ensure that the ring is aligned to its size, allocate at
	 * least 4KB; the ring itself may be smaller than the allocation.
	 */
	ring_size_bytes = QAT_SIZE_TO_RING_SIZE_IN_BYTES(qr->qr_ring_size);
	ring_size_bytes = QAT_RING_SIZE_BYTES_MIN(ring_size_bytes);
	error = qat_alloc_dmamem(sc, &qr->qr_dma, 1, ring_size_bytes,
	    ring_size_bytes);
	if (error)
		return error;

	qr->qr_ring_vaddr = qr->qr_dma.qdm_dma_vaddr;
	qr->qr_ring_paddr = qr->qr_dma.qdm_dma_seg.ds_addr;

	memset(qr->qr_ring_vaddr, QAT_RING_PATTERN,
	    qr->qr_dma.qdm_dma_seg.ds_len);

	bus_dmamap_sync(qr->qr_dma.qdm_dma_tag, qr->qr_dma.qdm_dma_map,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	if (cb == NULL) {
		ring_config = ETR_RING_CONFIG_BUILD(qr->qr_ring_size);
	} else {
		ring_config =
		    ETR_RING_CONFIG_BUILD_RESP(qr->qr_ring_size, wm_nf, wm_ne);
	}
	qat_etr_bank_ring_write_4(sc, bank, ring, ETR_RING_CONFIG, ring_config);

	ring_base = ETR_RING_BASE_BUILD(qr->qr_ring_paddr, qr->qr_ring_size);
	qat_etr_bank_ring_base_write_8(sc, bank, ring, ring_base);

	if (sc->sc_hw.qhw_init_arb != NULL)
		qat_arb_update(sc, qb);

	mtx_init(&qr->qr_ring_mtx, "qr ring", NULL, MTX_DEF);

	qat_etr_ap_bank_setup_ring(sc, qr);

	if (cb != NULL) {
		uint32_t intr_mask;

		qb->qb_intr_mask |= qr->qr_ring_mask;
		intr_mask = qb->qb_intr_mask;

		qat_etr_bank_write_4(sc, bank, ETR_INT_COL_EN, intr_mask);
		qat_etr_bank_write_4(sc, bank, ETR_INT_COL_CTL,
		    ETR_INT_COL_CTL_ENABLE | qb->qb_coalescing_time);
	}

	*rqr = qr;

	return 0;
}

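/*
 * Ring sizes are powers of two, so ring offsets wrap with a cheap
 * shift-based modulo: qat_modulo(x, shift) == x % (1 << shift).
 * For example, qat_modulo(0x1040, 12) == 0x40 for a 4KB ring.
 */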
static inline u_int
qat_modulo(u_int data, u_int shift)
{
	u_int div = data >> shift;
	u_int mult = div << shift;
	return data - mult;
}

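/*
 * Producer side of a request ring: reserve an in-flight slot, copy the
 * message in at the tail offset, and publish the new tail to the device.
 * ERESTART tells opencrypto to back off until responses drain.
 */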
int
qat_etr_put_msg(struct qat_softc *sc, struct qat_ring *qr, uint32_t *msg)
{
	uint32_t inflight;
	uint32_t *addr;

	mtx_lock(&qr->qr_ring_mtx);

	inflight = atomic_fetchadd_32(qr->qr_inflight, 1) + 1;
	if (inflight > QAT_MAX_INFLIGHTS(qr->qr_ring_size, qr->qr_msg_size)) {
		atomic_subtract_32(qr->qr_inflight, 1);
		qr->qr_need_wakeup = true;
		mtx_unlock(&qr->qr_ring_mtx);
		counter_u64_add(sc->sc_ring_full_restarts, 1);
		return ERESTART;
	}

	addr = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_tail);

	memcpy(addr, msg, QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size));

	bus_dmamap_sync(qr->qr_dma.qdm_dma_tag, qr->qr_dma.qdm_dma_map,
	    BUS_DMASYNC_PREWRITE);

	qr->qr_tail = qat_modulo(qr->qr_tail +
	    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
	    QAT_RING_SIZE_MODULO(qr->qr_ring_size));

	qat_etr_bank_ring_write_4(sc, qr->qr_bank, qr->qr_ring,
	    ETR_RING_TAIL_OFFSET, qr->qr_tail);

	mtx_unlock(&qr->qr_ring_mtx);

	return 0;
}

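/*
 * Consumer side of a response ring: walk entries from the head offset
 * until the empty-entry signature is seen, invoking the ring callback for
 * each message, then publish the new head to the device.
 */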
static int
qat_etr_ring_intr(struct qat_softc *sc, struct qat_bank *qb,
    struct qat_ring *qr)
{
	uint32_t *msg, nmsg = 0;
	int handled = 0;
	bool blocked = false;

	mtx_lock(&qr->qr_ring_mtx);

	msg = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_head);

	bus_dmamap_sync(qr->qr_dma.qdm_dma_tag, qr->qr_dma.qdm_dma_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (atomic_load_32(msg) != ETR_RING_EMPTY_ENTRY_SIG) {
		atomic_subtract_32(qr->qr_inflight, 1);

		if (qr->qr_cb != NULL) {
			mtx_unlock(&qr->qr_ring_mtx);
			handled |= qr->qr_cb(sc, qr->qr_cb_arg, msg);
			mtx_lock(&qr->qr_ring_mtx);
		}

		atomic_store_32(msg, ETR_RING_EMPTY_ENTRY_SIG);

		qr->qr_head = qat_modulo(qr->qr_head +
		    QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
		    QAT_RING_SIZE_MODULO(qr->qr_ring_size));
		nmsg++;

		msg = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_head);
	}

	bus_dmamap_sync(qr->qr_dma.qdm_dma_tag, qr->qr_dma.qdm_dma_map,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	if (nmsg > 0) {
		qat_etr_bank_ring_write_4(sc, qr->qr_bank, qr->qr_ring,
		    ETR_RING_HEAD_OFFSET, qr->qr_head);
		if (qr->qr_need_wakeup) {
			blocked = true;
			qr->qr_need_wakeup = false;
		}
	}

	mtx_unlock(&qr->qr_ring_mtx);

	if (blocked)
		crypto_unblock(sc->sc_crypto.qcy_cid, CRYPTO_SYMQ);

	return handled;
}

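/*
 * ETR_E_STAT reports a set bit for each empty ring, so its complement,
 * masked with the bank's interrupt mask, selects the rings that have
 * responses pending.
 */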
static void
qat_etr_bank_intr(void *arg)
{
	struct qat_bank *qb = arg;
	struct qat_softc *sc = qb->qb_sc;
	uint32_t estat;
	int i, handled = 0;

	mtx_lock(&qb->qb_bank_mtx);

	qat_etr_bank_write_4(sc, qb->qb_bank, ETR_INT_COL_CTL, 0);

	/* Now handle all the responses */
	estat = ~qat_etr_bank_read_4(sc, qb->qb_bank, ETR_E_STAT);
	estat &= qb->qb_intr_mask;

	qat_etr_bank_write_4(sc, qb->qb_bank, ETR_INT_COL_CTL,
	    ETR_INT_COL_CTL_ENABLE | qb->qb_coalescing_time);

	mtx_unlock(&qb->qb_bank_mtx);

	while ((i = ffs(estat)) != 0) {
		struct qat_ring *qr = &qb->qb_et_rings[--i];
		estat &= ~(1 << i);
		handled |= qat_etr_ring_intr(sc, qb, qr);
	}
}

static void
qat_arb_update(struct qat_softc *sc, struct qat_bank *qb)
{

	qat_arb_ringsrvarben_write_4(sc, qb->qb_bank,
	    qb->qb_allocated_rings & 0xff);
}

static struct qat_sym_cookie *
qat_crypto_alloc_sym_cookie(struct qat_crypto_bank *qcb)
{
	struct qat_sym_cookie *qsc;

	mtx_lock(&qcb->qcb_bank_mtx);

	if (qcb->qcb_symck_free_count == 0) {
		mtx_unlock(&qcb->qcb_bank_mtx);
		return NULL;
	}

	qsc = qcb->qcb_symck_free[--qcb->qcb_symck_free_count];

	mtx_unlock(&qcb->qcb_bank_mtx);

	return qsc;
}

static void
qat_crypto_free_sym_cookie(struct qat_crypto_bank *qcb,
    struct qat_sym_cookie *qsc)
{

	explicit_bzero(qsc->qsc_iv_buf, sizeof(qsc->qsc_iv_buf));
	explicit_bzero(qsc->qsc_auth_res, sizeof(qsc->qsc_auth_res));

	mtx_lock(&qcb->qcb_bank_mtx);
	qcb->qcb_symck_free[qcb->qcb_symck_free_count++] = qsc;
	mtx_unlock(&qcb->qcb_bank_mtx);
}

void
qat_memcpy_htobe64(void *dst, const void *src, size_t len)
{
	uint64_t *dst0 = dst;
	const uint64_t *src0 = src;
	size_t i;

	MPASS(len % sizeof(*dst0) == 0);

	for (i = 0; i < len / sizeof(*dst0); i++)
		*(dst0 + i) = htobe64(*(src0 + i));
}

void
qat_memcpy_htobe32(void *dst, const void *src, size_t len)
{
	uint32_t *dst0 = dst;
	const uint32_t *src0 = src;
	size_t i;

	MPASS(len % sizeof(*dst0) == 0);

	for (i = 0; i < len / sizeof(*dst0); i++)
		*(dst0 + i) = htobe32(*(src0 + i));
}

void
qat_memcpy_htobe(void *dst, const void *src, size_t len, uint32_t wordbyte)
{
	switch (wordbyte) {
	case 4:
		qat_memcpy_htobe32(dst, src, len);
		break;
	case 8:
		qat_memcpy_htobe64(dst, src, len);
		break;
	default:
		panic("invalid word size %u", wordbyte);
	}
}

void
qat_crypto_gmac_precompute(const struct qat_crypto_desc *desc,
    const uint8_t *key, int klen, const struct qat_sym_hash_def *hash_def,
    uint8_t *state)
{
	uint32_t ks[4 * (RIJNDAEL_MAXNR + 1)];
	char zeros[AES_BLOCK_LEN];
	int rounds;

	memset(zeros, 0, sizeof(zeros));
	rounds = rijndaelKeySetupEnc(ks, key, klen * NBBY);
	rijndaelEncrypt(ks, rounds, zeros, state);
	explicit_bzero(ks, sizeof(ks));
}

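/*
 * Precompute the HMAC inner and outer digest states for the session key,
 * so the hardware can continue the hash from the saved states instead of
 * rehashing the ipad/opad blocks per request.  The states are stored
 * big-endian with the word size of the hash in question (4 bytes for
 * SHA1/SHA256, 8 bytes for SHA384/SHA512).
 */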
void
qat_crypto_hmac_precompute(const struct qat_crypto_desc *desc,
    const uint8_t *key, int klen, const struct qat_sym_hash_def *hash_def,
    uint8_t *state1, uint8_t *state2)
{
	union authctx ctx;
	const struct auth_hash *sah = hash_def->qshd_alg->qshai_sah;
	uint32_t state_offset = hash_def->qshd_alg->qshai_state_offset;
	uint32_t state_size = hash_def->qshd_alg->qshai_state_size;
	uint32_t state_word = hash_def->qshd_alg->qshai_state_word;

	hmac_init_ipad(sah, key, klen, &ctx);
	qat_memcpy_htobe(state1, (uint8_t *)&ctx + state_offset, state_size,
	    state_word);
	hmac_init_opad(sah, key, klen, &ctx);
	qat_memcpy_htobe(state2, (uint8_t *)&ctx + state_offset, state_size,
	    state_word);
	explicit_bzero(&ctx, sizeof(ctx));
}

static enum hw_cipher_algo
qat_aes_cipher_algo(int klen)
{
	switch (klen) {
	case HW_AES_128_KEY_SZ:
		return HW_CIPHER_ALGO_AES128;
	case HW_AES_192_KEY_SZ:
		return HW_CIPHER_ALGO_AES192;
	case HW_AES_256_KEY_SZ:
		return HW_CIPHER_ALGO_AES256;
	default:
		panic("invalid key length %d", klen);
	}
}

uint16_t
qat_crypto_load_cipher_session(const struct qat_crypto_desc *desc,
    const struct qat_session *qs)
{
	enum hw_cipher_algo algo;
	enum hw_cipher_dir dir;
	enum hw_cipher_convert key_convert;
	enum hw_cipher_mode mode;

	dir = desc->qcd_cipher_dir;
	key_convert = HW_CIPHER_NO_CONVERT;
	mode = qs->qs_cipher_mode;
	switch (mode) {
	case HW_CIPHER_CBC_MODE:
	case HW_CIPHER_XTS_MODE:
		algo = qs->qs_cipher_algo;
		/*
		 * The AES decrypt key needs to be reversed.
		 * Instead of reversing the key at session registration,
		 * it is reversed on-the-fly by setting the KEY_CONVERT
		 * bit here.
		 */
		if (desc->qcd_cipher_dir == HW_CIPHER_DECRYPT)
			key_convert = HW_CIPHER_KEY_CONVERT;
		break;
	case HW_CIPHER_CTR_MODE:
		algo = qs->qs_cipher_algo;
		dir = HW_CIPHER_ENCRYPT;
		break;
	default:
		panic("unhandled cipher mode %d", mode);
		break;
	}

	return HW_CIPHER_CONFIG_BUILD(mode, algo, key_convert, dir);
}

uint16_t
qat_crypto_load_auth_session(const struct qat_crypto_desc *desc,
    const struct qat_session *qs, const struct qat_sym_hash_def **hash_def)
{
	enum qat_sym_hash_algorithm algo;

	switch (qs->qs_auth_algo) {
	case HW_AUTH_ALGO_SHA1:
		algo = QAT_SYM_HASH_SHA1;
		break;
	case HW_AUTH_ALGO_SHA256:
		algo = QAT_SYM_HASH_SHA256;
		break;
	case HW_AUTH_ALGO_SHA384:
		algo = QAT_SYM_HASH_SHA384;
		break;
	case HW_AUTH_ALGO_SHA512:
		algo = QAT_SYM_HASH_SHA512;
		break;
	case HW_AUTH_ALGO_GALOIS_128:
		algo = QAT_SYM_HASH_AES_GCM;
		break;
	default:
		panic("unhandled auth algorithm %d", qs->qs_auth_algo);
		break;
	}
	*hash_def = &qat_sym_hash_defs[algo];

	return HW_AUTH_CONFIG_BUILD(qs->qs_auth_mode,
	    (*hash_def)->qshd_qat->qshqi_algo_enc,
	    (*hash_def)->qshd_alg->qshai_digest_len);
}

struct qat_crypto_load_cb_arg {
	struct qat_session *qs;
	struct qat_sym_cookie *qsc;
	struct cryptop *crp;
	int error;
};

static void
qat_crypto_load_cb(void *_arg, bus_dma_segment_t *segs, int nseg,
    int error)
{
	struct cryptop *crp;
	struct flat_buffer_desc *flatbuf;
	struct qat_crypto_load_cb_arg *arg;
	struct qat_session *qs;
	struct qat_sym_cookie *qsc;
	bus_addr_t addr;
	bus_size_t len;
	int iseg, oseg, skip;

	arg = _arg;
	if (error != 0) {
		arg->error = error;
		return;
	}

	crp = arg->crp;
	qs = arg->qs;
	qsc = arg->qsc;

	if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) {
		/*
		 * The firmware expects AAD to be in a contiguous buffer and
		 * padded to a multiple of 16 bytes.  To satisfy these
		 * constraints we bounce the AAD into a per-request buffer.
		 */
		crypto_copydata(crp, crp->crp_aad_start, crp->crp_aad_length,
		    qsc->qsc_gcm_aad);
		memset(qsc->qsc_gcm_aad + crp->crp_aad_length, 0,
		    roundup2(crp->crp_aad_length, QAT_AES_GCM_AAD_ALIGN) -
		    crp->crp_aad_length);
		skip = crp->crp_payload_start;
	} else if (crp->crp_aad_length > 0) {
		skip = crp->crp_aad_start;
	} else {
		skip = crp->crp_payload_start;
	}

	for (iseg = oseg = 0; iseg < nseg; iseg++) {
		addr = segs[iseg].ds_addr;
		len = segs[iseg].ds_len;

		if (skip > 0) {
			if (skip < len) {
				addr += skip;
				len -= skip;
				skip = 0;
			} else {
				skip -= len;
				continue;
			}
		}

		flatbuf = &qsc->qsc_flat_bufs[oseg++];
		flatbuf->data_len_in_bytes = (uint32_t)len;
		flatbuf->phy_buffer = (uint64_t)addr;
	}
	qsc->qsc_buf_list.num_buffers = oseg;

	arg->error = 0;
}

static int
qat_crypto_load(struct qat_session *qs, struct qat_sym_cookie *qsc,
    struct qat_crypto_desc const *desc, struct cryptop *crp)
{
	struct qat_crypto_load_cb_arg arg;
	int error;

	crypto_read_iv(crp, qsc->qsc_iv_buf);

	arg.qs = qs;
	arg.qsc = qsc;
	arg.crp = crp;
	arg.error = 0;

	error = bus_dmamap_load_crp(qsc->qsc_buf_dma_tag, qsc->qsc_buf_dmamap,
	    crp, qat_crypto_load_cb, &arg, BUS_DMA_NOWAIT);
	if (error == 0)
		error = arg.error;
	return error;
}

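/*
 * Spread load across the crypto banks by CPU: the submitting CPU's id
 * indexes the bank array modulo the number of banks.
 */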
static inline struct qat_crypto_bank *
qat_crypto_select_bank(struct qat_crypto *qcy)
{
	u_int cpuid = PCPU_GET(cpuid);

	return &qcy->qcy_banks[cpuid % qcy->qcy_num_banks];
}

static int
qat_crypto_setup_ring(struct qat_softc *sc, struct qat_crypto_bank *qcb)
{
	char *name;
	int bank, curname, error, i;

	bank = qcb->qcb_bank;
	curname = 0;

	name = qcb->qcb_ring_names[curname++];
	snprintf(name, QAT_RING_NAME_SIZE, "bank%d sym_tx", bank);
	error = qat_etr_setup_ring(sc, qcb->qcb_bank,
	    sc->sc_hw.qhw_ring_sym_tx, QAT_NSYMREQ, sc->sc_hw.qhw_fw_req_size,
	    NULL, NULL, name, &qcb->qcb_sym_tx);
	if (error)
		return error;

	name = qcb->qcb_ring_names[curname++];
	snprintf(name, QAT_RING_NAME_SIZE, "bank%d sym_rx", bank);
	error = qat_etr_setup_ring(sc, qcb->qcb_bank,
	    sc->sc_hw.qhw_ring_sym_rx, QAT_NSYMREQ, sc->sc_hw.qhw_fw_resp_size,
	    qat_crypto_sym_rxintr, qcb, name, &qcb->qcb_sym_rx);
	if (error)
		return error;

	for (i = 0; i < QAT_NSYMCOOKIE; i++) {
		struct qat_dmamem *qdm = &qcb->qcb_symck_dmamems[i];
		struct qat_sym_cookie *qsc;

		error = qat_alloc_dmamem(sc, qdm, 1,
		    sizeof(struct qat_sym_cookie), QAT_OPTIMAL_ALIGN);
		if (error)
			return error;

		qsc = qdm->qdm_dma_vaddr;
		qsc->qsc_self_dmamap = qdm->qdm_dma_map;
		qsc->qsc_self_dma_tag = qdm->qdm_dma_tag;
		qsc->qsc_bulk_req_params_buf_paddr =
		    qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
		    u.qsc_bulk_cookie.qsbc_req_params_buf);
		qsc->qsc_buffer_list_desc_paddr =
		    qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
		    qsc_buf_list);
		qsc->qsc_iv_buf_paddr =
		    qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
		    qsc_iv_buf);
		qsc->qsc_auth_res_paddr =
		    qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
		    qsc_auth_res);
		qsc->qsc_gcm_aad_paddr =
		    qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
		    qsc_gcm_aad);
		qsc->qsc_content_desc_paddr =
		    qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
		    qsc_content_desc);
		qcb->qcb_symck_free[i] = qsc;
		qcb->qcb_symck_free_count++;

		error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),
		    1, 0,			/* alignment, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    QAT_MAXLEN,			/* maxsize */
		    QAT_MAXSEG,			/* nsegments */
		    QAT_MAXLEN,			/* maxsegsize */
		    BUS_DMA_COHERENT,		/* flags */
		    NULL, NULL,			/* lockfunc, lockarg */
		    &qsc->qsc_buf_dma_tag);
		if (error != 0)
			return error;

		error = bus_dmamap_create(qsc->qsc_buf_dma_tag,
		    BUS_DMA_COHERENT, &qsc->qsc_buf_dmamap);
		if (error != 0)
			return error;
	}

	return 0;
}

static int
qat_crypto_bank_init(struct qat_softc *sc, struct qat_crypto_bank *qcb)
{
	mtx_init(&qcb->qcb_bank_mtx, "qcb bank", NULL, MTX_DEF);

	return qat_crypto_setup_ring(sc, qcb);
}

static void
qat_crypto_bank_deinit(struct qat_softc *sc, struct qat_crypto_bank *qcb)
{
	struct qat_dmamem *qdm;
	int i;

	for (i = 0; i < QAT_NSYMCOOKIE; i++) {
		qdm = &qcb->qcb_symck_dmamems[i];
		qat_free_dmamem(sc, qdm);
	}
	qat_free_dmamem(sc, &qcb->qcb_sym_tx->qr_dma);
	qat_free_dmamem(sc, &qcb->qcb_sym_rx->qr_dma);

	mtx_destroy(&qcb->qcb_bank_mtx);
}

static int
qat_crypto_init(struct qat_softc *sc)
{
	struct qat_crypto *qcy = &sc->sc_crypto;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children;
	int bank, error, num_banks;

	qcy->qcy_sc = sc;

	if (sc->sc_hw.qhw_init_arb != NULL)
		num_banks = imin(mp_ncpus, sc->sc_hw.qhw_num_banks);
	else
		num_banks = sc->sc_ae_num;

	qcy->qcy_num_banks = num_banks;

	qcy->qcy_banks =
	    qat_alloc_mem(sizeof(struct qat_crypto_bank) * num_banks);

	for (bank = 0; bank < num_banks; bank++) {
		struct qat_crypto_bank *qcb = &qcy->qcy_banks[bank];
		qcb->qcb_bank = bank;
		error = qat_crypto_bank_init(sc, qcb);
		if (error)
			return error;
	}

	mtx_init(&qcy->qcy_crypto_mtx, "qcy crypto", NULL, MTX_DEF);

	ctx = device_get_sysctl_ctx(sc->sc_dev);
	oid = device_get_sysctl_tree(sc->sc_dev);
	children = SYSCTL_CHILDREN(oid);
	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "statistics");
	children = SYSCTL_CHILDREN(oid);

	sc->sc_gcm_aad_restarts = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "gcm_aad_restarts",
	    CTLFLAG_RD, &sc->sc_gcm_aad_restarts,
	    "GCM requests deferred due to AAD size change");
	sc->sc_gcm_aad_updates = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "gcm_aad_updates",
	    CTLFLAG_RD, &sc->sc_gcm_aad_updates,
	    "GCM requests that required session state update");
	sc->sc_ring_full_restarts = counter_u64_alloc(M_WAITOK);
	SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ring_full",
	    CTLFLAG_RD, &sc->sc_ring_full_restarts,
	    "Requests deferred due to in-flight max reached");

	return 0;
}

static void
qat_crypto_deinit(struct qat_softc *sc)
{
	struct qat_crypto *qcy = &sc->sc_crypto;
	struct qat_crypto_bank *qcb;
	int bank;

	if (qcy->qcy_banks != NULL) {
		for (bank = 0; bank < qcy->qcy_num_banks; bank++) {
			qcb = &qcy->qcy_banks[bank];
			qat_crypto_bank_deinit(sc, qcb);
		}
		qat_free_mem(qcy->qcy_banks);
		mtx_destroy(&qcy->qcy_crypto_mtx);
	}
}

static int
qat_crypto_start(struct qat_softc *sc)
{
	struct qat_crypto *qcy;

	qcy = &sc->sc_crypto;
	qcy->qcy_cid = crypto_get_driverid(sc->sc_dev,
	    sizeof(struct qat_session), CRYPTOCAP_F_HARDWARE);
	if (qcy->qcy_cid < 0) {
		device_printf(sc->sc_dev,
		    "could not get opencrypto driver id\n");
		return ENOENT;
	}

	return 0;
}

static void
qat_crypto_stop(struct qat_softc *sc)
{
	struct qat_crypto *qcy;

	qcy = &sc->sc_crypto;
	if (qcy->qcy_cid >= 0)
		(void)crypto_unregister_all(qcy->qcy_cid);
}

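/*
 * Response handler for symmetric crypto requests: the per-request cookie
 * is recovered from the opaque field of the firmware response, the digest
 * is verified or copied back, and the cryptop is completed.
 */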
static int
qat_crypto_sym_rxintr(struct qat_softc *sc, void *arg, void *msg)
{
	char icv[QAT_SYM_HASH_BUFFER_LEN];
	struct qat_crypto_bank *qcb = arg;
	struct qat_crypto *qcy;
	struct qat_session *qs;
	struct qat_sym_cookie *qsc;
	struct qat_sym_bulk_cookie *qsbc;
	struct cryptop *crp;
	int error;
	uint16_t auth_sz;
	bool blocked;

	qsc = *(void **)((uintptr_t)msg + sc->sc_hw.qhw_crypto_opaque_offset);

	qsbc = &qsc->u.qsc_bulk_cookie;
	qcy = qsbc->qsbc_crypto;
	qs = qsbc->qsbc_session;
	crp = qsbc->qsbc_cb_tag;

	bus_dmamap_sync(qsc->qsc_self_dma_tag, qsc->qsc_self_dmamap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(qsc->qsc_buf_dma_tag, qsc->qsc_buf_dmamap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(qsc->qsc_buf_dma_tag, qsc->qsc_buf_dmamap);

	error = 0;
	if ((auth_sz = qs->qs_auth_mlen) != 0) {
		if ((crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) != 0) {
			crypto_copydata(crp, crp->crp_digest_start,
			    auth_sz, icv);
			if (timingsafe_bcmp(icv, qsc->qsc_auth_res,
			    auth_sz) != 0)
				error = EBADMSG;
		} else {
			crypto_copyback(crp, crp->crp_digest_start,
			    auth_sz, qsc->qsc_auth_res);
		}
	}

	qat_crypto_free_sym_cookie(qcb, qsc);

	blocked = false;
	mtx_lock(&qs->qs_session_mtx);
	MPASS(qs->qs_status & QAT_SESSION_STATUS_ACTIVE);
	qs->qs_inflight--;
	if (__predict_false(qs->qs_need_wakeup && qs->qs_inflight == 0)) {
		blocked = true;
		qs->qs_need_wakeup = false;
	}
	mtx_unlock(&qs->qs_session_mtx);

	crp->crp_etype = error;
	crypto_done(crp);

	if (blocked)
		crypto_unblock(qcy->qcy_cid, CRYPTO_SYMQ);

	return 1;
}

static int
qat_probesession(device_t dev, const struct crypto_session_params *csp)
{
	if (csp->csp_cipher_alg == CRYPTO_AES_XTS &&
	    qat_lookup(dev)->qatp_chip == QAT_CHIP_C2XXX) {
		/*
		 * AES-XTS is not supported by the NanoQAT.
		 */
		return EINVAL;
	}

	switch (csp->csp_mode) {
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_ICM:
			if (csp->csp_ivlen != AES_BLOCK_LEN)
				return EINVAL;
			break;
		case CRYPTO_AES_XTS:
			if (csp->csp_ivlen != AES_XTS_IV_LEN)
				return EINVAL;
			break;
		default:
			return EINVAL;
		}
		break;
	case CSP_MODE_DIGEST:
		switch (csp->csp_auth_alg) {
		case CRYPTO_SHA1:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512:
		case CRYPTO_SHA2_512_HMAC:
			break;
		case CRYPTO_AES_NIST_GMAC:
			if (csp->csp_ivlen != AES_GCM_IV_LEN)
				return EINVAL;
			break;
		default:
			return EINVAL;
		}
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
			if (csp->csp_ivlen != AES_GCM_IV_LEN)
				return EINVAL;
			break;
		default:
			return EINVAL;
		}
		break;
	case CSP_MODE_ETA:
		switch (csp->csp_auth_alg) {
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
			switch (csp->csp_cipher_alg) {
			case CRYPTO_AES_CBC:
			case CRYPTO_AES_ICM:
				if (csp->csp_ivlen != AES_BLOCK_LEN)
					return EINVAL;
				break;
			case CRYPTO_AES_XTS:
				if (csp->csp_ivlen != AES_XTS_IV_LEN)
					return EINVAL;
				break;
			default:
				return EINVAL;
			}
			break;
		default:
			return EINVAL;
		}
		break;
	default:
		return EINVAL;
	}

	return CRYPTODEV_PROBE_HARDWARE;
}

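/*
 * Each session owns a pair of DMA-able content descriptors, one per
 * cipher direction; qat_process() later selects one based on whether the
 * request encrypts or decrypts.
 */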
static int
qat_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct qat_crypto *qcy;
	struct qat_dmamem *qdm;
	struct qat_session *qs;
	struct qat_softc *sc;
	struct qat_crypto_desc *ddesc, *edesc;
	int error, slices;

	sc = device_get_softc(dev);
	qs = crypto_get_driver_session(cses);
	qcy = &sc->sc_crypto;

	qdm = &qs->qs_desc_mem;
	error = qat_alloc_dmamem(sc, qdm, QAT_MAXSEG,
	    sizeof(struct qat_crypto_desc) * 2, QAT_OPTIMAL_ALIGN);
	if (error != 0)
		return error;

	mtx_init(&qs->qs_session_mtx, "qs session", NULL, MTX_DEF);
	qs->qs_aad_length = -1;

	qs->qs_dec_desc = ddesc = qdm->qdm_dma_vaddr;
	qs->qs_enc_desc = edesc = ddesc + 1;

	ddesc->qcd_desc_paddr = qdm->qdm_dma_seg.ds_addr;
	ddesc->qcd_hash_state_paddr = ddesc->qcd_desc_paddr +
	    offsetof(struct qat_crypto_desc, qcd_hash_state_prefix_buf);
	edesc->qcd_desc_paddr = qdm->qdm_dma_seg.ds_addr +
	    sizeof(struct qat_crypto_desc);
	edesc->qcd_hash_state_paddr = edesc->qcd_desc_paddr +
	    offsetof(struct qat_crypto_desc, qcd_hash_state_prefix_buf);

	qs->qs_status = QAT_SESSION_STATUS_ACTIVE;
	qs->qs_inflight = 0;

	qs->qs_cipher_key = csp->csp_cipher_key;
	qs->qs_cipher_klen = csp->csp_cipher_klen;
	qs->qs_auth_key = csp->csp_auth_key;
	qs->qs_auth_klen = csp->csp_auth_klen;

	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_CBC:
		qs->qs_cipher_algo = qat_aes_cipher_algo(csp->csp_cipher_klen);
		qs->qs_cipher_mode = HW_CIPHER_CBC_MODE;
		break;
	case CRYPTO_AES_ICM:
		qs->qs_cipher_algo = qat_aes_cipher_algo(csp->csp_cipher_klen);
		qs->qs_cipher_mode = HW_CIPHER_CTR_MODE;
		break;
	case CRYPTO_AES_XTS:
		qs->qs_cipher_algo =
		    qat_aes_cipher_algo(csp->csp_cipher_klen / 2);
		qs->qs_cipher_mode = HW_CIPHER_XTS_MODE;
		break;
	case CRYPTO_AES_NIST_GCM_16:
		qs->qs_cipher_algo = qat_aes_cipher_algo(csp->csp_cipher_klen);
		qs->qs_cipher_mode = HW_CIPHER_CTR_MODE;
		qs->qs_auth_algo = HW_AUTH_ALGO_GALOIS_128;
		qs->qs_auth_mode = HW_AUTH_MODE1;
		break;
	case 0:
		break;
	default:
		panic("%s: unhandled cipher algorithm %d", __func__,
		    csp->csp_cipher_alg);
	}

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
		qs->qs_auth_algo = HW_AUTH_ALGO_SHA1;
		qs->qs_auth_mode = HW_AUTH_MODE1;
		break;
	case CRYPTO_SHA1:
		qs->qs_auth_algo = HW_AUTH_ALGO_SHA1;
		qs->qs_auth_mode = HW_AUTH_MODE0;
		break;
	case CRYPTO_SHA2_256_HMAC:
		qs->qs_auth_algo = HW_AUTH_ALGO_SHA256;
		qs->qs_auth_mode = HW_AUTH_MODE1;
		break;
	case CRYPTO_SHA2_256:
		qs->qs_auth_algo = HW_AUTH_ALGO_SHA256;
		qs->qs_auth_mode = HW_AUTH_MODE0;
		break;
	case CRYPTO_SHA2_384_HMAC:
		qs->qs_auth_algo = HW_AUTH_ALGO_SHA384;
		qs->qs_auth_mode = HW_AUTH_MODE1;
		break;
	case CRYPTO_SHA2_384:
		qs->qs_auth_algo = HW_AUTH_ALGO_SHA384;
		qs->qs_auth_mode = HW_AUTH_MODE0;
		break;
	case CRYPTO_SHA2_512_HMAC:
		qs->qs_auth_algo = HW_AUTH_ALGO_SHA512;
		qs->qs_auth_mode = HW_AUTH_MODE1;
		break;
	case CRYPTO_SHA2_512:
		qs->qs_auth_algo = HW_AUTH_ALGO_SHA512;
		qs->qs_auth_mode = HW_AUTH_MODE0;
		break;
	case CRYPTO_AES_NIST_GMAC:
		qs->qs_cipher_algo = qat_aes_cipher_algo(csp->csp_auth_klen);
		qs->qs_cipher_mode = HW_CIPHER_CTR_MODE;
		qs->qs_auth_algo = HW_AUTH_ALGO_GALOIS_128;
		qs->qs_auth_mode = HW_AUTH_MODE1;

		qs->qs_cipher_key = qs->qs_auth_key;
		qs->qs_cipher_klen = qs->qs_auth_klen;
		break;
	case 0:
		break;
	default:
		panic("%s: unhandled auth algorithm %d", __func__,
		    csp->csp_auth_alg);
	}

	slices = 0;
	switch (csp->csp_mode) {
	case CSP_MODE_AEAD:
	case CSP_MODE_ETA:
		/* auth then decrypt */
		ddesc->qcd_slices[0] = FW_SLICE_AUTH;
		ddesc->qcd_slices[1] = FW_SLICE_CIPHER;
		ddesc->qcd_cipher_dir = HW_CIPHER_DECRYPT;
		ddesc->qcd_cmd_id = FW_LA_CMD_HASH_CIPHER;
		/* encrypt then auth */
		edesc->qcd_slices[0] = FW_SLICE_CIPHER;
		edesc->qcd_slices[1] = FW_SLICE_AUTH;
		edesc->qcd_cipher_dir = HW_CIPHER_ENCRYPT;
		edesc->qcd_cmd_id = FW_LA_CMD_CIPHER_HASH;
		slices = 2;
		break;
	case CSP_MODE_CIPHER:
		/* decrypt */
		ddesc->qcd_slices[0] = FW_SLICE_CIPHER;
		ddesc->qcd_cipher_dir = HW_CIPHER_DECRYPT;
		ddesc->qcd_cmd_id = FW_LA_CMD_CIPHER;
		/* encrypt */
		edesc->qcd_slices[0] = FW_SLICE_CIPHER;
		edesc->qcd_cipher_dir = HW_CIPHER_ENCRYPT;
		edesc->qcd_cmd_id = FW_LA_CMD_CIPHER;
		slices = 1;
		break;
	case CSP_MODE_DIGEST:
		if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) {
			/* auth then decrypt */
			ddesc->qcd_slices[0] = FW_SLICE_AUTH;
			ddesc->qcd_slices[1] = FW_SLICE_CIPHER;
			ddesc->qcd_cipher_dir = HW_CIPHER_DECRYPT;
			ddesc->qcd_cmd_id = FW_LA_CMD_HASH_CIPHER;
			/* encrypt then auth */
			edesc->qcd_slices[0] = FW_SLICE_CIPHER;
			edesc->qcd_slices[1] = FW_SLICE_AUTH;
			edesc->qcd_cipher_dir = HW_CIPHER_ENCRYPT;
			edesc->qcd_cmd_id = FW_LA_CMD_CIPHER_HASH;
			slices = 2;
		} else {
			ddesc->qcd_slices[0] = FW_SLICE_AUTH;
			ddesc->qcd_cmd_id = FW_LA_CMD_AUTH;
			edesc->qcd_slices[0] = FW_SLICE_AUTH;
			edesc->qcd_cmd_id = FW_LA_CMD_AUTH;
			slices = 1;
		}
		break;
	default:
		panic("%s: unhandled crypto algorithm %d, %d", __func__,
		    csp->csp_cipher_alg, csp->csp_auth_alg);
	}
	ddesc->qcd_slices[slices] = FW_SLICE_DRAM_WR;
	edesc->qcd_slices[slices] = FW_SLICE_DRAM_WR;

	qcy->qcy_sc->sc_hw.qhw_crypto_setup_desc(qcy, qs, ddesc);
	qcy->qcy_sc->sc_hw.qhw_crypto_setup_desc(qcy, qs, edesc);

	if (csp->csp_auth_mlen != 0)
		qs->qs_auth_mlen = csp->csp_auth_mlen;
	else
		qs->qs_auth_mlen = edesc->qcd_auth_sz;

	/* Compute the GMAC by specifying a null cipher payload. */
	if (csp->csp_auth_alg == CRYPTO_AES_NIST_GMAC)
		ddesc->qcd_cmd_id = edesc->qcd_cmd_id = FW_LA_CMD_AUTH;

	return 0;
}

static void
qat_crypto_clear_desc(struct qat_crypto_desc *desc)
{
	explicit_bzero(desc->qcd_content_desc, sizeof(desc->qcd_content_desc));
	explicit_bzero(desc->qcd_hash_state_prefix_buf,
	    sizeof(desc->qcd_hash_state_prefix_buf));
	explicit_bzero(desc->qcd_req_cache, sizeof(desc->qcd_req_cache));
}

static void
qat_freesession(device_t dev, crypto_session_t cses)
{
	struct qat_session *qs;

	qs = crypto_get_driver_session(cses);
	KASSERT(qs->qs_inflight == 0,
	    ("%s: session %p has requests in flight", __func__, qs));

	qat_crypto_clear_desc(qs->qs_enc_desc);
	qat_crypto_clear_desc(qs->qs_dec_desc);
	qat_free_dmamem(device_get_softc(dev), &qs->qs_desc_mem);
	mtx_destroy(&qs->qs_session_mtx);
}

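/*
 * Submission path: pick a bank for the current CPU, take a per-request
 * cookie, DMA-load the request buffers, and enqueue the firmware request
 * on the bank's sym_tx ring.  Resource exhaustion surfaces as ERESTART so
 * that opencrypto requeues the request once completions unblock the queue.
 */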
static int
qat_process(device_t dev, struct cryptop *crp, int hint)
{
	struct qat_crypto *qcy;
	struct qat_crypto_bank *qcb;
	struct qat_crypto_desc const *desc;
	struct qat_session *qs;
	struct qat_softc *sc;
	struct qat_sym_cookie *qsc;
	struct qat_sym_bulk_cookie *qsbc;
	int error;

	sc = device_get_softc(dev);
	qcy = &sc->sc_crypto;
	qs = crypto_get_driver_session(crp->crp_session);
	qsc = NULL;

	if (__predict_false(crypto_buffer_len(&crp->crp_buf) > QAT_MAXLEN)) {
		error = E2BIG;
		goto fail1;
	}

	mtx_lock(&qs->qs_session_mtx);
	if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) {
		if (crp->crp_aad_length > QAT_GCM_AAD_SIZE_MAX) {
			error = E2BIG;
			mtx_unlock(&qs->qs_session_mtx);
			goto fail1;
		}

		/*
		 * The firmware interface for GCM annoyingly requires the AAD
		 * size to be stored in the session's content descriptor, which
		 * is not really meant to be updated after session
		 * initialization.  For IPSec the AAD size is fixed so this is
		 * not much of a problem in practice, but we have to catch AAD
		 * size updates here so that the device code can safely update
		 * the session's recorded AAD size.
		 */
		if (__predict_false(crp->crp_aad_length != qs->qs_aad_length)) {
			if (qs->qs_inflight == 0) {
				if (qs->qs_aad_length != -1) {
					counter_u64_add(sc->sc_gcm_aad_updates,
					    1);
				}
				qs->qs_aad_length = crp->crp_aad_length;
			} else {
				qs->qs_need_wakeup = true;
				mtx_unlock(&qs->qs_session_mtx);
				counter_u64_add(sc->sc_gcm_aad_restarts, 1);
				error = ERESTART;
				goto fail1;
			}
		}
	}
	qs->qs_inflight++;
	mtx_unlock(&qs->qs_session_mtx);

	qcb = qat_crypto_select_bank(qcy);

	qsc = qat_crypto_alloc_sym_cookie(qcb);
	if (qsc == NULL) {
		error = ERESTART;
		goto fail2;
	}

	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		desc = qs->qs_enc_desc;
	else
		desc = qs->qs_dec_desc;

	error = qat_crypto_load(qs, qsc, desc, crp);
	if (error != 0)
		goto fail2;

	qsbc = &qsc->u.qsc_bulk_cookie;
	qsbc->qsbc_crypto = qcy;
	qsbc->qsbc_session = qs;
	qsbc->qsbc_cb_tag = crp;

	sc->sc_hw.qhw_crypto_setup_req_params(qcb, qs, desc, qsc, crp);

	bus_dmamap_sync(qsc->qsc_buf_dma_tag, qsc->qsc_buf_dmamap,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(qsc->qsc_self_dma_tag, qsc->qsc_self_dmamap,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	error = qat_etr_put_msg(sc, qcb->qcb_sym_tx,
	    (uint32_t *)qsbc->qsbc_msg);
	if (error)
		goto fail2;

	return 0;

fail2:
	if (qsc)
		qat_crypto_free_sym_cookie(qcb, qsc);
	mtx_lock(&qs->qs_session_mtx);
	qs->qs_inflight--;
	mtx_unlock(&qs->qs_session_mtx);
fail1:
	crp->crp_etype = error;
	crypto_done(crp);
	return 0;
}

static device_method_t qat_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		qat_probe),
	DEVMETHOD(device_attach,	qat_attach),
	DEVMETHOD(device_detach,	qat_detach),

	/* Cryptodev interface */
	DEVMETHOD(cryptodev_probesession, qat_probesession),
	DEVMETHOD(cryptodev_newsession,	qat_newsession),
	DEVMETHOD(cryptodev_freesession, qat_freesession),
	DEVMETHOD(cryptodev_process,	qat_process),

	DEVMETHOD_END
};

static devclass_t qat_devclass;

static driver_t qat_driver = {
	.name = "qat",
	.methods = qat_methods,
	.size = sizeof(struct qat_softc),
};

DRIVER_MODULE(qat, pci, qat_driver, qat_devclass, 0, 0);
MODULE_VERSION(qat, 1);
MODULE_DEPEND(qat, crypto, 1, 1, 1);
MODULE_DEPEND(qat, pci, 1, 1, 1);