1 /* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
2 /* $NetBSD: qat_hw17.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $ */
5 * Copyright (c) 2019 Internet Initiative Japan, Inc.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
21 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 * POSSIBILITY OF SUCH DAMAGE.
31 * Copyright(c) 2014 Intel Corporation.
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
36 * * Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * * Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in
40 * the documentation and/or other materials provided with the
42 * * Neither the name of Intel Corporation nor the names of its
43 * contributors may be used to endorse or promote products derived
44 * from this software without specific prior written permission.
46 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
47 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
48 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
49 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
50 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
51 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
52 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
53 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
54 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
55 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
56 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59 #include <sys/cdefs.h>
60 __FBSDID("$FreeBSD$");
62 __KERNEL_RCSID(0, "$NetBSD: qat_hw17.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $");
65 #include <sys/param.h>
66 #include <sys/systm.h>
70 #include <machine/bus.h>
72 #include <opencrypto/xform.h>
74 #include <dev/pci/pcireg.h>
75 #include <dev/pci/pcivar.h>
78 #include "qat_hw17reg.h"
80 #include "qat_hw17var.h"
82 int qat_adm_mailbox_put_msg_sync(struct qat_softc *, uint32_t,
84 int qat_adm_mailbox_send(struct qat_softc *,
85 struct fw_init_admin_req *, struct fw_init_admin_resp *);
86 int qat_adm_mailbox_send_init_me(struct qat_softc *);
87 int qat_adm_mailbox_send_hb_timer(struct qat_softc *);
88 int qat_adm_mailbox_send_fw_status(struct qat_softc *);
89 int qat_adm_mailbox_send_constants(struct qat_softc *);
/*
 * Set up the admin mailbox channel used to send init/admin messages
 * to the accelerator firmware.
 *
 * Allocates page-sized, page-aligned DMA regions for the admin
 * message area, the constants table and the heartbeat record, copies
 * mailbox_const_tab into device-visible memory, and programs the
 * 64-bit bus address of the admin message area into the device's
 * ADMINMSGUR (upper 32 bits) / ADMINMSGLR (lower 32 bits) registers.
 * (Error-check/return lines are elided in this excerpt.)
 */
92 qat_adm_mailbox_init(struct qat_softc *sc)
96 struct qat_dmamem *qdm;
/* Shared admin message buffer: request/response slots for each AE. */
98 error = qat_alloc_dmamem(sc, &sc->sc_admin_comms.qadc_dma, 1,
99 PAGE_SIZE, PAGE_SIZE);
/* Constants table, later handed to firmware via FW_CONSTANTS_CFG. */
103 qdm = &sc->sc_admin_comms.qadc_const_tbl_dma;
104 error = qat_alloc_dmamem(sc, qdm, 1, PAGE_SIZE, PAGE_SIZE);
108 memcpy(qdm->qdm_dma_vaddr,
109 mailbox_const_tab, sizeof(mailbox_const_tab));
/* Flush the table to memory before the device may read it. */
111 bus_dmamap_sync(qdm->qdm_dma_tag, qdm->qdm_dma_map,
112 BUS_DMASYNC_PREWRITE);
/* Heartbeat area, referenced by FW_HEARTBEAT_TIMER_SET below. */
114 error = qat_alloc_dmamem(sc, &sc->sc_admin_comms.qadc_hb_dma, 1,
115 PAGE_SIZE, PAGE_SIZE);
/* Hand the admin message area's bus address to the device. */
119 addr = (uint64_t)sc->sc_admin_comms.qadc_dma.qdm_dma_seg.ds_addr;
120 qat_misc_write_4(sc, ADMINMSGUR, addr >> 32);
121 qat_misc_write_4(sc, ADMINMSGLR, addr);
/*
 * Post one admin message to accelerator engine 'ae' and busy-wait for
 * its reply.
 *
 * Each AE owns a mailbox doorbell register at
 * MAILBOX_BASE + ae * MAILBOX_STRIDE, and a pair of ADMINMSG_LEN-byte
 * slots (request followed by response) in the shared admin DMA
 * buffer -- hence the "* 2" in the buffer offset.  Writing 1 to the
 * doorbell hands the request to the firmware; the firmware clears the
 * register once the response slot is valid.  Polls the doorbell up to
 * 50 times (the per-iteration delay is not visible in this excerpt).
 *
 * Returns 0 when a response was received, EFAULT on timeout.
 */
127 qat_adm_mailbox_put_msg_sync(struct qat_softc *sc, uint32_t ae,
130 struct qat_dmamem *qdm;
132 bus_size_t mb_offset = MAILBOX_BASE + (ae * MAILBOX_STRIDE);
133 int offset = ae * ADMINMSG_LEN * 2;
135 uint8_t *buf = (uint8_t *)sc->sc_admin_comms.qadc_dma.qdm_dma_vaddr + offset;
/*
 * Read the doorbell up front -- presumably to bail out if the mailbox
 * is still busy; the check itself is elided in this excerpt.
 */
137 mailbox = qat_misc_read_4(sc, mb_offset);
141 qdm = &sc->sc_admin_comms.qadc_dma;
/* Copy the request into this AE's request slot and sync for DMA. */
142 memcpy(buf, in, ADMINMSG_LEN);
143 bus_dmamap_sync(qdm->qdm_dma_tag, qdm->qdm_dma_map,
144 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* Ring the doorbell; firmware clears it when the response is ready. */
145 qat_misc_write_4(sc, mb_offset, 1);
148 for (times = 0; times < 50; times++) {
150 if (qat_misc_read_4(sc, mb_offset) == 0) {
/* Response ready: sync and copy it out of the response slot. */
156 bus_dmamap_sync(qdm->qdm_dma_tag, qdm->qdm_dma_map,
157 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
158 memcpy(out, buf + ADMINMSG_LEN, ADMINMSG_LEN);
160 device_printf(sc->sc_dev,
161 "Failed to send admin msg to accelerator\n");
164 return received ? 0 : EFAULT;
/*
 * Broadcast an admin request to every enabled accelerator engine.
 *
 * Walks sc_ae_mask bit by bit, sending 'req' synchronously to each
 * enabled AE, and reports failure when the firmware returns a
 * non-zero status in the response header.  (Error-return lines are
 * elided in this excerpt.)
 */
168 qat_adm_mailbox_send(struct qat_softc *sc,
169 struct fw_init_admin_req *req, struct fw_init_admin_resp *resp)
175 for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
179 error = qat_adm_mailbox_put_msg_sync(sc, ae, req, resp);
/* Per-command failure is signalled via the response status field. */
182 if (resp->init_resp_hdr.status) {
183 device_printf(sc->sc_dev,
184 "Failed to send admin msg: cmd %d\n",
185 req->init_admin_cmd_id);
/*
 * Send the FW_INIT_ME admin command, telling the firmware to
 * initialize its micro-engines.  The response contents are ignored.
 */
194 qat_adm_mailbox_send_init_me(struct qat_softc *sc)
196 struct fw_init_admin_req req;
197 struct fw_init_admin_resp resp;
199 memset(&req, 0, sizeof(req));
200 req.init_admin_cmd_id = FW_INIT_ME;
202 return qat_adm_mailbox_send(sc, &req, &resp);
/*
 * Configure the firmware heartbeat timer (FW_HEARTBEAT_TIMER_SET).
 *
 * Points the firmware at the heartbeat DMA area allocated in
 * qat_adm_mailbox_init(), and sets the heartbeat period to
 * QAT_HB_INTERVAL (milliseconds, judging by the /1000 conversion)
 * expressed in device clock ticks.
 */
206 qat_adm_mailbox_send_hb_timer(struct qat_softc *sc)
208 struct fw_init_admin_req req;
209 struct fw_init_admin_resp resp;
211 memset(&req, 0, sizeof(req));
212 req.init_admin_cmd_id = FW_HEARTBEAT_TIMER_SET;
214 req.init_cfg_ptr = sc->sc_admin_comms.qadc_hb_dma.qdm_dma_seg.ds_addr;
/* qhw_clock_per_sec / 1000 = device clock ticks per millisecond. */
215 req.heartbeat_ticks =
216 sc->sc_hw.qhw_clock_per_sec / 1000 * QAT_HB_INTERVAL;
218 return qat_adm_mailbox_send(sc, &req, &resp);
/*
 * Query the firmware status (FW_STATUS_GET admin command).  What is
 * done with the response/error is not visible in this excerpt.
 */
222 qat_adm_mailbox_send_fw_status(struct qat_softc *sc)
225 struct fw_init_admin_req req;
226 struct fw_init_admin_resp resp;
228 memset(&req, 0, sizeof(req));
229 req.init_admin_cmd_id = FW_STATUS_GET;
231 error = qat_adm_mailbox_send(sc, &req, &resp);
/*
 * Hand the constants table to the firmware (FW_CONSTANTS_CFG).
 *
 * The table contents were copied into the qadc_const_tbl_dma region
 * by qat_adm_mailbox_init(); the request carries that region's bus
 * address and a fixed size of 1024 bytes.
 */
239 qat_adm_mailbox_send_constants(struct qat_softc *sc)
241 struct fw_init_admin_req req;
242 struct fw_init_admin_resp resp;
244 memset(&req, 0, sizeof(req));
245 req.init_admin_cmd_id = FW_CONSTANTS_CFG;
247 req.init_cfg_sz = 1024;
/* (Assignment target, req.init_cfg_ptr presumably, elided here.) */
249 sc->sc_admin_comms.qadc_const_tbl_dma.qdm_dma_seg.ds_addr;
251 return qat_adm_mailbox_send(sc, &req, &resp);
/*
 * Run the full admin initialization sequence: micro-engine init,
 * heartbeat timer, firmware status query, then the constants table.
 * (The early-return error checks between steps are elided in this
 * excerpt.)
 */
255 qat_adm_mailbox_send_init(struct qat_softc *sc)
259 error = qat_adm_mailbox_send_init_me(sc);
263 error = qat_adm_mailbox_send_hb_timer(sc);
267 error = qat_adm_mailbox_send_fw_status(sc);
271 return qat_adm_mailbox_send_constants(sc);
/*
 * Initialize the service arbiters and map worker threads onto them.
 *
 * Every arbiter gets the same configuration word (bit 31, field 0x4
 * at bits 4..7, bit 0 -- per the original comment this selects
 * 32-byte responses with ring flow-control checking).  The per-engine
 * thread-to-arbiter mapping table is supplied by the hardware-specific
 * qhw_get_arb_mapping() hook.
 *
 * NOTE(review): `0x1 << 31` left-shifts into the sign bit of a signed
 * int; `0x1U << 31` would avoid the undefined-behavior corner.
 */
275 qat_arb_init(struct qat_softc *sc)
277 uint32_t arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1;
279 const uint32_t *thd_2_arb_cfg;
281 /* Service arb configured for 32 bytes responses and
282 * ring flow control check enabled. */
283 for (arb = 0; arb < MAX_ARB; arb++)
284 qat_arb_sarconfig_write_4(sc, arb, arb_cfg);
286 /* Map worker threads to service arbiters */
287 sc->sc_hw.qhw_get_arb_mapping(sc, &thd_2_arb_cfg);
292 for (i = 0; i < sc->sc_hw.qhw_num_engines; i++)
293 qat_arb_wrk_2_ser_map_write_4(sc, i, *(thd_2_arb_cfg + i));
/*
 * Program the SSM watchdog timers for every enabled accelerator.
 *
 * The timeout is QAT_SSM_WDT (milliseconds, judging by the /1000
 * conversion) expressed in device clock ticks; both the general
 * (SSMWDT) and PKE (SSMWDTPKE) watchdogs of each accelerator in
 * sc_accel_mask are set to the same value.
 */
299 qat_set_ssm_wdtimer(struct qat_softc *sc)
305 timer = sc->sc_hw.qhw_clock_per_sec / 1000 * QAT_SSM_WDT;
306 for (i = 0, mask = sc->sc_accel_mask; mask; i++, mask >>= 1) {
309 qat_misc_write_4(sc, SSMWDT(i), timer);
310 qat_misc_write_4(sc, SSMWDTPKE(i), timer);
/*
 * Slice-hang check hook.  The body is not visible in this excerpt;
 * from the name it presumably inspects accelerator slices for hangs
 * -- confirm against the full source.
 */
317 qat_check_slice_hang(struct qat_softc *sc)
/*
 * Fill in the cipher control header of a lookaside request template.
 *
 * Records the cipher block size and the cipher block's offset within
 * the content descriptor in 'desc', sets the state/key/config sizes
 * and offsets in the header (expressed in 8-byte quadwords, hence the
 * >> 3 shifts), and links this slice to 'next_slice' in the firmware
 * slice chain.
 *
 * Returns the space, rounded up to 8 bytes, that the cipher config
 * plus key occupy in the content descriptor, so the caller can
 * advance its running cd_blk_offset.
 */
325 qat_hw17_crypto_setup_cipher_ctrl(struct qat_crypto_desc *desc,
326 struct qat_session *qs, uint32_t cd_blk_offset,
327 struct fw_la_bulk_req *req_tmpl, enum fw_slice next_slice)
329 struct fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl =
330 (struct fw_cipher_cd_ctrl_hdr *)&req_tmpl->cd_ctrl;
332 desc->qcd_cipher_blk_sz = HW_AES_BLK_SZ;
333 desc->qcd_cipher_offset = cd_blk_offset;
/* Sizes/offsets below are in quadwords (units of 8 bytes). */
335 cipher_cd_ctrl->cipher_state_sz = desc->qcd_cipher_blk_sz >> 3;
336 cipher_cd_ctrl->cipher_key_sz = qs->qs_cipher_klen >> 3;
337 cipher_cd_ctrl->cipher_cfg_offset = cd_blk_offset >> 3;
338 FW_COMN_CURR_ID_SET(cipher_cd_ctrl, FW_SLICE_CIPHER);
339 FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, next_slice);
341 return roundup(sizeof(struct hw_cipher_config) + qs->qs_cipher_klen, 8);
/*
 * Write the cipher configuration word and key into the hardware
 * cipher block of a content descriptor.  A per-request key supplied
 * in crp->crp_cipher_key takes precedence over the session key;
 * 'crp' may be NULL when building the session template.
 */
345 qat_hw17_crypto_setup_cipher_cdesc(const struct qat_crypto_desc *desc,
346 const struct qat_session *qs, const struct cryptop *crp,
347 union hw_cipher_algo_blk *cipher)
351 cipher->max.cipher_config.val =
352 qat_crypto_load_cipher_session(desc, qs);
/* Prefer the per-operation key when the request carries one. */
353 if (crp != NULL && crp->crp_cipher_key != NULL)
354 key = crp->crp_cipher_key;
356 key = qs->qs_cipher_key;
357 memcpy(cipher->max.key, key, qs->qs_cipher_klen);
/*
 * Fill in the auth control header of a lookaside request template.
 *
 * Looks up the session's hash definition, then populates the hash
 * config offset, digest/result sizes and the inner state1/state2
 * sizes (each rounded up to 8 bytes).  Offsets into the content
 * descriptor are expressed in 8-byte quadwords (the >> 3 shifts).
 * Also records in 'desc' the digest size, the auth block offset and
 * qcd_gcm_aad_sz_offset1 -- the byte offset of the field inside
 * state1 (just past one AES block) that later requests patch with the
 * GCM AAD size.
 *
 * Returns the 8-byte-rounded space that the auth setup plus both
 * inner states occupy in the content descriptor.
 */
361 qat_hw17_crypto_setup_auth_ctrl(struct qat_crypto_desc *desc,
362 struct qat_session *qs, uint32_t cd_blk_offset,
363 struct fw_la_bulk_req *req_tmpl, enum fw_slice next_slice)
365 struct fw_auth_cd_ctrl_hdr *auth_cd_ctrl =
366 (struct fw_auth_cd_ctrl_hdr *)&req_tmpl->cd_ctrl;
367 struct qat_sym_hash_def const *hash_def;
369 (void)qat_crypto_load_auth_session(desc, qs, &hash_def);
/* Offset in quadwords; no nested-hash support is configured. */
371 auth_cd_ctrl->hash_cfg_offset = cd_blk_offset >> 3;
372 auth_cd_ctrl->hash_flags = FW_AUTH_HDR_FLAG_NO_NESTED;
373 auth_cd_ctrl->inner_res_sz = hash_def->qshd_alg->qshai_digest_len;
374 auth_cd_ctrl->final_sz = hash_def->qshd_alg->qshai_sah->hashsize;
/* Inner state sizes are rounded to 8-byte multiples. */
376 auth_cd_ctrl->inner_state1_sz =
377 roundup(hash_def->qshd_qat->qshqi_state1_len, 8);
378 auth_cd_ctrl->inner_state2_sz =
379 roundup(hash_def->qshd_qat->qshqi_state2_len, 8);
/* state2 follows the auth setup struct and state1, in quadwords. */
380 auth_cd_ctrl->inner_state2_offset =
381 auth_cd_ctrl->hash_cfg_offset +
382 ((sizeof(struct hw_auth_setup) +
383 auth_cd_ctrl->inner_state1_sz) >> 3);
385 FW_COMN_CURR_ID_SET(auth_cd_ctrl, FW_SLICE_AUTH);
386 FW_COMN_NEXT_ID_SET(auth_cd_ctrl, next_slice);
388 desc->qcd_auth_sz = auth_cd_ctrl->final_sz;
389 desc->qcd_auth_offset = cd_blk_offset;
/* Where the GCM AAD-size word lives, for in-place updates later. */
390 desc->qcd_gcm_aad_sz_offset1 =
391 cd_blk_offset + offsetof(union hw_auth_algo_blk, max.state1) +
392 auth_cd_ctrl->inner_state1_sz + AES_BLOCK_LEN;
394 return roundup(auth_cd_ctrl->inner_state1_sz +
395 auth_cd_ctrl->inner_state2_sz +
396 sizeof(struct hw_auth_setup), 8);
/*
 * Write the hardware auth block of a content descriptor for the
 * session's hash algorithm.
 *
 * Loads the auth config word and the big-endian auth counter, then
 * fills state1/state2 (state2 follows state1 at its rounded size)
 * depending on the algorithm:
 *  - GALOIS_128 (GCM/GMAC): precompute the hash key material into
 *    state2 from the cipher key; a per-request key in
 *    crp->crp_cipher_key takes precedence over the session key.
 *  - SHA1/256/384/512: either copy the algorithm's initial state into
 *    state1 (plain-hash mode; the counter is forced to 0), or
 *    precompute the HMAC states from the auth key.
 * Panics on an unhandled auth mode or algorithm.
 */
400 qat_hw17_crypto_setup_auth_cdesc(const struct qat_crypto_desc *desc,
401 const struct qat_session *qs, const struct cryptop *crp,
402 union hw_auth_algo_blk *auth)
404 struct qat_sym_hash_def const *hash_def;
405 uint8_t inner_state1_sz, *state1, *state2;
408 auth->max.inner_setup.auth_config.config =
409 qat_crypto_load_auth_session(desc, qs, &hash_def);
/* Counter is stored big-endian for the hardware. */
410 auth->max.inner_setup.auth_counter.counter =
411 htobe32(hash_def->qshd_qat->qshqi_auth_counter);
412 inner_state1_sz = roundup(hash_def->qshd_qat->qshqi_state1_len, 8);
/* state2 immediately follows state1 at its rounded size. */
414 state1 = auth->max.state1;
415 state2 = auth->max.state1 + inner_state1_sz;
416 switch (qs->qs_auth_algo) {
417 case HW_AUTH_ALGO_GALOIS_128:
/* GCM/GMAC: derive the hash state from the cipher key. */
419 if (crp != NULL && crp->crp_cipher_key != NULL)
420 key = crp->crp_cipher_key;
421 else if (qs->qs_cipher_key != NULL)
422 key = qs->qs_cipher_key;
424 qat_crypto_gmac_precompute(desc, key,
425 qs->qs_cipher_klen, hash_def, state2);
428 case HW_AUTH_ALGO_SHA1:
429 case HW_AUTH_ALGO_SHA256:
430 case HW_AUTH_ALGO_SHA384:
431 case HW_AUTH_ALGO_SHA512:
432 switch (qs->qs_auth_mode) {
/* Plain (un-keyed) hash: load the algorithm's IV. */
434 memcpy(state1, hash_def->qshd_alg->qshai_init_state,
436 /* Override for mode 0 hashes. */
437 auth->max.inner_setup.auth_counter.counter = 0;
/* HMAC: precompute inner/outer states from the auth key. */
440 if (crp != NULL && crp->crp_auth_key != NULL)
441 key = crp->crp_auth_key;
443 key = qs->qs_auth_key;
445 qat_crypto_hmac_precompute(desc, key,
446 qs->qs_auth_klen, hash_def, state1, state2);
450 panic("%s: unhandled auth mode %d", __func__,
455 panic("%s: unhandled auth algorithm %d", __func__,
/*
 * Initialize the common header of a lookaside bulk request: command
 * id from the crypto descriptor, the VALID flag, the LA service type,
 * 64-bit content-descriptor addressing with SGL buffer pointers, and
 * the content descriptor's bus address.  Service-specific flags start
 * cleared and are OR-ed in by the callers.
 */
461 qat_hw17_init_comn_req_hdr(struct qat_crypto_desc *desc,
462 struct fw_la_bulk_req *req)
464 union fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
465 struct fw_comn_req_hdr *req_hdr = &req->comn_hdr;
467 req_hdr->service_cmd_id = desc->qcd_cmd_id;
468 req_hdr->hdr_flags = FW_COMN_VALID;
469 req_hdr->service_type = FW_COMN_REQ_CPM_FW_LA;
470 req_hdr->comn_req_flags = FW_COMN_FLAGS_BUILD(
471 COMN_CD_FLD_TYPE_64BIT_ADR, COMN_PTR_TYPE_SGL);
472 req_hdr->serv_specif_flags = 0;
473 cd_pars->s.content_desc_addr = desc->qcd_desc_paddr;
/*
 * Build the per-session request template and content descriptor.
 *
 * Walks desc->qcd_slices in order; for each cipher/auth slice it
 * writes the corresponding control header into the request template
 * and the hardware config block into the content descriptor,
 * advancing cd_blk_offset by the space each block consumes.
 * FW_SLICE_DRAM_WR terminates the chain.  Afterwards the content
 * descriptor size is stored in quadwords (rounded to
 * QAT_OPTIMAL_ALIGN), GCM sessions get the GCM protocol and
 * 12-octet-IV flags, and the descriptor memory is flushed for device
 * access.
 */
477 qat_hw17_crypto_setup_desc(struct qat_crypto *qcy, struct qat_session *qs,
478 struct qat_crypto_desc *desc)
480 union hw_cipher_algo_blk *cipher;
481 union hw_auth_algo_blk *auth;
482 struct fw_la_bulk_req *req_tmpl;
483 struct fw_comn_req_hdr *req_hdr;
484 uint32_t cd_blk_offset = 0;
488 req_tmpl = (struct fw_la_bulk_req *)desc->qcd_req_cache;
489 req_hdr = &req_tmpl->comn_hdr;
490 cd_blk_ptr = desc->qcd_content_desc;
492 memset(req_tmpl, 0, sizeof(struct fw_la_bulk_req));
493 qat_hw17_init_comn_req_hdr(desc, req_tmpl);
495 for (i = 0; i < MAX_FW_SLICE; i++) {
496 switch (desc->qcd_slices[i]) {
497 case FW_SLICE_CIPHER:
498 cipher = (union hw_cipher_algo_blk *)(cd_blk_ptr +
/* Ctrl header first (it also records the offset), then the block. */
500 cd_blk_offset += qat_hw17_crypto_setup_cipher_ctrl(desc,
501 qs, cd_blk_offset, req_tmpl,
502 desc->qcd_slices[i + 1]);
503 qat_hw17_crypto_setup_cipher_cdesc(desc, qs, NULL,
507 auth = (union hw_auth_algo_blk *)(cd_blk_ptr +
509 cd_blk_offset += qat_hw17_crypto_setup_auth_ctrl(desc,
510 qs, cd_blk_offset, req_tmpl,
511 desc->qcd_slices[i + 1]);
512 qat_hw17_crypto_setup_auth_cdesc(desc, qs, NULL, auth);
/* Ask the firmware to return the auth result with the response. */
513 req_hdr->serv_specif_flags |= FW_LA_RET_AUTH_RES;
515 case FW_SLICE_DRAM_WR:
516 i = MAX_FW_SLICE; /* end of chain */
/* Content descriptor size in quadwords, rounded for alignment. */
524 req_tmpl->cd_pars.s.content_desc_params_sz =
525 roundup(cd_blk_offset, QAT_OPTIMAL_ALIGN) >> 3;
526 if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128)
527 req_hdr->serv_specif_flags |=
528 FW_LA_PROTO_GCM | FW_LA_GCM_IV_LEN_12_OCTETS;
/* Make the finished descriptor visible to the device. */
530 bus_dmamap_sync(qs->qs_desc_mem.qdm_dma_tag,
531 qs->qs_desc_mem.qdm_dma_map, BUS_DMASYNC_PREWRITE);
/*
 * Rebuild the content descriptor in the per-request cookie for a
 * request that carries its own cipher/auth key(s).
 *
 * Copies the session's template descriptor into the cookie, walks the
 * slice chain rewriting the cipher and auth hardware blocks with the
 * per-request keys from 'crp', then points the bulk request at the
 * cookie's descriptor instead of the shared session one.
 */
535 qat_hw17_crypto_req_setkey(const struct qat_crypto_desc *desc,
536 const struct qat_session *qs, struct qat_sym_cookie *qsc,
537 struct fw_la_bulk_req *bulk_req, const struct cryptop *crp)
539 union hw_auth_algo_blk *auth;
540 union hw_cipher_algo_blk *cipher;
544 cdesc = qsc->qsc_content_desc;
/* Start from the session template, then patch the keyed blocks. */
545 memcpy(cdesc, desc->qcd_content_desc, CONTENT_DESC_MAX_SIZE);
546 for (i = 0; i < MAX_FW_SLICE; i++) {
547 switch (desc->qcd_slices[i]) {
548 case FW_SLICE_CIPHER:
549 cipher = (union hw_cipher_algo_blk *)
550 (cdesc + desc->qcd_cipher_offset);
551 qat_hw17_crypto_setup_cipher_cdesc(desc, qs, crp,
555 auth = (union hw_auth_algo_blk *)
556 (cdesc + desc->qcd_auth_offset);
557 qat_hw17_crypto_setup_auth_cdesc(desc, qs, crp, auth);
559 case FW_SLICE_DRAM_WR:
560 i = MAX_FW_SLICE; /* end of chain */
/* Use the cookie's private descriptor for this request. */
567 bulk_req->cd_pars.s.content_desc_addr = qsc->qsc_content_desc_paddr;
571 qat_hw17_crypto_setup_req_params(struct qat_crypto_bank *qcb __unused,
572 struct qat_session *qs, const struct qat_crypto_desc *desc,
573 struct qat_sym_cookie *qsc, struct cryptop *crp)
575 struct qat_sym_bulk_cookie *qsbc;
576 struct fw_la_bulk_req *bulk_req;
577 struct fw_la_cipher_req_params *cipher_param;
578 struct fw_la_auth_req_params *auth_param;
579 bus_addr_t digest_paddr;
580 uint32_t aad_sz, *aad_szp;
581 uint8_t *req_params_ptr;
582 enum fw_la_cmd_id cmd_id = desc->qcd_cmd_id;
584 qsbc = &qsc->qsc_bulk_cookie;
585 bulk_req = (struct fw_la_bulk_req *)qsbc->qsbc_msg;
587 memcpy(bulk_req, desc->qcd_req_cache, sizeof(struct fw_la_bulk_req));
588 bulk_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)qsc;
589 bulk_req->comn_mid.src_data_addr = qsc->qsc_buffer_list_desc_paddr;
590 if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
591 bulk_req->comn_mid.dest_data_addr =
592 qsc->qsc_obuffer_list_desc_paddr;
594 bulk_req->comn_mid.dest_data_addr =
595 qsc->qsc_buffer_list_desc_paddr;
597 if (__predict_false(crp->crp_cipher_key != NULL ||
598 crp->crp_auth_key != NULL))
599 qat_hw17_crypto_req_setkey(desc, qs, qsc, bulk_req, crp);
602 if (desc->qcd_auth_sz != 0)
603 digest_paddr = qsc->qsc_auth_res_paddr;
605 req_params_ptr = (uint8_t *)&bulk_req->serv_specif_rqpars;
606 cipher_param = (struct fw_la_cipher_req_params *)req_params_ptr;
607 auth_param = (struct fw_la_auth_req_params *)
608 (req_params_ptr + sizeof(struct fw_la_cipher_req_params));
610 cipher_param->u.s.cipher_IV_ptr = qsc->qsc_iv_buf_paddr;
613 * The SG list layout is a bit different for GCM and GMAC, it's simpler
614 * to handle those cases separately.
616 if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) {
617 if (cmd_id != FW_LA_CMD_AUTH) {
619 * Don't fill out the cipher block if we're doing GMAC
622 cipher_param->cipher_offset = 0;
623 cipher_param->cipher_length = crp->crp_payload_length;
625 auth_param->auth_off = 0;
626 auth_param->auth_len = crp->crp_payload_length;
627 auth_param->auth_res_addr = digest_paddr;
628 auth_param->auth_res_sz = desc->qcd_auth_sz;
629 auth_param->u1.aad_adr =
630 crp->crp_aad_length > 0 ? qsc->qsc_gcm_aad_paddr : 0;
631 auth_param->u2.aad_sz =
632 roundup2(crp->crp_aad_length, QAT_AES_GCM_AAD_ALIGN);
633 auth_param->hash_state_sz = auth_param->u2.aad_sz >> 3;
636 * Update the hash state block if necessary. This only occurs
637 * when the AAD length changes between requests in a session and
638 * is synchronized by qat_process().
640 aad_sz = htobe32(crp->crp_aad_length);
641 aad_szp = (uint32_t *)(
642 __DECONST(uint8_t *, desc->qcd_content_desc) +
643 desc->qcd_gcm_aad_sz_offset1);
644 if (__predict_false(*aad_szp != aad_sz)) {
646 bus_dmamap_sync(qs->qs_desc_mem.qdm_dma_tag,
647 qs->qs_desc_mem.qdm_dma_map,
648 BUS_DMASYNC_PREWRITE);
651 if (cmd_id != FW_LA_CMD_AUTH) {
652 if (crp->crp_aad_length == 0) {
653 cipher_param->cipher_offset = 0;
654 } else if (crp->crp_aad == NULL) {
655 cipher_param->cipher_offset =
656 crp->crp_payload_start - crp->crp_aad_start;
658 cipher_param->cipher_offset =
661 cipher_param->cipher_length = crp->crp_payload_length;
663 if (cmd_id != FW_LA_CMD_CIPHER) {
664 auth_param->auth_off = 0;
665 auth_param->auth_len =
666 crp->crp_payload_length + crp->crp_aad_length;
667 auth_param->auth_res_addr = digest_paddr;
668 auth_param->auth_res_sz = desc->qcd_auth_sz;
669 auth_param->u1.aad_adr = 0;
670 auth_param->u2.aad_sz = 0;
671 auth_param->hash_state_sz = 0;