]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/dev/qat/qat_hw17.c
freebsd32: rename fstat() stat buffer argument
[FreeBSD/FreeBSD.git] / sys / dev / qat / qat_hw17.c
1 /* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
2 /*      $NetBSD: qat_hw17.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $      */
3
4 /*
5  * Copyright (c) 2019 Internet Initiative Japan, Inc.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
18  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
21  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27  * POSSIBILITY OF SUCH DAMAGE.
28  */
29
30 /*
31  *   Copyright(c) 2014 Intel Corporation.
32  *   Redistribution and use in source and binary forms, with or without
33  *   modification, are permitted provided that the following conditions
34  *   are met:
35  *
36  *     * Redistributions of source code must retain the above copyright
37  *       notice, this list of conditions and the following disclaimer.
38  *     * Redistributions in binary form must reproduce the above copyright
39  *       notice, this list of conditions and the following disclaimer in
40  *       the documentation and/or other materials provided with the
41  *       distribution.
42  *     * Neither the name of Intel Corporation nor the names of its
43  *       contributors may be used to endorse or promote products derived
44  *       from this software without specific prior written permission.
45  *
46  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
47  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
48  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
49  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
50  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
51  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
52  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
53  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
54  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
55  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
56  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57  */
58
59 #include <sys/cdefs.h>
60 __FBSDID("$FreeBSD$");
61 #if 0
62 __KERNEL_RCSID(0, "$NetBSD: qat_hw17.c,v 1.1 2019/11/20 09:37:46 hikaru Exp $");
63 #endif
64
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/bus.h>
68 #include <sys/proc.h>
69
70 #include <machine/bus.h>
71
72 #include <opencrypto/xform.h>
73
74 #include <dev/pci/pcireg.h>
75 #include <dev/pci/pcivar.h>
76
77 #include "qatreg.h"
78 #include "qat_hw17reg.h"
79 #include "qatvar.h"
80 #include "qat_hw17var.h"
81
82 int             qat_adm_mailbox_put_msg_sync(struct qat_softc *, uint32_t,
83                     void *, void *);
84 int             qat_adm_mailbox_send(struct qat_softc *,
85                     struct fw_init_admin_req *, struct fw_init_admin_resp *);
86 int             qat_adm_mailbox_send_init_me(struct qat_softc *);
87 int             qat_adm_mailbox_send_hb_timer(struct qat_softc *);
88 int             qat_adm_mailbox_send_fw_status(struct qat_softc *);
89 int             qat_adm_mailbox_send_constants(struct qat_softc *);
90
int
qat_adm_mailbox_init(struct qat_softc *sc)
{
	uint64_t addr;
	int error;
	struct qat_dmamem *qdm;

	/*
	 * Allocate one page of DMA-able memory used as the admin message
	 * buffer shared with the accelerator engines.
	 * NOTE(review): on failure of a later allocation the earlier ones
	 * are not freed here — presumably the caller's teardown path
	 * releases them; confirm.
	 */
	error = qat_alloc_dmamem(sc, &sc->sc_admin_comms.qadc_dma, 1,
	    PAGE_SIZE, PAGE_SIZE);
	if (error)
		return error;

	/* Allocate and populate the firmware constants table page. */
	qdm = &sc->sc_admin_comms.qadc_const_tbl_dma;
	error = qat_alloc_dmamem(sc, qdm, 1, PAGE_SIZE, PAGE_SIZE);
	if (error)
		return error;

	memcpy(qdm->qdm_dma_vaddr,
	    mailbox_const_tab, sizeof(mailbox_const_tab));

	/* Flush the constants to memory before the device can read them. */
	bus_dmamap_sync(qdm->qdm_dma_tag, qdm->qdm_dma_map,
	    BUS_DMASYNC_PREWRITE);

	/* Page used by the firmware heartbeat mechanism. */
	error = qat_alloc_dmamem(sc, &sc->sc_admin_comms.qadc_hb_dma, 1,
	    PAGE_SIZE, PAGE_SIZE);
	if (error)
		return error;

	/*
	 * Tell the device where the admin message buffer lives by writing
	 * the 64-bit bus address as two 32-bit halves.
	 */
	addr = (uint64_t)sc->sc_admin_comms.qadc_dma.qdm_dma_seg.ds_addr;
	qat_misc_write_4(sc, ADMINMSGUR, addr >> 32);
	qat_misc_write_4(sc, ADMINMSGLR, addr);

	return 0;
}
125
/*
 * Synchronously deliver one admin message to accelerator engine 'ae'
 * and collect its response.
 *
 * Protocol: the request ('in', ADMINMSG_LEN bytes) is copied into the
 * per-engine slot of the shared admin DMA buffer, then the engine's
 * mailbox register is set to 1 as a doorbell.  The firmware clears the
 * mailbox back to 0 once it has written the response into the adjacent
 * ADMINMSG_LEN bytes of the same slot.
 *
 * Returns 0 on success, EAGAIN if the mailbox is still busy from a
 * previous message, or EFAULT if the firmware does not answer within
 * the polling window (50 * 20ms = 1 second).
 */
int
qat_adm_mailbox_put_msg_sync(struct qat_softc *sc, uint32_t ae,
    void *in, void *out)
{
	struct qat_dmamem *qdm;
	uint32_t mailbox;
	bus_size_t mb_offset = MAILBOX_BASE + (ae * MAILBOX_STRIDE);
	/* Each engine owns two ADMINMSG_LEN slots: request then response. */
	int offset = ae * ADMINMSG_LEN * 2;
	int times, received;
	uint8_t *buf = (uint8_t *)sc->sc_admin_comms.qadc_dma.qdm_dma_vaddr + offset;

	/* Mailbox still set means the engine hasn't consumed the last msg. */
	mailbox = qat_misc_read_4(sc, mb_offset);
	if (mailbox == 1)
		return EAGAIN;

	qdm = &sc->sc_admin_comms.qadc_dma;
	memcpy(buf, in, ADMINMSG_LEN);
	/* Make the request visible to the device before ringing the bell. */
	bus_dmamap_sync(qdm->qdm_dma_tag, qdm->qdm_dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	qat_misc_write_4(sc, mb_offset, 1);

	/* Poll for completion: firmware clears the mailbox when done. */
	received = 0;
	for (times = 0; times < 50; times++) {
		DELAY(20000);
		if (qat_misc_read_4(sc, mb_offset) == 0) {
			received = 1;
			break;
		}
	}
	if (received) {
		/* Pull in the response the device wrote. */
		bus_dmamap_sync(qdm->qdm_dma_tag, qdm->qdm_dma_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		memcpy(out, buf + ADMINMSG_LEN, ADMINMSG_LEN);
	} else {
		device_printf(sc->sc_dev,
		    "Failed to send admin msg to accelerator\n");
	}

	return received ? 0 : EFAULT;
}
166
167 int
168 qat_adm_mailbox_send(struct qat_softc *sc,
169     struct fw_init_admin_req *req, struct fw_init_admin_resp *resp)
170 {
171         int error;
172         uint32_t mask;
173         uint8_t ae;
174
175         for (ae = 0, mask = sc->sc_ae_mask; mask; ae++, mask >>= 1) {
176                 if (!(mask & 1))
177                         continue;
178
179                 error = qat_adm_mailbox_put_msg_sync(sc, ae, req, resp);
180                 if (error)
181                         return error;
182                 if (resp->init_resp_hdr.status) {
183                         device_printf(sc->sc_dev,
184                             "Failed to send admin msg: cmd %d\n",
185                             req->init_admin_cmd_id);
186                         return EFAULT;
187                 }
188         }
189
190         return 0;
191 }
192
193 int
194 qat_adm_mailbox_send_init_me(struct qat_softc *sc)
195 {
196         struct fw_init_admin_req req;
197         struct fw_init_admin_resp resp;
198
199         memset(&req, 0, sizeof(req));
200         req.init_admin_cmd_id = FW_INIT_ME;
201
202         return qat_adm_mailbox_send(sc, &req, &resp);
203 }
204
205 int
206 qat_adm_mailbox_send_hb_timer(struct qat_softc *sc)
207 {
208         struct fw_init_admin_req req;
209         struct fw_init_admin_resp resp;
210
211         memset(&req, 0, sizeof(req));
212         req.init_admin_cmd_id = FW_HEARTBEAT_TIMER_SET;
213
214         req.init_cfg_ptr = sc->sc_admin_comms.qadc_hb_dma.qdm_dma_seg.ds_addr;
215         req.heartbeat_ticks =
216             sc->sc_hw.qhw_clock_per_sec / 1000 * QAT_HB_INTERVAL;
217
218         return qat_adm_mailbox_send(sc, &req, &resp);
219 }
220
221 int
222 qat_adm_mailbox_send_fw_status(struct qat_softc *sc)
223 {
224         int error;
225         struct fw_init_admin_req req;
226         struct fw_init_admin_resp resp;
227
228         memset(&req, 0, sizeof(req));
229         req.init_admin_cmd_id = FW_STATUS_GET;
230
231         error = qat_adm_mailbox_send(sc, &req, &resp);
232         if (error)
233                 return error;
234
235         return 0;
236 }
237
238 int
239 qat_adm_mailbox_send_constants(struct qat_softc *sc)
240 {
241         struct fw_init_admin_req req;
242         struct fw_init_admin_resp resp;
243
244         memset(&req, 0, sizeof(req));
245         req.init_admin_cmd_id = FW_CONSTANTS_CFG;
246
247         req.init_cfg_sz = 1024;
248         req.init_cfg_ptr =
249             sc->sc_admin_comms.qadc_const_tbl_dma.qdm_dma_seg.ds_addr;
250
251         return qat_adm_mailbox_send(sc, &req, &resp);
252 }
253
254 int
255 qat_adm_mailbox_send_init(struct qat_softc *sc)
256 {
257         int error;
258
259         error = qat_adm_mailbox_send_init_me(sc);
260         if (error)
261                 return error;
262
263         error = qat_adm_mailbox_send_hb_timer(sc);
264         if (error)
265                 return error;
266
267         error = qat_adm_mailbox_send_fw_status(sc);
268         if (error)
269                 return error;
270
271         return qat_adm_mailbox_send_constants(sc);
272 }
273
274 int
275 qat_arb_init(struct qat_softc *sc)
276 {
277         uint32_t arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1;
278         uint32_t arb, i;
279         const uint32_t *thd_2_arb_cfg;
280
281         /* Service arb configured for 32 bytes responses and
282          * ring flow control check enabled. */
283         for (arb = 0; arb < MAX_ARB; arb++)
284                 qat_arb_sarconfig_write_4(sc, arb, arb_cfg);
285
286         /* Map worker threads to service arbiters */
287         sc->sc_hw.qhw_get_arb_mapping(sc, &thd_2_arb_cfg);
288
289         if (!thd_2_arb_cfg)
290                 return EINVAL;
291
292         for (i = 0; i < sc->sc_hw.qhw_num_engines; i++)
293                 qat_arb_wrk_2_ser_map_write_4(sc, i, *(thd_2_arb_cfg + i));
294
295         return 0;
296 }
297
298 int
299 qat_set_ssm_wdtimer(struct qat_softc *sc)
300 {
301         uint32_t timer;
302         u_int mask;
303         int i;
304
305         timer = sc->sc_hw.qhw_clock_per_sec / 1000 * QAT_SSM_WDT;
306         for (i = 0, mask = sc->sc_accel_mask; mask; i++, mask >>= 1) {
307                 if (!(mask & 1))
308                         continue;
309                 qat_misc_write_4(sc, SSMWDT(i), timer);
310                 qat_misc_write_4(sc, SSMWDTPKE(i), timer);
311         }
312
313         return 0;
314 }
315
/*
 * Slice-hang detection is not implemented for this hardware
 * generation; report that nothing was handled.
 */
int
qat_check_slice_hang(struct qat_softc *sc)
{
	return 0;
}
323
324 static uint32_t
325 qat_hw17_crypto_setup_cipher_ctrl(struct qat_crypto_desc *desc,
326     struct qat_session *qs, uint32_t cd_blk_offset,
327     struct fw_la_bulk_req *req_tmpl, enum fw_slice next_slice)
328 {
329         struct fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl =
330             (struct fw_cipher_cd_ctrl_hdr *)&req_tmpl->cd_ctrl;
331
332         desc->qcd_cipher_blk_sz = HW_AES_BLK_SZ;
333         desc->qcd_cipher_offset = cd_blk_offset;
334
335         cipher_cd_ctrl->cipher_state_sz = desc->qcd_cipher_blk_sz >> 3;
336         cipher_cd_ctrl->cipher_key_sz = qs->qs_cipher_klen >> 3;
337         cipher_cd_ctrl->cipher_cfg_offset = cd_blk_offset >> 3;
338         FW_COMN_CURR_ID_SET(cipher_cd_ctrl, FW_SLICE_CIPHER);
339         FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, next_slice);
340
341         return roundup(sizeof(struct hw_cipher_config) + qs->qs_cipher_klen, 8);
342 }
343
344 static void
345 qat_hw17_crypto_setup_cipher_cdesc(const struct qat_crypto_desc *desc,
346     const struct qat_session *qs, const struct cryptop *crp,
347     union hw_cipher_algo_blk *cipher)
348 {
349         const uint8_t *key;
350
351         cipher->max.cipher_config.val =
352             qat_crypto_load_cipher_session(desc, qs);
353         if (crp != NULL && crp->crp_cipher_key != NULL)
354                 key = crp->crp_cipher_key;
355         else
356                 key = qs->qs_cipher_key;
357         memcpy(cipher->max.key, key, qs->qs_cipher_klen);
358 }
359
/*
 * Fill in the auth control header of the request template and record
 * where the auth setup block sits inside the content descriptor.
 * Returns the number of content-descriptor bytes the auth setup
 * occupies (setup header plus both inner state blocks), rounded up to
 * an 8-byte boundary.
 */
static uint32_t
qat_hw17_crypto_setup_auth_ctrl(struct qat_crypto_desc *desc,
    struct qat_session *qs, uint32_t cd_blk_offset,
    struct fw_la_bulk_req *req_tmpl, enum fw_slice next_slice)
{
	struct fw_auth_cd_ctrl_hdr *auth_cd_ctrl =
	    (struct fw_auth_cd_ctrl_hdr *)&req_tmpl->cd_ctrl;
	struct qat_sym_hash_def const *hash_def;

	/* Only the hash definition is needed here; the config value is
	 * written later by qat_hw17_crypto_setup_auth_cdesc(). */
	(void)qat_crypto_load_auth_session(desc, qs, &hash_def);

	/* Offsets and the state2 offset below are in 8-byte units. */
	auth_cd_ctrl->hash_cfg_offset = cd_blk_offset >> 3;
	auth_cd_ctrl->hash_flags = FW_AUTH_HDR_FLAG_NO_NESTED;
	auth_cd_ctrl->inner_res_sz = hash_def->qshd_alg->qshai_digest_len;
	auth_cd_ctrl->final_sz = hash_def->qshd_alg->qshai_sah->hashsize;

	/* State blocks are padded to 8-byte multiples in the descriptor. */
	auth_cd_ctrl->inner_state1_sz =
	    roundup(hash_def->qshd_qat->qshqi_state1_len, 8);
	auth_cd_ctrl->inner_state2_sz =
	    roundup(hash_def->qshd_qat->qshqi_state2_len, 8);
	/* state2 follows the hw_auth_setup header and state1. */
	auth_cd_ctrl->inner_state2_offset =
	    auth_cd_ctrl->hash_cfg_offset +
	    ((sizeof(struct hw_auth_setup) +
	    auth_cd_ctrl->inner_state1_sz) >> 3);

	/* Chain this slice into the firmware processing sequence. */
	FW_COMN_CURR_ID_SET(auth_cd_ctrl, FW_SLICE_AUTH);
	FW_COMN_NEXT_ID_SET(auth_cd_ctrl, next_slice);

	desc->qcd_auth_sz = auth_cd_ctrl->final_sz;
	desc->qcd_auth_offset = cd_blk_offset;
	/*
	 * Byte offset of the GCM AAD-size field inside the content
	 * descriptor; qat_hw17_crypto_setup_req_params() patches it when
	 * the AAD length changes between requests.
	 */
	desc->qcd_gcm_aad_sz_offset1 =
	    cd_blk_offset + offsetof(union hw_auth_algo_blk, max.state1) +
	    auth_cd_ctrl->inner_state1_sz + AES_BLOCK_LEN;

	return roundup(auth_cd_ctrl->inner_state1_sz +
	    auth_cd_ctrl->inner_state2_sz +
	    sizeof(struct hw_auth_setup), 8);
}
398
/*
 * Write the auth setup block of the content descriptor: the auth
 * config word, the big-endian block counter, and the inner state
 * blocks (precomputed HMAC/GHASH state where the mode requires it).
 * Per-request keys supplied with the cryptop override session keys.
 */
static void
qat_hw17_crypto_setup_auth_cdesc(const struct qat_crypto_desc *desc,
    const struct qat_session *qs, const struct cryptop *crp,
    union hw_auth_algo_blk *auth)
{
	struct qat_sym_hash_def const *hash_def;
	uint8_t inner_state1_sz, *state1, *state2;
	const uint8_t *key;

	auth->max.inner_setup.auth_config.config =
	    qat_crypto_load_auth_session(desc, qs, &hash_def);
	auth->max.inner_setup.auth_counter.counter =
	    htobe32(hash_def->qshd_qat->qshqi_auth_counter);
	inner_state1_sz = roundup(hash_def->qshd_qat->qshqi_state1_len, 8);

	/* state2 immediately follows the padded state1 block. */
	state1 = auth->max.state1;
	state2 = auth->max.state1 + inner_state1_sz;
	switch (qs->qs_auth_algo) {
	case HW_AUTH_ALGO_GALOIS_128:
		/* GCM/GMAC: the GHASH key H is derived from the cipher key. */
		key = NULL;
		if (crp != NULL && crp->crp_cipher_key != NULL)
			key = crp->crp_cipher_key;
		else if (qs->qs_cipher_key != NULL)
			key = qs->qs_cipher_key;
		if (key != NULL) {
			qat_crypto_gmac_precompute(desc, key,
			    qs->qs_cipher_klen, hash_def, state2);
		}
		break;
	case HW_AUTH_ALGO_SHA1:
	case HW_AUTH_ALGO_SHA256:
	case HW_AUTH_ALGO_SHA384:
	case HW_AUTH_ALGO_SHA512:
		switch (qs->qs_auth_mode) {
		case HW_AUTH_MODE0:
			/* Plain (unkeyed) hash: load the algorithm's IV. */
			memcpy(state1, hash_def->qshd_alg->qshai_init_state,
			    inner_state1_sz);
			/* Override for mode 0 hashes. */
			auth->max.inner_setup.auth_counter.counter = 0;
			break;
		case HW_AUTH_MODE1:
			/* HMAC: precompute the ipad/opad states. */
			if (crp != NULL && crp->crp_auth_key != NULL)
				key = crp->crp_auth_key;
			else
				key = qs->qs_auth_key;
			if (key != NULL) {
				qat_crypto_hmac_precompute(desc, key,
				    qs->qs_auth_klen, hash_def, state1, state2);
			}
			break;
		default:
			panic("%s: unhandled auth mode %d", __func__,
			    qs->qs_auth_mode);
		}
		break;
	default:
		panic("%s: unhandled auth algorithm %d", __func__,
		    qs->qs_auth_algo);
	}
}
459
460 static void
461 qat_hw17_init_comn_req_hdr(struct qat_crypto_desc *desc,
462     struct fw_la_bulk_req *req)
463 {
464         union fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
465         struct fw_comn_req_hdr *req_hdr = &req->comn_hdr;
466
467         req_hdr->service_cmd_id = desc->qcd_cmd_id;
468         req_hdr->hdr_flags = FW_COMN_VALID;
469         req_hdr->service_type = FW_COMN_REQ_CPM_FW_LA;
470         req_hdr->comn_req_flags = FW_COMN_FLAGS_BUILD(
471             COMN_CD_FLD_TYPE_64BIT_ADR, COMN_PTR_TYPE_SGL);
472         req_hdr->serv_specif_flags = 0;
473         cd_pars->s.content_desc_addr = desc->qcd_desc_paddr;
474 }
475
/*
 * Build the per-session request template and content descriptor for
 * one crypto descriptor.  Walks the slice chain laid out in
 * desc->qcd_slices, packing each slice's setup block into the content
 * descriptor and wiring the firmware slice chain, then flushes the
 * descriptor memory for the device.
 */
void
qat_hw17_crypto_setup_desc(struct qat_crypto *qcy, struct qat_session *qs,
    struct qat_crypto_desc *desc)
{
	union hw_cipher_algo_blk *cipher;
	union hw_auth_algo_blk *auth;
	struct fw_la_bulk_req *req_tmpl;
	struct fw_comn_req_hdr *req_hdr;
	uint32_t cd_blk_offset = 0;
	int i;
	uint8_t *cd_blk_ptr;

	req_tmpl = (struct fw_la_bulk_req *)desc->qcd_req_cache;
	req_hdr = &req_tmpl->comn_hdr;
	cd_blk_ptr = desc->qcd_content_desc;

	memset(req_tmpl, 0, sizeof(struct fw_la_bulk_req));
	qat_hw17_init_comn_req_hdr(desc, req_tmpl);

	for (i = 0; i < MAX_FW_SLICE; i++) {
		switch (desc->qcd_slices[i]) {
		case FW_SLICE_CIPHER:
			/* The *_ctrl call returns the setup block size and
			 * advances the offset; the *_cdesc call fills the
			 * block in with session material (no per-req key). */
			cipher = (union hw_cipher_algo_blk *)(cd_blk_ptr +
			    cd_blk_offset);
			cd_blk_offset += qat_hw17_crypto_setup_cipher_ctrl(desc,
			    qs, cd_blk_offset, req_tmpl,
			    desc->qcd_slices[i + 1]);
			qat_hw17_crypto_setup_cipher_cdesc(desc, qs, NULL,
			    cipher);
			break;
		case FW_SLICE_AUTH:
			auth = (union hw_auth_algo_blk *)(cd_blk_ptr +
			    cd_blk_offset);
			cd_blk_offset += qat_hw17_crypto_setup_auth_ctrl(desc,
			    qs, cd_blk_offset, req_tmpl,
			    desc->qcd_slices[i + 1]);
			qat_hw17_crypto_setup_auth_cdesc(desc, qs, NULL, auth);
			req_hdr->serv_specif_flags |= FW_LA_RET_AUTH_RES;
			break;
		case FW_SLICE_DRAM_WR:
			i = MAX_FW_SLICE; /* end of chain */
			break;
		default:
			MPASS(0);
			break;
		}
	}

	/* Descriptor size is reported to the firmware in 8-byte units. */
	req_tmpl->cd_pars.s.content_desc_params_sz =
	    roundup(cd_blk_offset, QAT_OPTIMAL_ALIGN) >> 3;
	if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128)
		req_hdr->serv_specif_flags |=
		    FW_LA_PROTO_GCM | FW_LA_GCM_IV_LEN_12_OCTETS;

	/* Make the template/content descriptor visible to the device. */
	bus_dmamap_sync(qs->qs_desc_mem.qdm_dma_tag,
	    qs->qs_desc_mem.qdm_dma_map, BUS_DMASYNC_PREWRITE);
}
533
/*
 * Handle a request that carries its own key(s): clone the session's
 * content descriptor into the per-request cookie, rewrite the cipher
 * and/or auth setup blocks with the request keys, and retarget the
 * bulk request at the cookie's copy.  The session descriptor itself
 * is left untouched.
 */
static void
qat_hw17_crypto_req_setkey(const struct qat_crypto_desc *desc,
    const struct qat_session *qs, struct qat_sym_cookie *qsc,
    struct fw_la_bulk_req *bulk_req, const struct cryptop *crp)
{
	union hw_auth_algo_blk *auth;
	union hw_cipher_algo_blk *cipher;
	uint8_t *cdesc;
	int i;

	cdesc = qsc->qsc_content_desc;
	memcpy(cdesc, desc->qcd_content_desc, CONTENT_DESC_MAX_SIZE);
	/* Walk the same slice chain that setup_desc built. */
	for (i = 0; i < MAX_FW_SLICE; i++) {
		switch (desc->qcd_slices[i]) {
		case FW_SLICE_CIPHER:
			cipher = (union hw_cipher_algo_blk *)
			    (cdesc + desc->qcd_cipher_offset);
			qat_hw17_crypto_setup_cipher_cdesc(desc, qs, crp,
			    cipher);
			break;
		case FW_SLICE_AUTH:
			auth = (union hw_auth_algo_blk *)
			    (cdesc + desc->qcd_auth_offset);
			qat_hw17_crypto_setup_auth_cdesc(desc, qs, crp, auth);
			break;
		case FW_SLICE_DRAM_WR:
			i = MAX_FW_SLICE; /* end of chain */
			break;
		default:
			MPASS(0);
		}
	}

	bulk_req->cd_pars.s.content_desc_addr = qsc->qsc_content_desc_paddr;
}
569
/*
 * Build the firmware request for one crypto operation: copy the
 * session's cached request template, point it at this request's
 * buffers, override keys if the request carries its own, and fill in
 * the cipher/auth request parameters.  GCM/GMAC uses a different
 * scatter-gather layout and is handled as a separate branch.
 */
void
qat_hw17_crypto_setup_req_params(struct qat_crypto_bank *qcb __unused,
    struct qat_session *qs, const struct qat_crypto_desc *desc,
    struct qat_sym_cookie *qsc, struct cryptop *crp)
{
	struct qat_sym_bulk_cookie *qsbc;
	struct fw_la_bulk_req *bulk_req;
	struct fw_la_cipher_req_params *cipher_param;
	struct fw_la_auth_req_params *auth_param;
	bus_addr_t digest_paddr;
	uint32_t aad_sz, *aad_szp;
	uint8_t *req_params_ptr;
	enum fw_la_cmd_id cmd_id = desc->qcd_cmd_id;

	qsbc = &qsc->qsc_bulk_cookie;
	bulk_req = (struct fw_la_bulk_req *)qsbc->qsbc_msg;

	/* Start from the template built by qat_hw17_crypto_setup_desc(). */
	memcpy(bulk_req, desc->qcd_req_cache, sizeof(struct fw_la_bulk_req));
	/* The cookie pointer rides in opaque_data so completion can find it. */
	bulk_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)qsc;
	bulk_req->comn_mid.src_data_addr = qsc->qsc_buffer_list_desc_paddr;
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		bulk_req->comn_mid.dest_data_addr =
		    qsc->qsc_obuffer_list_desc_paddr;
	} else {
		/* In-place operation: source and destination coincide. */
		bulk_req->comn_mid.dest_data_addr =
		    qsc->qsc_buffer_list_desc_paddr;
	}
	/* Per-request keys require a private copy of the content desc. */
	if (__predict_false(crp->crp_cipher_key != NULL ||
	    crp->crp_auth_key != NULL))
		qat_hw17_crypto_req_setkey(desc, qs, qsc, bulk_req, crp);

	digest_paddr = 0;
	if (desc->qcd_auth_sz != 0)
		digest_paddr = qsc->qsc_auth_res_paddr;

	/* Cipher params come first, auth params directly after. */
	req_params_ptr = (uint8_t *)&bulk_req->serv_specif_rqpars;
	cipher_param = (struct fw_la_cipher_req_params *)req_params_ptr;
	auth_param = (struct fw_la_auth_req_params *)
	    (req_params_ptr + sizeof(struct fw_la_cipher_req_params));

	cipher_param->u.s.cipher_IV_ptr = qsc->qsc_iv_buf_paddr;

	/*
	 * The SG list layout is a bit different for GCM and GMAC, it's simpler
	 * to handle those cases separately.
	 */
	if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) {
		if (cmd_id != FW_LA_CMD_AUTH) {
			/*
			 * Don't fill out the cipher block if we're doing GMAC
			 * only.
			 */
			cipher_param->cipher_offset = 0;
			cipher_param->cipher_length = crp->crp_payload_length;
		}
		auth_param->auth_off = 0;
		auth_param->auth_len = crp->crp_payload_length;
		auth_param->auth_res_addr = digest_paddr;
		auth_param->auth_res_sz = desc->qcd_auth_sz;
		auth_param->u1.aad_adr =
		    crp->crp_aad_length > 0 ? qsc->qsc_gcm_aad_paddr : 0;
		/* AAD buffer is padded to the device's GCM AAD alignment. */
		auth_param->u2.aad_sz =
		    roundup2(crp->crp_aad_length, QAT_AES_GCM_AAD_ALIGN);
		auth_param->hash_state_sz = auth_param->u2.aad_sz >> 3;

		/*
		 * Update the hash state block if necessary.  This only occurs
		 * when the AAD length changes between requests in a session and
		 * is synchronized by qat_process().
		 */
		aad_sz = htobe32(crp->crp_aad_length);
		aad_szp = (uint32_t *)(
		    __DECONST(uint8_t *, desc->qcd_content_desc) +
		    desc->qcd_gcm_aad_sz_offset1);
		if (__predict_false(*aad_szp != aad_sz)) {
			*aad_szp = aad_sz;
			bus_dmamap_sync(qs->qs_desc_mem.qdm_dma_tag,
			    qs->qs_desc_mem.qdm_dma_map,
			    BUS_DMASYNC_PREWRITE);
		}
	} else {
		if (cmd_id != FW_LA_CMD_AUTH) {
			/*
			 * Cipher offset is relative to where the AAD region
			 * starts in the SG list (if any).
			 */
			if (crp->crp_aad_length == 0) {
				cipher_param->cipher_offset = 0;
			} else if (crp->crp_aad == NULL) {
				cipher_param->cipher_offset =
				    crp->crp_payload_start - crp->crp_aad_start;
			} else {
				cipher_param->cipher_offset =
				    crp->crp_aad_length;
			}
			cipher_param->cipher_length = crp->crp_payload_length;
		}
		if (cmd_id != FW_LA_CMD_CIPHER) {
			/* Non-GCM: AAD (if any) is hashed inline, so the
			 * auth region covers AAD plus payload. */
			auth_param->auth_off = 0;
			auth_param->auth_len =
			    crp->crp_payload_length + crp->crp_aad_length;
			auth_param->auth_res_addr = digest_paddr;
			auth_param->auth_res_sz = desc->qcd_auth_sz;
			auth_param->u1.aad_adr = 0;
			auth_param->u2.aad_sz = 0;
			auth_param->hash_state_sz = 0;
		}
	}
}