1 /* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
2 /* $NetBSD: qatvar.h,v 1.2 2020/03/14 18:08:39 ad Exp $ */
5 * Copyright (c) 2019 Internet Initiative Japan, Inc.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
21 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 * POSSIBILITY OF SUCH DAMAGE.
31 * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
37 * * Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * * Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in
41 * the documentation and/or other materials provided with the
43 * * Neither the name of Intel Corporation nor the names of its
44 * contributors may be used to endorse or promote products derived
45 * from this software without specific prior written permission.
47 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
48 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
49 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
50 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
51 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
52 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
53 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
54 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
55 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
56 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
57 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
62 #ifndef _DEV_PCI_QATVAR_H_
63 #define _DEV_PCI_QATVAR_H_
65 #include <sys/counter.h>
66 #include <sys/malloc.h>
68 #include <opencrypto/cryptodev.h>
70 #define QAT_NSYMREQ 256
71 #define QAT_NSYMCOOKIE ((QAT_NSYMREQ * 2 + 1) * 2)
72 #define QAT_NASYMREQ 64
73 #define QAT_BATCH_SUBMIT_FREE_SPACE 2
75 #define QAT_EV_NAME_SIZE 32
76 #define QAT_RING_NAME_SIZE 32
78 #define QAT_MAXSEG 32 /* max segments for sg dma */
79 #define QAT_MAXLEN 65535 /* IP_MAXPACKET */
81 #define QAT_HB_INTERVAL 500 /* heartbeat msec */
82 #define QAT_SSM_WDT 100
85 QAT_CHIP_C2XXX = 0, /* NanoQAT: Atom C2000 */
87 QAT_CHIP_C3XXX, /* Atom C3000 */
94 QAT_CHIP_DH895XCC_IOV,
112 #define TIMEOUT_AE_RESET 100
113 #define TIMEOUT_AE_CHECK 10000
114 #define TIMEOUT_AE_CSR 500
115 #define AE_EXEC_CYCLE 20
117 #define QAT_UOF_MAX_PAGE 1
118 #define QAT_UOF_MAX_PAGE_REGION 1
121 bus_dma_tag_t qdm_dma_tag;
122 bus_dmamap_t qdm_dma_map;
123 bus_size_t qdm_dma_size;
124 bus_dma_segment_t qdm_dma_seg;
128 /* Valid internal ring size values */
129 #define QAT_RING_SIZE_128 0x01
130 #define QAT_RING_SIZE_256 0x02
131 #define QAT_RING_SIZE_512 0x03
132 #define QAT_RING_SIZE_4K 0x06
133 #define QAT_RING_SIZE_16K 0x08
134 #define QAT_RING_SIZE_4M 0x10
135 #define QAT_MIN_RING_SIZE QAT_RING_SIZE_128
136 #define QAT_MAX_RING_SIZE QAT_RING_SIZE_4M
137 #define QAT_DEFAULT_RING_SIZE QAT_RING_SIZE_16K
139 /* Valid internal msg size values */
140 #define QAT_MSG_SIZE_32 0x01
141 #define QAT_MSG_SIZE_64 0x02
142 #define QAT_MSG_SIZE_128 0x04
143 #define QAT_MIN_MSG_SIZE QAT_MSG_SIZE_32
144 #define QAT_MAX_MSG_SIZE QAT_MSG_SIZE_128
/* Size to bytes conversion macros for ring and msg size values. */
/*
 * Macro parameters are fully parenthesized so that expression
 * arguments (e.g. "a + b", "x ? y : z") expand with the intended
 * precedence (CERT PRE01-C).
 */
#define QAT_MSG_SIZE_TO_BYTES(SIZE)		((SIZE) << 5)
#define QAT_BYTES_TO_MSG_SIZE(SIZE)		((SIZE) >> 5)
#define QAT_SIZE_TO_RING_SIZE_IN_BYTES(SIZE)	((1 << ((SIZE) - 1)) << 7)
/*
 * NOTE(review): the formula below is not the algebraic inverse of
 * QAT_SIZE_TO_RING_SIZE_IN_BYTES; behavior preserved as-is -- confirm
 * intent against callers.
 */
#define QAT_RING_SIZE_IN_BYTES_TO_SIZE(SIZE)	((1 << ((SIZE) - 1)) >> 7)
/* Minimum ring buffer size for memory allocation. */
/* Parameters parenthesized to avoid precedence surprises (CERT PRE01-C). */
#define QAT_RING_SIZE_BYTES_MIN(SIZE)					\
	(((SIZE) < QAT_SIZE_TO_RING_SIZE_IN_BYTES(QAT_RING_SIZE_4K)) ?	\
	    QAT_SIZE_TO_RING_SIZE_IN_BYTES(QAT_RING_SIZE_4K) : (SIZE))
#define QAT_RING_SIZE_MODULO(SIZE)	((SIZE) + 0x6)
157 #define QAT_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \
/*
 * Max outstanding requests for a ring of the given encoded ring size
 * and message size.  RING_SIZE/MSG_SIZE are parenthesized so that
 * expression arguments expand correctly (CERT PRE01-C).
 */
#define QAT_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE)			\
	((((1 << ((RING_SIZE) - 1)) << 3) >>			\
	    QAT_SIZE_TO_POW(MSG_SIZE)) - 1)
163 #define QAT_RING_PATTERN 0x7f
167 typedef int (*qat_cb_t)(struct qat_softc *, void *, void *);
170 struct mtx qr_ring_mtx; /* Lock per ring */
173 uint32_t * volatile qr_inflight; /* tx/rx shared */
177 uint8_t qr_ring_size;
178 uint32_t qr_ring; /* ring number in bank */
179 uint32_t qr_bank; /* bank number in device */
181 uint32_t qr_ring_mask;
184 struct qat_dmamem qr_dma;
185 bus_addr_t qr_ring_paddr;
/*
 * struct qat_bank fields (fragment): per-bank transport (ETR) state.
 * NOTE(review): the struct header/closing lines are outside this view;
 * only comments are changed here.
 */
191 struct qat_softc *qb_sc; /* back pointer to softc */
192 uint32_t qb_intr_mask; /* current interrupt mask */
193 uint32_t qb_allocated_rings; /* current allocated ring bitfield */
194 uint32_t qb_coalescing_time; /* timer in nano sec, 0: disabled */
195 #define COALESCING_TIME_INTERVAL_DEFAULT 10000
196 #define COALESCING_TIME_INTERVAL_MIN 500
197 #define COALESCING_TIME_INTERVAL_MAX 0xfffff
198 uint32_t qb_bank; /* bank index */
199 struct mtx qb_bank_mtx; /* presumably protects this bank's state -- TODO confirm */
200 struct resource *qb_ih; /* interrupt resource for this bank -- TODO confirm */
203 struct qat_ring qb_et_rings[MAX_RING_PER_BANK]; /* rings of this bank */
208 uint32_t qab_nf_mask;
209 uint32_t qab_nf_dest;
210 uint32_t qab_ne_mask;
211 uint32_t qab_ne_dest;
215 struct qat_ae_page *qap_next;
216 struct qat_uof_page *qap_page;
217 struct qat_ae_region *qap_region;
221 #define QAT_AE_PAGA_FLAG_WAITING (1 << 0)
223 struct qat_ae_region {
224 struct qat_ae_page *qar_loaded_page;
225 STAILQ_HEAD(, qat_ae_page) qar_waiting_pages;
228 struct qat_ae_slice {
229 u_int qas_assigned_ctx_mask;
230 struct qat_ae_region qas_regions[QAT_UOF_MAX_PAGE_REGION];
231 struct qat_ae_page qas_pages[QAT_UOF_MAX_PAGE];
232 struct qat_ae_page *qas_cur_pages[MAX_AE_CTX];
233 struct qat_uof_image *qas_image;
236 #define QAT_AE(sc, ae) \
/*
 * struct qat_ae fields (fragment): per-accelerator-engine (AE) state.
 * NOTE(review): the struct header/closing lines are outside this view;
 * only comments are changed here.
 */
240 u_int qae_state; /* AE state */
241 u_int qae_ustore_size; /* micro-store size -- TODO confirm; original comment duplicated qae_free_addr's */
242 u_int qae_free_addr; /* free micro-store address */
243 u_int qae_free_size; /* free micro-store size */
244 u_int qae_live_ctx_mask; /* live context mask */
245 u_int qae_ustore_dram_addr; /* micro-store DRAM address */
246 u_int qae_reload_size; /* reloadable code size */
249 u_int qae_num_slices; /* count of entries used in qae_slices (presumed) */
250 struct qat_ae_slice qae_slices[MAX_AE_CTX];
251 u_int qae_reloc_ustore_dram; /* reloadable ustore-dram address */
252 u_int qae_effect_ustore_size; /* effective AE ustore size */
253 u_int qae_shareable_ustore; /* nonzero if ustore is shareable -- TODO confirm */
257 void *qmf_sym; /* SYM_OBJS in sc_fw_mof */
259 void *qmf_uof_objs; /* UOF_OBJS in sc_fw_mof */
260 size_t qmf_uof_objs_size;
261 void *qmf_suof_objs; /* SUOF_OBJS in sc_fw_mof */
262 size_t qmf_suof_objs_size;
265 struct qat_ae_batch_init {
270 STAILQ_ENTRY(qat_ae_batch_init) qabi_next;
273 STAILQ_HEAD(qat_ae_batch_init_list, qat_ae_batch_init);
275 /* overwritten struct uof_uword_block */
276 struct qat_uof_uword_block {
277 u_int quub_start_addr; /* start address */
278 u_int quub_num_words; /* number of microwords */
279 uint64_t quub_micro_words; /* pointer to the uwords */
282 struct qat_uof_page {
283 u_int qup_page_num; /* page number */
284 u_int qup_def_page; /* default page */
285 u_int qup_page_region; /* region of page */
286 u_int qup_beg_vaddr; /* begin virtual address */
287 u_int qup_beg_paddr; /* begin physical address */
289 u_int qup_num_uc_var; /* num of uC var in array */
290 struct uof_uword_fixup *qup_uc_var;
291 /* array of import variables */
292 u_int qup_num_imp_var; /* num of import var in array */
293 struct uof_import_var *qup_imp_var;
294 /* array of import variables */
295 u_int qup_num_imp_expr; /* num of import expr in array */
296 struct uof_uword_fixup *qup_imp_expr;
297 /* array of import expressions */
298 u_int qup_num_neigh_reg; /* num of neigh-reg in array */
299 struct uof_uword_fixup *qup_neigh_reg;
300 /* array of neigh-reg assignments */
301 u_int qup_num_micro_words; /* number of microwords in the seg */
303 u_int qup_num_uw_blocks; /* number of uword blocks */
304 struct qat_uof_uword_block *qup_uw_blocks;
305 /* array of uword blocks */
308 struct qat_uof_image {
309 struct uof_image *qui_image; /* image pointer */
310 struct qat_uof_page qui_pages[QAT_UOF_MAX_PAGE];
313 u_int qui_num_ae_reg; /* num of registers */
314 struct uof_ae_reg *qui_ae_reg; /* array of registers */
316 u_int qui_num_init_reg_sym; /* num of reg/sym init values */
317 struct uof_init_reg_sym *qui_init_reg_sym;
318 /* array of reg/sym init values */
320 u_int qui_num_sbreak; /* num of sbreak values */
321 struct qui_sbreak *qui_sbreak; /* array of sbreak values */
323 u_int qui_num_uwords_used;
324 /* highest uword address referenced + 1 */
327 struct qat_aefw_uof {
328 size_t qafu_size; /* uof size */
329 struct uof_obj_hdr *qafu_obj_hdr; /* UOF_OBJS */
332 size_t qafu_str_tab_size;
334 u_int qafu_num_init_mem;
335 struct uof_init_mem *qafu_init_mem;
336 size_t qafu_init_mem_size;
338 struct uof_var_mem_seg *qafu_var_mem_seg;
340 struct qat_ae_batch_init_list qafu_lm_init[MAX_AE];
341 size_t qafu_num_lm_init[MAX_AE];
342 size_t qafu_num_lm_init_inst[MAX_AE];
344 u_int qafu_num_imgs; /* number of uof image */
345 struct qat_uof_image qafu_imgs[MAX_NUM_AE * MAX_AE_CTX];
349 #define QAT_SERVICE_CRYPTO_A (1 << 0)
350 #define QAT_SERVICE_CRYPTO_B (1 << 1)
352 struct qat_admin_rings {
353 uint32_t qadr_active_aes_per_accel;
354 uint8_t qadr_srv_mask[MAX_AE_PER_ACCEL];
356 struct qat_dmamem qadr_dma;
357 struct fw_init_ring_table *qadr_master_ring_tbl;
358 struct fw_init_ring_table *qadr_cya_ring_tbl;
359 struct fw_init_ring_table *qadr_cyb_ring_tbl;
361 struct qat_ring *qadr_admin_tx;
362 struct qat_ring *qadr_admin_rx;
365 struct qat_accel_init_cb {
369 struct qat_admin_comms {
370 struct qat_dmamem qadc_dma;
371 struct qat_dmamem qadc_const_tbl_dma;
372 struct qat_dmamem qadc_hb_dma;
375 #define QAT_PID_MINOR_REV 0xf
376 #define QAT_PID_MAJOR_REV (0xf << 4)
378 struct qat_suof_image {
381 char *qsi_css_header;
383 char *qsi_css_signature;
385 u_long qsi_simg_size;
389 u_long qsi_simg_name;
390 u_long qsi_appmeta_data;
391 struct qat_dmamem qsi_dma;
394 struct qat_aefw_suof {
396 u_int qafs_check_sum;
401 u_int qafs_suof_size;
404 u_int qafs_num_simgs;
405 struct qat_suof_image *qafs_simg;
408 enum qat_sym_hash_algorithm {
409 QAT_SYM_HASH_NONE = 0,
410 QAT_SYM_HASH_MD5 = 1,
411 QAT_SYM_HASH_SHA1 = 2,
412 QAT_SYM_HASH_SHA224 = 3,
413 QAT_SYM_HASH_SHA256 = 4,
414 QAT_SYM_HASH_SHA384 = 5,
415 QAT_SYM_HASH_SHA512 = 6,
416 QAT_SYM_HASH_AES_XCBC = 7,
417 QAT_SYM_HASH_AES_CCM = 8,
418 QAT_SYM_HASH_AES_GCM = 9,
419 QAT_SYM_HASH_KASUMI_F9 = 10,
420 QAT_SYM_HASH_SNOW3G_UIA2 = 11,
421 QAT_SYM_HASH_AES_CMAC = 12,
422 QAT_SYM_HASH_AES_GMAC = 13,
423 QAT_SYM_HASH_AES_CBC_MAC = 14,
426 #define QAT_HASH_MD5_BLOCK_SIZE 64
427 #define QAT_HASH_MD5_DIGEST_SIZE 16
428 #define QAT_HASH_MD5_STATE_SIZE 16
429 #define QAT_HASH_SHA1_BLOCK_SIZE 64
430 #define QAT_HASH_SHA1_DIGEST_SIZE 20
431 #define QAT_HASH_SHA1_STATE_SIZE 20
432 #define QAT_HASH_SHA224_BLOCK_SIZE 64
433 #define QAT_HASH_SHA224_DIGEST_SIZE 28
434 #define QAT_HASH_SHA224_STATE_SIZE 32
435 #define QAT_HASH_SHA256_BLOCK_SIZE 64
436 #define QAT_HASH_SHA256_DIGEST_SIZE 32
437 #define QAT_HASH_SHA256_STATE_SIZE 32
438 #define QAT_HASH_SHA384_BLOCK_SIZE 128
439 #define QAT_HASH_SHA384_DIGEST_SIZE 48
440 #define QAT_HASH_SHA384_STATE_SIZE 64
441 #define QAT_HASH_SHA512_BLOCK_SIZE 128
442 #define QAT_HASH_SHA512_DIGEST_SIZE 64
443 #define QAT_HASH_SHA512_STATE_SIZE 64
444 #define QAT_HASH_XCBC_PRECOMP_KEY_NUM 3
445 #define QAT_HASH_XCBC_MAC_BLOCK_SIZE 16
446 #define QAT_HASH_XCBC_MAC_128_DIGEST_SIZE 16
447 #define QAT_HASH_CMAC_BLOCK_SIZE 16
448 #define QAT_HASH_CMAC_128_DIGEST_SIZE 16
449 #define QAT_HASH_AES_CCM_BLOCK_SIZE 16
450 #define QAT_HASH_AES_CCM_DIGEST_SIZE 16
451 #define QAT_HASH_AES_GCM_BLOCK_SIZE 16
452 #define QAT_HASH_AES_GCM_DIGEST_SIZE 16
453 #define QAT_HASH_AES_GCM_STATE_SIZE 16
454 #define QAT_HASH_KASUMI_F9_BLOCK_SIZE 8
455 #define QAT_HASH_KASUMI_F9_DIGEST_SIZE 4
456 #define QAT_HASH_SNOW3G_UIA2_BLOCK_SIZE 8
457 #define QAT_HASH_SNOW3G_UIA2_DIGEST_SIZE 4
458 #define QAT_HASH_AES_CBC_MAC_BLOCK_SIZE 16
459 #define QAT_HASH_AES_CBC_MAC_DIGEST_SIZE 16
460 #define QAT_HASH_AES_GCM_ICV_SIZE_8 8
461 #define QAT_HASH_AES_GCM_ICV_SIZE_12 12
462 #define QAT_HASH_AES_GCM_ICV_SIZE_16 16
463 #define QAT_HASH_AES_CCM_ICV_SIZE_MIN 4
464 #define QAT_HASH_AES_CCM_ICV_SIZE_MAX 16
465 #define QAT_HASH_IPAD_BYTE 0x36
466 #define QAT_HASH_OPAD_BYTE 0x5c
467 #define QAT_HASH_IPAD_4_BYTES 0x36363636
468 #define QAT_HASH_OPAD_4_BYTES 0x5c5c5c5c
469 #define QAT_HASH_KASUMI_F9_KEY_MODIFIER_4_BYTES 0xAAAAAAAA
471 #define QAT_SYM_XCBC_STATE_SIZE ((QAT_HASH_XCBC_MAC_BLOCK_SIZE) * 3)
472 #define QAT_SYM_CMAC_STATE_SIZE ((QAT_HASH_CMAC_BLOCK_SIZE) * 3)
474 struct qat_sym_hash_alg_info {
475 uint32_t qshai_digest_len; /* Digest length in bytes */
476 uint32_t qshai_block_len; /* Block length in bytes */
477 uint32_t qshai_state_size; /* size of above state in bytes */
478 const uint8_t *qshai_init_state; /* Initial state */
480 const struct auth_hash *qshai_sah; /* software auth hash */
481 uint32_t qshai_state_offset; /* offset to state in *_CTX */
482 uint32_t qshai_state_word;
485 struct qat_sym_hash_qat_info {
486 uint32_t qshqi_algo_enc; /* QAT Algorithm encoding */
487 uint32_t qshqi_auth_counter; /* Counter value for Auth */
488 uint32_t qshqi_state1_len; /* QAT state1 length in bytes */
489 uint32_t qshqi_state2_len; /* QAT state2 length in bytes */
492 struct qat_sym_hash_def {
493 const struct qat_sym_hash_alg_info *qshd_alg;
494 const struct qat_sym_hash_qat_info *qshd_qat;
497 #define QAT_SYM_REQ_PARAMS_SIZE_MAX (24 + 32)
498 /* Reserve enough space for cipher and authentication request params */
499 /* Basis of values are guaranteed in qat_hw*var.h with CTASSERT */
501 #define QAT_SYM_REQ_PARAMS_SIZE_PADDED \
502 roundup(QAT_SYM_REQ_PARAMS_SIZE_MAX, QAT_OPTIMAL_ALIGN)
503 /* Pad out to 64-byte multiple to ensure optimal alignment of next field */
505 #define QAT_SYM_KEY_TLS_PREFIX_SIZE (128)
506 /* Hash prefix size in bytes for TLS (128 = MAX = SHA2 (384, 512)) */
508 #define QAT_SYM_KEY_MAX_HASH_STATE_BUFFER \
509 (QAT_SYM_KEY_TLS_PREFIX_SIZE * 2)
510 /* hash state prefix buffer structure that holds the maximum sized secret */
512 #define QAT_SYM_HASH_BUFFER_LEN QAT_HASH_SHA512_STATE_SIZE
513 /* Buffer length to hold 16 byte MD5 key and 20 byte SHA1 key */
515 #define QAT_GCM_AAD_SIZE_MAX 240
516 /* Maximum AAD size */
518 #define QAT_AES_GCM_AAD_ALIGN 16
520 struct qat_sym_bulk_cookie {
521 uint8_t qsbc_req_params_buf[QAT_SYM_REQ_PARAMS_SIZE_PADDED];
522 /* memory block reserved for request params
523 * NOTE: Field must be correctly aligned in memory for access by QAT
525 struct qat_crypto *qsbc_crypto;
526 struct qat_session *qsbc_session;
527 /* Session context */
529 /* correlator supplied by the client */
530 uint8_t qsbc_msg[QAT_MSG_SIZE_TO_BYTES(QAT_MAX_MSG_SIZE)];
531 /* QAT request message */
532 } __aligned(QAT_OPTIMAL_ALIGN);
534 /* Basis of values are guaranteed in qat_hw*var.h with CTASSERT */
535 #define HASH_CONTENT_DESC_SIZE 176
536 #define CIPHER_CONTENT_DESC_SIZE 64
538 #define CONTENT_DESC_MAX_SIZE roundup( \
539 HASH_CONTENT_DESC_SIZE + CIPHER_CONTENT_DESC_SIZE, \
542 struct qat_sym_cookie {
543 union qat_sym_cookie_u {
544 /* should be 64byte aligned */
545 struct qat_sym_bulk_cookie qsc_bulk_cookie;
546 /* symmetric bulk cookie */
548 struct qat_sym_key_cookie qsc_key_cookie;
549 /* symmetric key cookie */
550 struct qat_sym_nrbg_cookie qsc_nrbg_cookie;
551 /* symmetric NRBG cookie */
555 /* should be 64-byte aligned */
556 struct buffer_list_desc qsc_buf_list;
557 struct flat_buffer_desc qsc_flat_bufs[QAT_MAXSEG]; /* should be here */
559 bus_dmamap_t qsc_self_dmamap; /* self DMA mapping and
561 bus_dma_tag_t qsc_self_dma_tag;
563 uint8_t qsc_iv_buf[EALG_MAX_BLOCK_LEN];
564 uint8_t qsc_auth_res[QAT_SYM_HASH_BUFFER_LEN];
565 uint8_t qsc_gcm_aad[QAT_GCM_AAD_SIZE_MAX];
566 uint8_t qsc_content_desc[CONTENT_DESC_MAX_SIZE];
567 struct cryptodesc *qsc_enc;
568 struct cryptodesc *qsc_mac;
570 bus_dmamap_t qsc_buf_dmamap; /* qsc_flat_bufs DMA mapping */
571 bus_dma_tag_t qsc_buf_dma_tag;
574 bus_addr_t qsc_bulk_req_params_buf_paddr;
575 bus_addr_t qsc_buffer_list_desc_paddr;
576 bus_addr_t qsc_iv_buf_paddr;
577 bus_addr_t qsc_auth_res_paddr;
578 bus_addr_t qsc_gcm_aad_paddr;
579 bus_addr_t qsc_content_desc_paddr;
582 CTASSERT(offsetof(struct qat_sym_cookie,
583 u.qsc_bulk_cookie.qsbc_req_params_buf) % QAT_OPTIMAL_ALIGN == 0);
584 CTASSERT(offsetof(struct qat_sym_cookie, qsc_buf_list) % QAT_OPTIMAL_ALIGN == 0);
586 #define MAX_CIPHER_SETUP_BLK_SZ \
587 (sizeof(struct hw_cipher_config) + \
588 2 * HW_KASUMI_KEY_SZ + 2 * HW_KASUMI_BLK_SZ)
589 #define MAX_HASH_SETUP_BLK_SZ sizeof(union hw_auth_algo_blk)
591 struct qat_crypto_desc {
592 uint8_t qcd_content_desc[CONTENT_DESC_MAX_SIZE]; /* must be first */
593 /* using only for qat 1.5 */
594 uint8_t qcd_hash_state_prefix_buf[QAT_GCM_AAD_SIZE_MAX];
596 bus_addr_t qcd_desc_paddr;
597 bus_addr_t qcd_hash_state_paddr;
599 enum fw_slice qcd_slices[MAX_FW_SLICE + 1];
600 enum fw_la_cmd_id qcd_cmd_id;
601 enum hw_cipher_dir qcd_cipher_dir;
603 /* content desc info */
604 uint8_t qcd_hdr_sz; /* in quad words */
605 uint8_t qcd_hw_blk_sz; /* in quad words */
606 uint32_t qcd_cipher_offset;
607 uint32_t qcd_auth_offset;
609 uint8_t qcd_state_storage_sz; /* in quad words */
610 uint32_t qcd_gcm_aad_sz_offset1;
611 uint32_t qcd_gcm_aad_sz_offset2;
613 uint16_t qcd_cipher_blk_sz; /* in bytes */
614 uint16_t qcd_auth_sz; /* in bytes */
616 uint8_t qcd_req_cache[QAT_MSG_SIZE_TO_BYTES(QAT_MAX_MSG_SIZE)];
617 } __aligned(QAT_OPTIMAL_ALIGN);
619 /* should be aligned to 64bytes */
621 struct qat_crypto_desc *qs_dec_desc; /* should be at top of struct*/
622 /* decrypt or auth then decrypt or auth */
624 struct qat_crypto_desc *qs_enc_desc;
625 /* encrypt or encrypt then auth */
627 struct qat_dmamem qs_desc_mem;
629 enum hw_cipher_algo qs_cipher_algo;
630 enum hw_cipher_mode qs_cipher_mode;
631 enum hw_auth_algo qs_auth_algo;
632 enum hw_auth_mode qs_auth_mode;
634 const uint8_t *qs_cipher_key;
636 const uint8_t *qs_auth_key;
642 #define QAT_SESSION_STATUS_ACTIVE (1 << 0)
643 #define QAT_SESSION_STATUS_FREEING (1 << 1)
644 uint32_t qs_inflight;
648 struct mtx qs_session_mtx;
651 struct qat_crypto_bank {
654 struct qat_ring *qcb_sym_tx;
655 struct qat_ring *qcb_sym_rx;
657 struct qat_dmamem qcb_symck_dmamems[QAT_NSYMCOOKIE];
658 struct qat_sym_cookie *qcb_symck_free[QAT_NSYMCOOKIE];
659 uint32_t qcb_symck_free_count;
661 struct mtx qcb_bank_mtx;
663 char qcb_ring_names[2][QAT_RING_NAME_SIZE]; /* sym tx,rx */
667 struct qat_softc *qcy_sc;
668 uint32_t qcy_bank_mask;
669 uint16_t qcy_num_banks;
671 int32_t qcy_cid; /* OpenCrypto driver ID */
673 struct qat_crypto_bank *qcy_banks; /* array of qat_crypto_bank */
675 uint32_t qcy_session_free_count;
677 struct mtx qcy_crypto_mtx;
681 int8_t qhw_sram_bar_id;
682 int8_t qhw_misc_bar_id;
683 int8_t qhw_etr_bar_id;
685 bus_size_t qhw_cap_global_offset;
686 bus_size_t qhw_ae_offset;
687 bus_size_t qhw_ae_local_offset;
688 bus_size_t qhw_etr_bundle_size;
690 /* crypto processing callbacks */
691 size_t qhw_crypto_opaque_offset;
692 void (*qhw_crypto_setup_req_params)(struct qat_crypto_bank *,
693 struct qat_session *, struct qat_crypto_desc const *,
694 struct qat_sym_cookie *, struct cryptodesc *, struct cryptodesc *);
695 void (*qhw_crypto_setup_desc)(struct qat_crypto *, struct qat_session *,
696 struct qat_crypto_desc *);
698 uint8_t qhw_num_banks; /* max number of banks */
699 uint8_t qhw_num_ap_banks; /* max number of AutoPush banks */
700 uint8_t qhw_num_rings_per_bank; /* rings per bank */
701 uint8_t qhw_num_accel; /* max number of accelerators */
702 uint8_t qhw_num_engines; /* max number of accelerator engines */
703 uint8_t qhw_tx_rx_gap;
704 uint32_t qhw_tx_rings_mask;
705 uint32_t qhw_clock_per_sec;
707 uint32_t qhw_fw_req_size;
708 uint32_t qhw_fw_resp_size;
710 uint8_t qhw_ring_sym_tx;
711 uint8_t qhw_ring_sym_rx;
712 uint8_t qhw_ring_asym_tx;
713 uint8_t qhw_ring_asym_rx;
716 uint32_t qhw_msix_ae_vec_gap; /* gap to ae vec from bank */
718 const char *qhw_mof_fwname;
719 const char *qhw_mmp_fwname;
721 uint32_t qhw_prod_type; /* cpu type */
723 /* setup callbacks */
724 uint32_t (*qhw_get_accel_mask)(struct qat_softc *);
725 uint32_t (*qhw_get_ae_mask)(struct qat_softc *);
726 enum qat_sku (*qhw_get_sku)(struct qat_softc *);
727 uint32_t (*qhw_get_accel_cap)(struct qat_softc *);
728 const char *(*qhw_get_fw_uof_name)(struct qat_softc *);
729 void (*qhw_enable_intr)(struct qat_softc *);
730 void (*qhw_init_etr_intr)(struct qat_softc *, int);
731 int (*qhw_init_admin_comms)(struct qat_softc *);
732 int (*qhw_send_admin_init)(struct qat_softc *);
733 int (*qhw_init_arb)(struct qat_softc *);
734 void (*qhw_get_arb_mapping)(struct qat_softc *, const uint32_t **);
735 void (*qhw_enable_error_correction)(struct qat_softc *);
736 int (*qhw_check_uncorrectable_error)(struct qat_softc *);
737 void (*qhw_print_err_registers)(struct qat_softc *);
738 void (*qhw_disable_error_interrupts)(struct qat_softc *);
739 int (*qhw_check_slice_hang)(struct qat_softc *);
740 int (*qhw_set_ssm_wdtimer)(struct qat_softc *);
745 #define QAT_FLAG_ESRAM_ENABLE_AUTO_INIT (1 << 0)
746 #define QAT_FLAG_SHRAM_WAIT_READY (1 << 1)
749 #define QAT_ACCEL_CAP_CRYPTO_SYMMETRIC (1 << 0)
750 #define QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC (1 << 1)
751 #define QAT_ACCEL_CAP_CIPHER (1 << 2)
752 #define QAT_ACCEL_CAP_AUTHENTICATION (1 << 3)
753 #define QAT_ACCEL_CAP_REGEX (1 << 4)
754 #define QAT_ACCEL_CAP_COMPRESSION (1 << 5)
755 #define QAT_ACCEL_CAP_LZS_COMPRESSION (1 << 6)
756 #define QAT_ACCEL_CAP_RANDOM_NUMBER (1 << 7)
757 #define QAT_ACCEL_CAP_ZUC (1 << 8)
758 #define QAT_ACCEL_CAP_SHA3 (1 << 9)
759 #define QAT_ACCEL_CAP_KPT (1 << 10)
761 #define QAT_ACCEL_CAP_BITS \
766 "b\x07" "RANDOM_NUMBER\0" \
767 "b\x06" "LZS_COMPRESSION\0" \
768 "b\x05" "COMPRESSION\0" \
770 "b\x03" "AUTHENTICATION\0" \
772 "b\x01" "CRYPTO_ASYMMETRIC\0" \
773 "b\x00" "CRYPTO_SYMMETRIC\0"
775 #define QAT_HI_PRIO_RING_WEIGHT 0xfc
776 #define QAT_LO_PRIO_RING_WEIGHT 0xfe
777 #define QAT_DEFAULT_RING_WEIGHT 0xff
778 #define QAT_DEFAULT_PVL 0
786 struct resource *sc_res[MAX_BARS];
787 int sc_rid[MAX_BARS];
788 bus_space_tag_t sc_csrt[MAX_BARS];
789 bus_space_handle_t sc_csrh[MAX_BARS];
794 struct qat_crypto sc_crypto; /* crypto services */
802 uint32_t sc_accel_num;
803 uint32_t sc_accel_mask;
804 uint32_t sc_accel_cap;
806 struct qat_admin_rings sc_admin_rings; /* use only for qat 1.5 */
807 struct qat_admin_comms sc_admin_comms; /* use only for qat 1.7 */
810 struct qat_bank *sc_etr_banks; /* array of etr banks */
811 struct qat_ap_bank *sc_etr_ap_banks; /* array of etr auto push banks */
814 struct qat_ae sc_ae[MAX_NUM_AE];
817 struct resource *sc_ih; /* ae cluster ih */
818 void *sc_ih_cookie; /* ae cluster ih cookie */
821 counter_u64_t sc_gcm_aad_restarts;
822 counter_u64_t sc_gcm_aad_updates;
823 counter_u64_t sc_ring_full_restarts;
824 counter_u64_t sc_sym_alloc_failures;
827 void *sc_fw_mof; /* mof data */
828 size_t sc_fw_mof_size; /* mof size */
829 struct qat_mof sc_mof; /* mof sections */
831 const char *sc_fw_uof_name; /* uof/suof name in mof */
833 void *sc_fw_uof; /* uof head */
834 size_t sc_fw_uof_size; /* uof size */
835 struct qat_aefw_uof sc_aefw_uof; /* UOF_OBJS in uof */
837 void *sc_fw_suof; /* suof head */
838 size_t sc_fw_suof_size; /* suof size */
839 struct qat_aefw_suof sc_aefw_suof; /* suof context */
841 void *sc_fw_mmp; /* mmp data */
842 size_t sc_fw_mmp_size; /* mmp size */
/*
 * Write the 32-bit CSR "value" at byte "offset" within BAR "baroff".
 * Asserts baroff is a valid BAR index before touching bus space.
 * (Return-type line and closing brace are outside this view.)
 */
846 qat_bar_write_4(struct qat_softc *sc, int baroff, bus_size_t offset,
850 MPASS(baroff >= 0 && baroff < MAX_BARS);
852 bus_space_write_4(sc->sc_csrt[baroff],
853 sc->sc_csrh[baroff], offset, value);
/*
 * Read and return the 32-bit CSR at byte "offset" within BAR "baroff".
 * Asserts baroff is a valid BAR index before touching bus space.
 */
856 static inline uint32_t
857 qat_bar_read_4(struct qat_softc *sc, int baroff, bus_size_t offset)
860 MPASS(baroff >= 0 && baroff < MAX_BARS);
862 return bus_space_read_4(sc->sc_csrt[baroff],
863 sc->sc_csrh[baroff], offset);
867 qat_misc_write_4(struct qat_softc *sc, bus_size_t offset, uint32_t value)
870 qat_bar_write_4(sc, sc->sc_hw.qhw_misc_bar_id, offset, value);
873 static inline uint32_t
874 qat_misc_read_4(struct qat_softc *sc, bus_size_t offset)
877 return qat_bar_read_4(sc, sc->sc_hw.qhw_misc_bar_id, offset);
881 qat_misc_read_write_or_4(struct qat_softc *sc, bus_size_t offset,
886 reg = qat_misc_read_4(sc, offset);
888 qat_misc_write_4(sc, offset, reg);
892 qat_misc_read_write_and_4(struct qat_softc *sc, bus_size_t offset,
897 reg = qat_misc_read_4(sc, offset);
899 qat_misc_write_4(sc, offset, reg);
903 qat_etr_write_4(struct qat_softc *sc, bus_size_t offset, uint32_t value)
906 qat_bar_write_4(sc, sc->sc_hw.qhw_etr_bar_id, offset, value);
909 static inline uint32_t
910 qat_etr_read_4(struct qat_softc *sc, bus_size_t offset)
913 return qat_bar_read_4(sc, sc->sc_hw.qhw_etr_bar_id, offset);
917 qat_ae_local_write_4(struct qat_softc *sc, u_char ae, bus_size_t offset,
921 offset = __SHIFTIN(ae & sc->sc_ae_mask, AE_LOCAL_AE_MASK) |
922 (offset & AE_LOCAL_CSR_MASK);
924 qat_misc_write_4(sc, sc->sc_hw.qhw_ae_local_offset + offset,
928 static inline uint32_t
929 qat_ae_local_read_4(struct qat_softc *sc, u_char ae, bus_size_t offset)
932 offset = __SHIFTIN(ae & sc->sc_ae_mask, AE_LOCAL_AE_MASK) |
933 (offset & AE_LOCAL_CSR_MASK);
935 return qat_misc_read_4(sc, sc->sc_hw.qhw_ae_local_offset + offset);
939 qat_ae_xfer_write_4(struct qat_softc *sc, u_char ae, bus_size_t offset,
942 offset = __SHIFTIN(ae & sc->sc_ae_mask, AE_XFER_AE_MASK) |
943 __SHIFTIN(offset, AE_XFER_CSR_MASK);
945 qat_misc_write_4(sc, sc->sc_hw.qhw_ae_offset + offset, value);
949 qat_cap_global_write_4(struct qat_softc *sc, bus_size_t offset, uint32_t value)
952 qat_misc_write_4(sc, sc->sc_hw.qhw_cap_global_offset + offset, value);
955 static inline uint32_t
956 qat_cap_global_read_4(struct qat_softc *sc, bus_size_t offset)
959 return qat_misc_read_4(sc, sc->sc_hw.qhw_cap_global_offset + offset);
964 qat_etr_bank_write_4(struct qat_softc *sc, int bank,
965 bus_size_t offset, uint32_t value)
968 qat_etr_write_4(sc, sc->sc_hw.qhw_etr_bundle_size * bank + offset,
972 static inline uint32_t
973 qat_etr_bank_read_4(struct qat_softc *sc, int bank,
977 return qat_etr_read_4(sc,
978 sc->sc_hw.qhw_etr_bundle_size * bank + offset);
982 qat_etr_ap_bank_write_4(struct qat_softc *sc, int ap_bank,
983 bus_size_t offset, uint32_t value)
986 qat_etr_write_4(sc, ETR_AP_BANK_OFFSET * ap_bank + offset, value);
989 static inline uint32_t
990 qat_etr_ap_bank_read_4(struct qat_softc *sc, int ap_bank,
994 return qat_etr_read_4(sc, ETR_AP_BANK_OFFSET * ap_bank + offset);
999 qat_etr_bank_ring_write_4(struct qat_softc *sc, int bank, int ring,
1000 bus_size_t offset, uint32_t value)
1003 qat_etr_bank_write_4(sc, bank, (ring << 2) + offset, value);
1006 static inline uint32_t
1007 qat_etr_bank_ring_read_4(struct qat_softc *sc, int bank, int ring,
1011 return qat_etr_bank_read_4(sc, bank, (ring << 2) * offset);
1015 qat_etr_bank_ring_base_write_8(struct qat_softc *sc, int bank, int ring,
1020 lo = (uint32_t)(value & 0xffffffff);
1021 hi = (uint32_t)((value & 0xffffffff00000000ULL) >> 32);
1022 qat_etr_bank_ring_write_4(sc, bank, ring, ETR_RING_LBASE, lo);
1023 qat_etr_bank_ring_write_4(sc, bank, ring, ETR_RING_UBASE, hi);
1027 qat_arb_ringsrvarben_write_4(struct qat_softc *sc, int index, uint32_t value)
1030 qat_etr_write_4(sc, ARB_RINGSRVARBEN_OFFSET +
1031 (ARB_REG_SLOT * index), value);
1035 qat_arb_sarconfig_write_4(struct qat_softc *sc, int index, uint32_t value)
1038 qat_etr_write_4(sc, ARB_OFFSET +
1039 (ARB_REG_SIZE * index), value);
1043 qat_arb_wrk_2_ser_map_write_4(struct qat_softc *sc, int index, uint32_t value)
1046 qat_etr_write_4(sc, ARB_OFFSET + ARB_WRK_2_SER_MAP_OFFSET +
1047 (ARB_REG_SIZE * index), value);
1050 void * qat_alloc_mem(size_t);
1051 void qat_free_mem(void *);
1052 void qat_free_dmamem(struct qat_softc *, struct qat_dmamem *);
1053 int qat_alloc_dmamem(struct qat_softc *, struct qat_dmamem *, int,
1054 bus_size_t, bus_size_t);
1056 int qat_etr_setup_ring(struct qat_softc *, int, uint32_t, uint32_t,
1057 uint32_t, qat_cb_t, void *, const char *,
1058 struct qat_ring **);
1059 int qat_etr_put_msg(struct qat_softc *, struct qat_ring *,
1062 void qat_memcpy_htobe64(void *, const void *, size_t);
1063 void qat_memcpy_htobe32(void *, const void *, size_t);
1064 void qat_memcpy_htobe(void *, const void *, size_t, uint32_t);
1065 void qat_crypto_gmac_precompute(const struct qat_crypto_desc *,
1066 const uint8_t *key, int klen,
1067 const struct qat_sym_hash_def *, uint8_t *);
1068 void qat_crypto_hmac_precompute(const struct qat_crypto_desc *,
1069 const uint8_t *, int, const struct qat_sym_hash_def *,
1070 uint8_t *, uint8_t *);
1071 uint16_t qat_crypto_load_cipher_session(const struct qat_crypto_desc *,
1072 const struct qat_session *);
1073 uint16_t qat_crypto_load_auth_session(const struct qat_crypto_desc *,
1074 const struct qat_session *,
1075 struct qat_sym_hash_def const **);