2 * Copyright (c) 2012-2016 Solarflare Communications Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright notice,
11 * this list of conditions and the following disclaimer in the documentation
12 * and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
16 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
21 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
22 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
23 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
24 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 * The views and conclusions contained in the software and documentation are
27 * those of the authors and should not be interpreted as representing official
28 * policies, either expressed or implied, of the FreeBSD Project.
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
38 #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
/*
 * NOTE(review): listing is gappy (embedded line numbers skip; the function
 * name line is missing). From the MC_CMD_INIT_RXQ usage this is presumably
 * efx_mcdi_init_rxq() — confirm against the full file.
 *
 * Builds and issues an INIT_RXQ MCDI request to create an RX queue on the
 * MC: selects the DMA mode (packed stream, equal-stride super-buffer, or
 * single packet), populates the common queue flags, the optional
 * equal-stride parameters, and the DMA addresses of the queue's pages.
 */
41 static __checkReturn efx_rc_t
45 __in uint32_t target_evq,
47 __in uint32_t instance,
48 __in efsys_mem_t *esmp,
49 __in boolean_t disable_scatter,
50 __in boolean_t want_inner_classes,
51 __in uint32_t ps_bufsize,
52 __in uint32_t es_bufs_per_desc,
53 __in uint32_t es_max_dma_len,
54 __in uint32_t es_buf_stride,
55 __in uint32_t hol_block_timeout)
57 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
/* Shared in/out buffer sized for the larger of request and response. */
59 uint8_t payload[MAX(MC_CMD_INIT_RXQ_V3_IN_LEN,
60 MC_CMD_INIT_RXQ_V3_OUT_LEN)];
61 int npages = EFX_RXQ_NBUFS(ndescs);
63 efx_qword_t *dma_addr;
67 boolean_t want_outer_classes;
69 EFSYS_ASSERT3U(ndescs, <=, EFX_RXQ_MAXNDESCS);
/* The descriptor-ring memory must exist and be large enough. */
71 if ((esmp == NULL) || (EFSYS_MEM_SIZE(esmp) < EFX_RXQ_SIZE(ndescs))) {
/*
 * DMA mode selection: packed stream, equal-stride super-buffer,
 * or the default single-packet mode (the ps_bufsize test on the
 * line preceding this one is missing from this listing).
 */
77 dma_mode = MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM;
78 else if (es_bufs_per_desc > 0)
79 dma_mode = MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER;
81 dma_mode = MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET;
83 if (encp->enc_tunnel_encapsulations_supported != 0 &&
84 !want_inner_classes) {
86 * WANT_OUTER_CLASSES can only be specified on hardware which
87 * supports tunnel encapsulation offloads, even though it is
88 * effectively the behaviour the hardware gives.
90 * Also, on hardware which does support such offloads, older
91 * firmware rejects the flag if the offloads are not supported
92 * by the current firmware variant, which means this may fail if
93 * the capabilities are not updated when the firmware variant
94 * changes. This is not an issue on newer firmware, as it was
95 * changed in bug 69842 (v6.4.2.1007) to permit this flag to be
96 * specified on all firmware variants.
98 want_outer_classes = B_TRUE;
100 want_outer_classes = B_FALSE;
103 (void) memset(payload, 0, sizeof (payload));
104 req.emr_cmd = MC_CMD_INIT_RXQ;
105 req.emr_in_buf = payload;
106 req.emr_in_length = MC_CMD_INIT_RXQ_V3_IN_LEN;
107 req.emr_out_buf = payload;
108 req.emr_out_length = MC_CMD_INIT_RXQ_V3_OUT_LEN;
110 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_SIZE, ndescs);
111 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_TARGET_EVQ, target_evq);
112 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_LABEL, label);
113 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_INSTANCE, instance);
114 MCDI_IN_POPULATE_DWORD_9(req, INIT_RXQ_EXT_IN_FLAGS,
115 INIT_RXQ_EXT_IN_FLAG_BUFF_MODE, 0,
116 INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT, 0,
117 INIT_RXQ_EXT_IN_FLAG_TIMESTAMP, 0,
118 INIT_RXQ_EXT_IN_CRC_MODE, 0,
119 INIT_RXQ_EXT_IN_FLAG_PREFIX, 1,
120 INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER, disable_scatter,
121 INIT_RXQ_EXT_IN_DMA_MODE,
123 INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE, ps_bufsize,
124 INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES, want_outer_classes);
125 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_OWNER_ID, 0);
126 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
/* Equal-stride super-buffer mode has extra V3 parameters. */
128 if (es_bufs_per_desc > 0) {
129 MCDI_IN_SET_DWORD(req,
130 INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET,
132 MCDI_IN_SET_DWORD(req,
133 INIT_RXQ_V3_IN_ES_MAX_DMA_LEN, es_max_dma_len);
134 MCDI_IN_SET_DWORD(req,
135 INIT_RXQ_V3_IN_ES_PACKET_STRIDE, es_buf_stride);
136 MCDI_IN_SET_DWORD(req,
137 INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT,
/* Fill in the DMA address of each buffer page of the ring. */
141 dma_addr = MCDI_IN2(req, efx_qword_t, INIT_RXQ_IN_DMA_ADDR);
142 addr = EFSYS_MEM_ADDR(esmp);
144 for (i = 0; i < npages; i++) {
145 EFX_POPULATE_QWORD_2(*dma_addr,
146 EFX_DWORD_1, (uint32_t)(addr >> 32),
147 EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
150 addr += EFX_BUF_SIZE;
153 efx_mcdi_execute(enp, &req);
155 if (req.emr_rc != 0) {
165 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * NOTE(review): function name line missing from this gappy listing;
 * from MC_CMD_FINI_RXQ this is presumably efx_mcdi_fini_rxq() — confirm.
 *
 * Issues a FINI_RXQ MCDI request to tear down the RX queue identified
 * by 'instance'. Executed quietly: EALREADY after an MC reboot is not
 * treated as a failure (see the comment below).
 */
170 static __checkReturn efx_rc_t
173 __in uint32_t instance)
176 uint8_t payload[MAX(MC_CMD_FINI_RXQ_IN_LEN,
177 MC_CMD_FINI_RXQ_OUT_LEN)];
180 (void) memset(payload, 0, sizeof (payload));
181 req.emr_cmd = MC_CMD_FINI_RXQ;
182 req.emr_in_buf = payload;
183 req.emr_in_length = MC_CMD_FINI_RXQ_IN_LEN;
184 req.emr_out_buf = payload;
185 req.emr_out_length = MC_CMD_FINI_RXQ_OUT_LEN;
187 MCDI_IN_SET_DWORD(req, FINI_RXQ_IN_INSTANCE, instance);
189 efx_mcdi_execute_quiet(enp, &req);
191 if (req.emr_rc != 0) {
200 * EALREADY is not an error, but indicates that the MC has rebooted and
201 * that the RXQ has already been destroyed.
204 EFSYS_PROBE1(fail1, efx_rc_t, rc);
209 #if EFSYS_OPT_RX_SCALE
/*
 * Allocates an RSS context on the MC via RSS_CONTEXT_ALLOC and returns
 * its ID through *rss_contextp. 'type' selects an exclusive context
 * (private indirection table and key) or a shared one; 'num_queues'
 * bounds the queues the context spreads over (see comment below).
 * Fails if num_queues exceeds EFX_MAXRSS, if the MCDI call fails, if
 * the response is truncated, or if the MC returns the invalid-context ID.
 */
210 static __checkReturn efx_rc_t
211 efx_mcdi_rss_context_alloc(
213 __in efx_rx_scale_context_type_t type,
214 __in uint32_t num_queues,
215 __out uint32_t *rss_contextp)
218 uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN,
219 MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)];
220 uint32_t rss_context;
221 uint32_t context_type;
224 if (num_queues > EFX_MAXRSS) {
/* Map the generic context type onto the MCDI encoding. */
230 case EFX_RX_SCALE_EXCLUSIVE:
231 context_type = MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE;
233 case EFX_RX_SCALE_SHARED:
234 context_type = MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
241 (void) memset(payload, 0, sizeof (payload));
242 req.emr_cmd = MC_CMD_RSS_CONTEXT_ALLOC;
243 req.emr_in_buf = payload;
244 req.emr_in_length = MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN;
245 req.emr_out_buf = payload;
246 req.emr_out_length = MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN;
248 MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
249 EVB_PORT_ID_ASSIGNED);
250 MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_TYPE, context_type);
253 * For exclusive contexts, NUM_QUEUES is only used to validate
254 * indirection table offsets.
255 * For shared contexts, the provided context will spread traffic over
256 * NUM_QUEUES many queues.
258 MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, num_queues);
260 efx_mcdi_execute(enp, &req);
262 if (req.emr_rc != 0) {
/* Response must be large enough to contain the context ID. */
267 if (req.emr_out_length_used < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN) {
272 rss_context = MCDI_OUT_DWORD(req, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
273 if (rss_context == EF10_RSS_CONTEXT_INVALID) {
278 *rss_contextp = rss_context;
291 EFSYS_PROBE1(fail1, efx_rc_t, rc);
295 #endif /* EFSYS_OPT_RX_SCALE */
297 #if EFSYS_OPT_RX_SCALE
/*
 * Releases a previously allocated RSS context via RSS_CONTEXT_FREE.
 * Rejects the invalid-context sentinel up front; the MCDI call itself
 * is executed quietly.
 */
299 efx_mcdi_rss_context_free(
301 __in uint32_t rss_context)
304 uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_FREE_IN_LEN,
305 MC_CMD_RSS_CONTEXT_FREE_OUT_LEN)];
308 if (rss_context == EF10_RSS_CONTEXT_INVALID) {
313 (void) memset(payload, 0, sizeof (payload));
314 req.emr_cmd = MC_CMD_RSS_CONTEXT_FREE;
315 req.emr_in_buf = payload;
316 req.emr_in_length = MC_CMD_RSS_CONTEXT_FREE_IN_LEN;
317 req.emr_out_buf = payload;
318 req.emr_out_length = MC_CMD_RSS_CONTEXT_FREE_OUT_LEN;
320 MCDI_IN_SET_DWORD(req, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID, rss_context);
322 efx_mcdi_execute_quiet(enp, &req);
324 if (req.emr_rc != 0) {
334 EFSYS_PROBE1(fail1, efx_rc_t, rc);
338 #endif /* EFSYS_OPT_RX_SCALE */
340 #if EFSYS_OPT_RX_SCALE
/*
 * Programs the hash-enable flags and per-traffic-class RSS modes of an
 * RSS context via RSS_CONTEXT_SET_FLAGS. The legacy TOEPLITZ_*_EN bits
 * are derived from 'type'; the newer per-class RSS_MODE fields are
 * extracted from a working copy ('modes') that is zeroed when the
 * firmware lacks additional-modes support (older firmware rejects
 * non-zero RSS_MODE fields).
 */
342 efx_mcdi_rss_context_set_flags(
344 __in uint32_t rss_context,
345 __in efx_rx_hash_type_t type)
347 efx_nic_cfg_t *encp = &enp->en_nic_cfg;
348 efx_rx_hash_type_t type_ipv4;
349 efx_rx_hash_type_t type_ipv4_tcp;
350 efx_rx_hash_type_t type_ipv6;
351 efx_rx_hash_type_t type_ipv6_tcp;
352 efx_rx_hash_type_t modes;
354 uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN,
355 MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN)];
/*
 * The EFX_RX_CLASS_* field layout must match the MCDI RSS_MODE field
 * layout exactly, since EXTRACT_RSS_MODE below relies on identical
 * bit positions and widths.
 */
358 EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_TCP_LBN ==
359 MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN);
360 EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_TCP_WIDTH ==
361 MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH);
362 EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_LBN ==
363 MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN);
364 EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_WIDTH ==
365 MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH);
366 EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_TCP_LBN ==
367 MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN);
368 EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_TCP_WIDTH ==
369 MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH);
370 EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_LBN ==
371 MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN);
372 EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_WIDTH ==
373 MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH);
375 if (rss_context == EF10_RSS_CONTEXT_INVALID) {
380 (void) memset(payload, 0, sizeof (payload));
381 req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_FLAGS;
382 req.emr_in_buf = payload;
383 req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN;
384 req.emr_out_buf = payload;
385 req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN;
387 MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID,
/* Hash-type combinations that enable each legacy TOEPLITZ_*_EN bit. */
390 type_ipv4 = EFX_RX_HASH(IPV4, 2TUPLE) | EFX_RX_HASH(IPV4_TCP, 2TUPLE) |
391 EFX_RX_HASH(IPV4_UDP, 2TUPLE);
392 type_ipv4_tcp = EFX_RX_HASH(IPV4_TCP, 4TUPLE);
393 type_ipv6 = EFX_RX_HASH(IPV6, 2TUPLE) | EFX_RX_HASH(IPV6_TCP, 2TUPLE) |
394 EFX_RX_HASH(IPV6_UDP, 2TUPLE);
395 type_ipv6_tcp = EFX_RX_HASH(IPV6_TCP, 4TUPLE);
398 * Create a copy of the original hash type.
399 * The copy will be used to fill in RSS_MODE bits and
400 * may be cleared beforehand. The original variable
401 * and, thus, EN bits will remain unaffected.
406 * If the firmware lacks support for additional modes, RSS_MODE
407 * fields must contain zeros, otherwise the operation will fail.
409 if (encp->enc_rx_scale_additional_modes_supported == B_FALSE)
/* Pull the mode bits for one traffic class out of 'modes'. */
412 #define EXTRACT_RSS_MODE(_type, _class) \
413 (EFX_EXTRACT_NATIVE(_type, 0, 31, \
414 EFX_LOW_BIT(EFX_RX_CLASS_##_class), \
415 EFX_HIGH_BIT(EFX_RX_CLASS_##_class)) & \
416 EFX_MASK32(EFX_RX_CLASS_##_class))
418 MCDI_IN_POPULATE_DWORD_10(req, RSS_CONTEXT_SET_FLAGS_IN_FLAGS,
419 RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN,
420 ((type & type_ipv4) == type_ipv4) ? 1 : 0,
421 RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN,
422 ((type & type_ipv4_tcp) == type_ipv4_tcp) ? 1 : 0,
423 RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN,
424 ((type & type_ipv6) == type_ipv6) ? 1 : 0,
425 RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN,
426 ((type & type_ipv6_tcp) == type_ipv6_tcp) ? 1 : 0,
427 RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE,
428 EXTRACT_RSS_MODE(modes, IPV4_TCP),
429 RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE,
430 EXTRACT_RSS_MODE(modes, IPV4_UDP),
431 RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE,
432 EXTRACT_RSS_MODE(modes, IPV4),
433 RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE,
434 EXTRACT_RSS_MODE(modes, IPV6_TCP),
435 RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE,
436 EXTRACT_RSS_MODE(modes, IPV6_UDP),
437 RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE,
438 EXTRACT_RSS_MODE(modes, IPV6));
440 #undef EXTRACT_RSS_MODE
442 efx_mcdi_execute(enp, &req);
444 if (req.emr_rc != 0) {
454 EFSYS_PROBE1(fail1, efx_rc_t, rc);
458 #endif /* EFSYS_OPT_RX_SCALE */
460 #if EFSYS_OPT_RX_SCALE
/*
 * Programs the Toeplitz hash key of an RSS context via
 * RSS_CONTEXT_SET_KEY. The caller must supply exactly
 * MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN bytes; the length is
 * asserted in debug builds and rejected at runtime otherwise.
 */
462 efx_mcdi_rss_context_set_key(
464 __in uint32_t rss_context,
465 __in_ecount(n) uint8_t *key,
469 uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN,
470 MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN)];
473 if (rss_context == EF10_RSS_CONTEXT_INVALID) {
478 (void) memset(payload, 0, sizeof (payload));
479 req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_KEY;
480 req.emr_in_buf = payload;
481 req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN;
482 req.emr_out_buf = payload;
483 req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN;
485 MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
488 EFSYS_ASSERT3U(n, ==, MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
489 if (n != MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN) {
494 memcpy(MCDI_IN2(req, uint8_t, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY),
497 efx_mcdi_execute(enp, &req);
499 if (req.emr_rc != 0) {
511 EFSYS_PROBE1(fail1, efx_rc_t, rc);
515 #endif /* EFSYS_OPT_RX_SCALE */
517 #if EFSYS_OPT_RX_SCALE
/*
 * Programs the RSS indirection table of a context via
 * RSS_CONTEXT_SET_TABLE. The caller's table of 'n' queue indices is
 * replicated (modulo n) to fill the fixed-size MCDI table; an empty
 * caller table yields all-zero entries.
 */
519 efx_mcdi_rss_context_set_table(
521 __in uint32_t rss_context,
522 __in_ecount(n) unsigned int *table,
526 uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN,
527 MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN)];
531 if (rss_context == EF10_RSS_CONTEXT_INVALID) {
536 (void) memset(payload, 0, sizeof (payload));
537 req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_TABLE;
538 req.emr_in_buf = payload;
539 req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN;
540 req.emr_out_buf = payload;
541 req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN;
543 MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
547 MCDI_IN2(req, uint8_t, RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE);
/* Repeat the caller's table to fill the full MCDI table length. */
550 i < MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN;
552 req_table[i] = (n > 0) ? (uint8_t)table[i % n] : 0;
555 efx_mcdi_execute(enp, &req);
557 if (req.emr_rc != 0) {
567 EFSYS_PROBE1(fail1, efx_rc_t, rc);
571 #endif /* EFSYS_OPT_RX_SCALE */
/*
 * NOTE(review): function name line missing from this gappy listing;
 * given it seeds enp->en_rss_context at startup this is presumably
 * ef10_rx_init() — confirm against the full file.
 *
 * RX module init: tries to allocate the NIC's default exclusive RSS
 * context. On failure, RSS (and the Toeplitz hash in the RX
 * pseudo-header) is simply marked unavailable rather than failing init.
 */
574 __checkReturn efx_rc_t
578 #if EFSYS_OPT_RX_SCALE
580 if (efx_mcdi_rss_context_alloc(enp, EFX_RX_SCALE_EXCLUSIVE, EFX_MAXRSS,
581 &enp->en_rss_context) == 0) {
583 * Allocated an exclusive RSS context, which allows both the
584 * indirection table and key to be modified.
586 enp->en_rss_context_type = EFX_RX_SCALE_EXCLUSIVE;
587 enp->en_hash_support = EFX_RX_HASH_AVAILABLE;
590 * Failed to allocate an exclusive RSS context. Continue
591 * operation without support for RSS. The pseudo-header in
592 * received packets will not contain a Toeplitz hash value.
594 enp->en_rss_context_type = EFX_RX_SCALE_UNAVAILABLE;
595 enp->en_hash_support = EFX_RX_HASH_UNAVAILABLE;
598 #endif /* EFSYS_OPT_RX_SCALE */
603 #if EFSYS_OPT_RX_SCATTER
/*
 * No-op on EF10: scatter is configured per-queue at queue creation
 * (see the disable_scatter flag passed to INIT_RXQ), so there is
 * nothing to do at the NIC level here.
 */
604 __checkReturn efx_rc_t
605 ef10_rx_scatter_enable(
607 __in unsigned int buf_size)
609 _NOTE(ARGUNUSED(enp, buf_size))
612 #endif /* EFSYS_OPT_RX_SCATTER */
614 #if EFSYS_OPT_RX_SCALE
/* Thin wrapper: allocate an RSS context of the given type/size via MCDI. */
615 __checkReturn efx_rc_t
616 ef10_rx_scale_context_alloc(
618 __in efx_rx_scale_context_type_t type,
619 __in uint32_t num_queues,
620 __out uint32_t *rss_contextp)
624 rc = efx_mcdi_rss_context_alloc(enp, type, num_queues, rss_contextp);
631 EFSYS_PROBE1(fail1, efx_rc_t, rc);
634 #endif /* EFSYS_OPT_RX_SCALE */
636 #if EFSYS_OPT_RX_SCALE
/* Thin wrapper: free an RSS context via MCDI. */
637 __checkReturn efx_rc_t
638 ef10_rx_scale_context_free(
640 __in uint32_t rss_context)
644 rc = efx_mcdi_rss_context_free(enp, rss_context);
651 EFSYS_PROBE1(fail1, efx_rc_t, rc);
654 #endif /* EFSYS_OPT_RX_SCALE */
656 #if EFSYS_OPT_RX_SCALE
/*
 * Sets the hash algorithm and hash types for an RSS context. Hash
 * insertion must be requested (asserted), the algorithm must be
 * advertised in enc_rx_scale_hash_alg_mask, and the default context
 * is resolved to the NIC's own context (failing if RSS is unavailable)
 * before the flags are programmed via MCDI.
 */
657 __checkReturn efx_rc_t
658 ef10_rx_scale_mode_set(
660 __in uint32_t rss_context,
661 __in efx_rx_hash_alg_t alg,
662 __in efx_rx_hash_type_t type,
663 __in boolean_t insert)
665 efx_nic_cfg_t *encp = &enp->en_nic_cfg;
/* EF10 always inserts the hash into the RX prefix when RSS is on. */
668 EFSYS_ASSERT3U(insert, ==, B_TRUE);
670 if ((encp->enc_rx_scale_hash_alg_mask & (1U << alg)) == 0 ||
/* Resolve the default context to the NIC-owned exclusive context. */
676 if (rss_context == EFX_RSS_CONTEXT_DEFAULT) {
677 if (enp->en_rss_context_type == EFX_RX_SCALE_UNAVAILABLE) {
681 rss_context = enp->en_rss_context;
684 if ((rc = efx_mcdi_rss_context_set_flags(enp,
685 rss_context, type)) != 0)
695 EFSYS_PROBE1(fail1, efx_rc_t, rc);
699 #endif /* EFSYS_OPT_RX_SCALE */
701 #if EFSYS_OPT_RX_SCALE
/*
 * Sets the Toeplitz key of an RSS context. EFX_RSS_KEY_SIZE must equal
 * the MCDI key length (statically asserted); the default context is
 * resolved to the NIC's own context before the MCDI call.
 */
702 __checkReturn efx_rc_t
703 ef10_rx_scale_key_set(
705 __in uint32_t rss_context,
706 __in_ecount(n) uint8_t *key,
711 EFX_STATIC_ASSERT(EFX_RSS_KEY_SIZE ==
712 MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
714 if (rss_context == EFX_RSS_CONTEXT_DEFAULT) {
715 if (enp->en_rss_context_type == EFX_RX_SCALE_UNAVAILABLE) {
719 rss_context = enp->en_rss_context;
722 if ((rc = efx_mcdi_rss_context_set_key(enp, rss_context, key, n)) != 0)
730 EFSYS_PROBE1(fail1, efx_rc_t, rc);
734 #endif /* EFSYS_OPT_RX_SCALE */
736 #if EFSYS_OPT_RX_SCALE
/*
 * Sets the indirection table of an RSS context; the default context is
 * resolved to the NIC's own context before the MCDI call.
 */
737 __checkReturn efx_rc_t
738 ef10_rx_scale_tbl_set(
740 __in uint32_t rss_context,
741 __in_ecount(n) unsigned int *table,
747 if (rss_context == EFX_RSS_CONTEXT_DEFAULT) {
748 if (enp->en_rss_context_type == EFX_RX_SCALE_UNAVAILABLE) {
752 rss_context = enp->en_rss_context;
755 if ((rc = efx_mcdi_rss_context_set_table(enp,
756 rss_context, table, n)) != 0)
764 EFSYS_PROBE1(fail1, efx_rc_t, rc);
768 #endif /* EFSYS_OPT_RX_SCALE */
772 * EF10 RX pseudo-header
773 * ---------------------
775 * Receive packets are prefixed by an (optional) 14 byte pseudo-header:
777 * +00: Toeplitz hash value.
778 * (32bit little-endian)
779 * +04: Outer VLAN tag. Zero if the packet did not have an outer VLAN tag.
781 * +06: Inner VLAN tag. Zero if the packet did not have an inner VLAN tag.
783 * +08: Packet Length. Zero if the RX datapath was in cut-through mode.
784 * (16bit little-endian)
785 * +10: MAC timestamp. Zero if timestamping is not enabled.
786 * (32bit little-endian)
788 * See "The RX Pseudo-header" in SF-109306-TC.
/*
 * Extracts the 16-bit little-endian packet length from bytes 8-9 of the
 * RX pseudo-header (layout documented in the comment block above this
 * function). A zero length means the datapath was in cut-through mode
 * and the caller must take the length from the RX event instead.
 */
791 __checkReturn efx_rc_t
792 ef10_rx_prefix_pktlen(
794 __in uint8_t *buffer,
795 __out uint16_t *lengthp)
797 _NOTE(ARGUNUSED(enp))
800 * The RX pseudo-header contains the packet length, excluding the
801 * pseudo-header. If the hardware receive datapath was operating in
802 * cut-through mode then the length in the RX pseudo-header will be
803 * zero, and the packet length must be obtained from the DMA length
804 * reported in the RX event.
/* Assemble the little-endian 16-bit value byte by byte (endian-safe). */
806 *lengthp = buffer[8] | (buffer[9] << 8);
810 #if EFSYS_OPT_RX_SCALE
/*
 * NOTE(review): function name line missing from this gappy listing;
 * presumably ef10_rx_prefix_hash() — confirm. Returns the hash value
 * from the RX pseudo-header for the Toeplitz / packed-stream hash
 * algorithms (body between the case labels and #endif is missing here).
 */
811 __checkReturn uint32_t
814 __in efx_rx_hash_alg_t func,
815 __in uint8_t *buffer)
817 _NOTE(ARGUNUSED(enp))
820 case EFX_RX_HASHALG_PACKED_STREAM:
821 case EFX_RX_HASHALG_TOEPLITZ:
832 #endif /* EFSYS_OPT_RX_SCALE */
834 #if EFSYS_OPT_RX_PACKED_STREAM
836 * Fake length for RXQ descriptors in packed stream mode
837 * to make hardware happy
839 #define EFX_RXQ_PACKED_STREAM_FAKE_BUF_SIZE 32
/*
 * NOTE(review): function name line missing from this gappy listing;
 * presumably ef10_rx_qpost() — confirm against the full file.
 *
 * Writes 'ndescs' RX descriptors (buffer address + byte count) into the
 * queue's descriptor ring starting at index 'added', wrapping with
 * er_mask. In packed stream mode the real buffer size would truncate to
 * zero in the descriptor byte-count field, so a small fake size is used
 * instead (see comment below).
 */
845 __in_ecount(ndescs) efsys_dma_addr_t *addrp,
847 __in unsigned int ndescs,
848 __in unsigned int completed,
849 __in unsigned int added)
856 _NOTE(ARGUNUSED(completed))
858 #if EFSYS_OPT_RX_PACKED_STREAM
860 * Real size of the buffer does not fit into ESF_DZ_RX_KER_BYTE_CNT
861 * and equal to 0 after applying mask. Hardware does not like it.
863 if (erp->er_ev_qstate->eers_rx_packed_stream)
864 size = EFX_RXQ_PACKED_STREAM_FAKE_BUF_SIZE;
867 /* The client driver must not overfill the queue */
868 EFSYS_ASSERT3U(added - completed + ndescs, <=,
869 EFX_RXQ_LIMIT(erp->er_mask + 1));
871 id = added & (erp->er_mask);
872 for (i = 0; i < ndescs; i++) {
873 EFSYS_PROBE4(rx_post, unsigned int, erp->er_index,
874 unsigned int, id, efsys_dma_addr_t, addrp[i],
/* Split the 64-bit DMA address across the two descriptor dwords. */
877 EFX_POPULATE_QWORD_3(qword,
878 ESF_DZ_RX_KER_BYTE_CNT, (uint32_t)(size),
879 ESF_DZ_RX_KER_BUF_ADDR_DW0,
880 (uint32_t)(addrp[i] & 0xffffffff),
881 ESF_DZ_RX_KER_BUF_ADDR_DW1,
882 (uint32_t)(addrp[i] >> 32));
884 offset = id * sizeof (efx_qword_t);
885 EFSYS_MEM_WRITEQ(erp->er_esmp, offset, &qword);
887 id = (id + 1) & (erp->er_mask);
/*
 * NOTE(review): signature line(s) missing from this gappy listing;
 * presumably ef10_rx_qpush() — confirm against the full file.
 *
 * Publishes posted RX descriptors to the hardware: aligns the write
 * pointer to the hardware-required granularity, syncs descriptor memory
 * before the doorbell write, then rings the RX_DESC_UPD doorbell.
 */
894 __in unsigned int added,
895 __inout unsigned int *pushedp)
897 efx_nic_t *enp = erp->er_enp;
898 unsigned int pushed = *pushedp;
902 /* Hardware has alignment restriction for WPTR */
903 wptr = P2ALIGN(added, EF10_RX_WPTR_ALIGN);
909 /* Push the populated descriptors out */
910 wptr &= erp->er_mask;
912 EFX_POPULATE_DWORD_1(dword, ERF_DZ_RX_DESC_WPTR, wptr);
914 /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
915 EFX_DMA_SYNC_QUEUE_FOR_DEVICE(erp->er_esmp, erp->er_mask + 1,
916 wptr, pushed & erp->er_mask);
917 EFSYS_PIO_WRITE_BARRIER();
918 EFX_BAR_VI_WRITED(enp, ER_DZ_RX_DESC_UPD_REG,
919 erp->er_index, &dword, B_FALSE);
922 #if EFSYS_OPT_RX_PACKED_STREAM
/*
 * Returns the accumulated packed-stream buffer credits to the firmware
 * via a magic doorbell write, clamped to the maximum allowed, then
 * resets the accumulated count. No-op if no credits are pending.
 */
925 ef10_rx_qpush_ps_credits(
928 efx_nic_t *enp = erp->er_enp;
930 efx_evq_rxq_state_t *rxq_state = erp->er_ev_qstate;
933 EFSYS_ASSERT(rxq_state->eers_rx_packed_stream);
935 if (rxq_state->eers_rx_packed_stream_credits == 0)
939 * It is a bug if we think that FW has utilized more
940 * credits than it is allowed to have (maximum). However,
941 * make sure that we do not credit more than maximum anyway.
943 credits = MIN(rxq_state->eers_rx_packed_stream_credits,
944 EFX_RX_PACKED_STREAM_MAX_CREDITS);
945 EFX_POPULATE_DWORD_3(dword,
946 ERF_DZ_RX_DESC_MAGIC_DOORBELL, 1,
947 ERF_DZ_RX_DESC_MAGIC_CMD,
948 ERE_DZ_RX_DESC_MAGIC_CMD_PS_CREDITS,
949 ERF_DZ_RX_DESC_MAGIC_DATA, credits);
950 EFX_BAR_VI_WRITED(enp, ER_DZ_RX_DESC_UPD_REG,
951 erp->er_index, &dword, B_FALSE);
953 rxq_state->eers_rx_packed_stream_credits = 0;
957 * In accordance with SF-112241-TC the received data has the following layout:
958 * - 8 byte pseudo-header which consist of:
959 * - 4 byte little-endian timestamp
960 * - 2 byte little-endian captured length in bytes
961 * - 2 byte little-endian original packet length in bytes
962 * - captured packet bytes
963 * - optional padding to align to 64 bytes boundary
964 * - 64 bytes scratch space for the host software
/*
 * Parses one packed-stream packet at 'current_offset' in 'buffer':
 * returns a pointer to the packet payload, the original packet length,
 * the aligned offset of the next packet, and the prefix timestamp.
 * Also accrues a buffer credit each time the parse position crosses a
 * per-credit memory boundary.
 */
966 __checkReturn uint8_t *
967 ef10_rx_qps_packet_info(
969 __in uint8_t *buffer,
970 __in uint32_t buffer_length,
971 __in uint32_t current_offset,
972 __out uint16_t *lengthp,
973 __out uint32_t *next_offsetp,
974 __out uint32_t *timestamp)
979 efx_evq_rxq_state_t *rxq_state = erp->er_ev_qstate;
981 EFSYS_ASSERT(rxq_state->eers_rx_packed_stream);
983 buffer += current_offset;
984 pkt_start = buffer + EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE;
/* Decode the 8-byte packed-stream prefix fields. */
986 qwordp = (efx_qword_t *)buffer;
987 *timestamp = EFX_QWORD_FIELD(*qwordp, ES_DZ_PS_RX_PREFIX_TSTAMP);
988 *lengthp = EFX_QWORD_FIELD(*qwordp, ES_DZ_PS_RX_PREFIX_ORIG_LEN);
989 buf_len = EFX_QWORD_FIELD(*qwordp, ES_DZ_PS_RX_PREFIX_CAP_LEN);
/* Round prefix+captured data up to the stream alignment boundary. */
991 buf_len = P2ROUNDUP(buf_len + EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE,
992 EFX_RX_PACKED_STREAM_ALIGNMENT);
994 current_offset + buf_len + EFX_RX_PACKED_STREAM_ALIGNMENT;
996 EFSYS_ASSERT3U(*next_offsetp, <=, buffer_length);
997 EFSYS_ASSERT3U(current_offset + *lengthp, <, *next_offsetp);
/* Credit the firmware once per EFX_RX_PACKED_STREAM_MEM_PER_CREDIT crossed. */
999 if ((*next_offsetp ^ current_offset) &
1000 EFX_RX_PACKED_STREAM_MEM_PER_CREDIT)
1001 rxq_state->eers_rx_packed_stream_credits++;
/*
 * NOTE(review): function name line missing from this gappy listing;
 * presumably ef10_rx_qflush() — confirm against the full file.
 *
 * Flushes the RX queue by issuing FINI_RXQ for its instance; see the
 * comment below for the EALREADY (MC reboot) case.
 */
1009 __checkReturn efx_rc_t
1011 __in efx_rxq_t *erp)
1013 efx_nic_t *enp = erp->er_enp;
1016 if ((rc = efx_mcdi_fini_rxq(enp, erp->er_index)) != 0)
1023 * EALREADY is not an error, but indicates that the MC has rebooted and
1024 * that the RXQ has already been destroyed. Callers need to know that
1025 * the RXQ flush has completed to avoid waiting until timeout for a
1026 * flush done event that will not be delivered.
1029 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * NOTE(review): name line missing; a no-op taking only the RXQ —
 * presumably ef10_rx_qenable() (EF10 queues are enabled at creation).
 * Confirm against the full file.
 */
1036 __in efx_rxq_t *erp)
1039 _NOTE(ARGUNUSED(erp))
/*
 * NOTE(review): function name line missing from this gappy listing;
 * presumably ef10_rx_qcreate() — confirm against the full file.
 *
 * Creates an RX queue: validates descriptor count and queue index,
 * decodes per-type parameters (default, packed stream buffer size, or
 * equal-stride super-buffer geometry), checks firmware capability
 * support, resolves scatter/inner-class flags, then issues INIT_RXQ
 * and binds the queue to its event queue label.
 */
1043 __checkReturn efx_rc_t
1045 __in efx_nic_t *enp,
1046 __in unsigned int index,
1047 __in unsigned int label,
1048 __in efx_rxq_type_t type,
1049 __in const efx_rxq_type_data_t *type_data,
1050 __in efsys_mem_t *esmp,
1053 __in unsigned int flags,
1054 __in efx_evq_t *eep,
1055 __in efx_rxq_t *erp)
1057 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1059 boolean_t disable_scatter;
1060 boolean_t want_inner_classes;
1061 unsigned int ps_buf_size;
1062 uint32_t es_bufs_per_desc = 0;
1063 uint32_t es_max_dma_len = 0;
1064 uint32_t es_buf_stride = 0;
1065 uint32_t hol_block_timeout = 0;
1067 _NOTE(ARGUNUSED(id, erp, type_data))
1069 EFX_STATIC_ASSERT(EFX_EV_RX_NLABELS == (1 << ESF_DZ_RX_QLABEL_WIDTH));
1070 EFSYS_ASSERT3U(label, <, EFX_EV_RX_NLABELS);
1071 EFSYS_ASSERT3U(enp->en_rx_qcount + 1, <, encp->enc_rxq_limit);
1073 EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MAXNDESCS));
1074 EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MINNDESCS));
/* Ring size must be a power of two within the supported range. */
1076 if (!ISP2(ndescs) ||
1077 (ndescs < EFX_RXQ_MINNDESCS) || (ndescs > EFX_RXQ_MAXNDESCS)) {
1081 if (index >= encp->enc_rxq_limit) {
/* Decode queue-type specific parameters. */
1087 case EFX_RXQ_TYPE_DEFAULT:
1090 #if EFSYS_OPT_RX_PACKED_STREAM
1091 case EFX_RXQ_TYPE_PACKED_STREAM:
1092 switch (type_data->ertd_packed_stream.eps_buf_size) {
1093 case EFX_RXQ_PACKED_STREAM_BUF_SIZE_1M:
1094 ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M;
1096 case EFX_RXQ_PACKED_STREAM_BUF_SIZE_512K:
1097 ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K;
1099 case EFX_RXQ_PACKED_STREAM_BUF_SIZE_256K:
1100 ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K;
1102 case EFX_RXQ_PACKED_STREAM_BUF_SIZE_128K:
1103 ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K;
1105 case EFX_RXQ_PACKED_STREAM_BUF_SIZE_64K:
1106 ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K;
1113 #endif /* EFSYS_OPT_RX_PACKED_STREAM */
1114 #if EFSYS_OPT_RX_ES_SUPER_BUFFER
1115 case EFX_RXQ_TYPE_ES_SUPER_BUFFER:
1118 type_data->ertd_es_super_buffer.eessb_bufs_per_desc;
1120 type_data->ertd_es_super_buffer.eessb_max_dma_len;
1122 type_data->ertd_es_super_buffer.eessb_buf_stride;
1124 type_data->ertd_es_super_buffer.eessb_hol_block_timeout;
1126 #endif /* EFSYS_OPT_RX_ES_SUPER_BUFFER */
1132 #if EFSYS_OPT_RX_PACKED_STREAM
1133 if (ps_buf_size != 0) {
1134 /* Check if datapath firmware supports packed stream mode */
1135 if (encp->enc_rx_packed_stream_supported == B_FALSE) {
1139 /* Check if packed stream allows configurable buffer sizes */
1140 if ((ps_buf_size != MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M) &&
1141 (encp->enc_rx_var_packed_stream_supported == B_FALSE)) {
1146 #else /* EFSYS_OPT_RX_PACKED_STREAM */
1147 EFSYS_ASSERT(ps_buf_size == 0);
1148 #endif /* EFSYS_OPT_RX_PACKED_STREAM */
1150 #if EFSYS_OPT_RX_ES_SUPER_BUFFER
/* Equal-stride parameters must be supported and properly aligned. */
1151 if (es_bufs_per_desc > 0) {
1152 if (encp->enc_rx_es_super_buffer_supported == B_FALSE) {
1156 if (!IS_P2ALIGNED(es_max_dma_len,
1157 EFX_RX_ES_SUPER_BUFFER_BUF_ALIGNMENT)) {
1161 if (!IS_P2ALIGNED(es_buf_stride,
1162 EFX_RX_ES_SUPER_BUFFER_BUF_ALIGNMENT)) {
1167 #else /* EFSYS_OPT_RX_ES_SUPER_BUFFER */
1168 EFSYS_ASSERT(es_bufs_per_desc == 0);
1169 #endif /* EFSYS_OPT_RX_ES_SUPER_BUFFER */
1171 /* Scatter can only be disabled if the firmware supports doing so */
1172 if (flags & EFX_RXQ_FLAG_SCATTER)
1173 disable_scatter = B_FALSE;
1175 disable_scatter = encp->enc_rx_disable_scatter_supported;
1177 if (flags & EFX_RXQ_FLAG_INNER_CLASSES)
1178 want_inner_classes = B_TRUE;
1180 want_inner_classes = B_FALSE;
1182 if ((rc = efx_mcdi_init_rxq(enp, ndescs, eep->ee_index, label, index,
1183 esmp, disable_scatter, want_inner_classes,
1184 ps_buf_size, es_bufs_per_desc, es_max_dma_len,
1185 es_buf_stride, hol_block_timeout)) != 0)
1189 erp->er_label = label;
/* Bind the queue to its event queue label for RX event demux. */
1191 ef10_ev_rxlabel_init(eep, erp, label, type);
1193 erp->er_ev_qstate = &erp->er_eep->ee_rxq_state[label];
1198 EFSYS_PROBE(fail10);
1199 #if EFSYS_OPT_RX_ES_SUPER_BUFFER
1206 #endif /* EFSYS_OPT_RX_ES_SUPER_BUFFER */
1207 #if EFSYS_OPT_RX_PACKED_STREAM
1212 #endif /* EFSYS_OPT_RX_PACKED_STREAM */
1215 #if EFSYS_OPT_RX_PACKED_STREAM
1218 #endif /* EFSYS_OPT_RX_PACKED_STREAM */
1222 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * NOTE(review): name line missing; presumably ef10_rx_qdestroy() —
 * confirm. Unbinds the queue from its event queue label, drops the
 * NIC's RX queue count, and frees the queue structure.
 */
1229 __in efx_rxq_t *erp)
1231 efx_nic_t *enp = erp->er_enp;
1232 efx_evq_t *eep = erp->er_eep;
1233 unsigned int label = erp->er_label;
1235 ef10_ev_rxlabel_fini(eep, label);
1237 EFSYS_ASSERT(enp->en_rx_qcount != 0);
1238 --enp->en_rx_qcount;
1240 EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp);
/*
 * NOTE(review): name line missing; presumably ef10_rx_fini() — confirm.
 * RX module teardown: frees the default RSS context (if one was
 * allocated at init) and marks RSS unavailable.
 */
1245 __in efx_nic_t *enp)
1247 #if EFSYS_OPT_RX_SCALE
1248 if (enp->en_rss_context_type != EFX_RX_SCALE_UNAVAILABLE)
1249 (void) efx_mcdi_rss_context_free(enp, enp->en_rss_context);
1250 enp->en_rss_context = 0;
1251 enp->en_rss_context_type = EFX_RX_SCALE_UNAVAILABLE;
1253 _NOTE(ARGUNUSED(enp))
1254 #endif /* EFSYS_OPT_RX_SCALE */
1257 #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */