/*-
 * Copyright (c) 2002-2007 Neterion, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <dev/nxge/include/xgehal-ring.h>
#include <dev/nxge/include/xgehal-device.h>
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
static ptrdiff_t
__hal_ring_item_dma_offset(xge_hal_mempool_h mempoolh,
                           void *item)
{
    int memblock_idx;
    void *memblock;

    /* get owner memblock index */
    memblock_idx = __hal_ring_block_memblock_idx(item);

    /* get owner memblock by memblock index */
    memblock = __hal_mempool_memblock(mempoolh, memblock_idx);

    return (char *)item - (char *)memblock;
}
#endif
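/* Resolve an item (RxD block) to its DMA-mapped bus address: look up the
 * owning memblock's DMA object, add the item's byte offset within that
 * memblock, and hand back the memblock's DMA handle via dma_handle. */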
static dma_addr_t
__hal_ring_item_dma_addr(xge_hal_mempool_h mempoolh, void *item,
                         pci_dma_h *dma_handle)
{
    int memblock_idx;
    void *memblock;
    xge_hal_mempool_dma_t *memblock_dma_object;
    ptrdiff_t dma_item_offset;

    /* get owner memblock index */
    memblock_idx = __hal_ring_block_memblock_idx((xge_hal_ring_block_t *) item);

    /* get owner memblock by memblock index */
    memblock = __hal_mempool_memblock((xge_hal_mempool_t *) mempoolh,
                                      memblock_idx);

    /* get memblock DMA object by memblock index */
    memblock_dma_object =
        __hal_mempool_memblock_dma((xge_hal_mempool_t *) mempoolh,
                                   memblock_idx);

    /* calculate offset in the memblock of this item */
    dma_item_offset = (char *)item - (char *)memblock;

    *dma_handle = memblock_dma_object->handle;

    return memblock_dma_object->addr + dma_item_offset;
}
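/* Chain RxD block "from" to RxD block "to" by writing "to"'s DMA start
 * address into the next-block pointer field of "from". item_alloc() below
 * uses this to build the linked list of RxD blocks that the PRC walks. */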
static void
__hal_ring_rxdblock_link(xge_hal_mempool_h mempoolh,
                         xge_hal_ring_t *ring, int from, int to)
{
    xge_hal_ring_block_t *to_item, *from_item;
    dma_addr_t to_dma, from_dma;
    pci_dma_h to_dma_handle, from_dma_handle;

    /* get "from" RxD block */
    from_item = (xge_hal_ring_block_t *)
        __hal_mempool_item((xge_hal_mempool_t *) mempoolh, from);
    xge_assert(from_item);

    /* get "to" RxD block */
    to_item = (xge_hal_ring_block_t *)
        __hal_mempool_item((xge_hal_mempool_t *) mempoolh, to);
    xge_assert(to_item);

    /* return address of the beginning of previous RxD block */
    to_dma = __hal_ring_item_dma_addr(mempoolh, to_item, &to_dma_handle);

    /* set next pointer for this RxD block to point on
     * previous item's DMA start address */
    __hal_ring_block_next_pointer_set(from_item, to_dma);

    /* return "from" RxD block's DMA start address */
    from_dma =
        __hal_ring_item_dma_addr(mempoolh, from_item, &from_dma_handle);

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
    /* we must sync "from" RxD block, so hardware will see it */
    xge_os_dma_sync(ring->channel.pdev,
                    from_dma_handle,
                    from_dma + XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
                    __hal_ring_item_dma_offset(mempoolh, from_item) +
                        XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
                    sizeof(u64),
                    XGE_OS_DMA_DIR_TODEVICE);
#endif

    xge_debug_ring(XGE_TRACE,
        "block%d:0x"XGE_OS_LLXFMT" => block%d:0x"XGE_OS_LLXFMT,
        from, (unsigned long long)from_dma, to,
        (unsigned long long)to_dma);
}
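/* Mempool item-initialization callback, invoked once per RxD block while the
 * pool is being created. It carves the block into RxDs (formatted in reverse
 * order), pre-formats each RxD's private area and Host_Control word, and
 * links the block to the previously allocated one; the final block triggers
 * re-aiming block 0's next pointer at itself, closing the circle. */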
static xge_hal_status_e
__hal_ring_mempool_item_alloc(xge_hal_mempool_h mempoolh,
                              void *memblock,
                              int memblock_index,
                              xge_hal_mempool_dma_t *dma_object,
                              void *item,
                              int index,
                              int is_last,
                              void *userdata)
{
    int i;
    xge_hal_ring_t *ring = (xge_hal_ring_t *)userdata;

    xge_assert(item);
    xge_assert(ring);

    /* format rxds array */
    for (i = ring->rxds_per_block - 1; i >= 0; i--) {
        void *rxdblock_priv;
        xge_hal_ring_rxd_priv_t *rxd_priv;
        xge_hal_ring_rxd_1_t *rxdp;
        int reserve_index = index * ring->rxds_per_block + i;
        int memblock_item_idx;

        ring->reserved_rxds_arr[reserve_index] = (char *)item +
            (ring->rxds_per_block - 1 - i) * ring->rxd_size;

        /* Note: memblock_item_idx is index of the item within
         * the memblock. For instance, in case of three RxD-blocks
         * per memblock this value can be 0, 1 or 2. */
        rxdblock_priv =
            __hal_mempool_item_priv((xge_hal_mempool_t *) mempoolh,
                                    memblock_index, item,
                                    &memblock_item_idx);
        rxdp = (xge_hal_ring_rxd_1_t *)
            ring->reserved_rxds_arr[reserve_index];
        rxd_priv = (xge_hal_ring_rxd_priv_t *) (void *)
            ((char *)rxdblock_priv + ring->rxd_priv_size * i);

        /* pre-format per-RxD Ring's private */
        rxd_priv->dma_offset = (char *)rxdp - (char *)memblock;
        rxd_priv->dma_addr = dma_object->addr + rxd_priv->dma_offset;
        rxd_priv->dma_handle = dma_object->handle;
#ifdef XGE_DEBUG_ASSERT
        rxd_priv->dma_object = dma_object;
#endif

        /* pre-format Host_Control */
#if defined(XGE_HAL_USE_5B_MODE)
        if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
            xge_hal_ring_rxd_5_t *rxdp_5 = (xge_hal_ring_rxd_5_t *)rxdp;
#if defined(XGE_OS_PLATFORM_64BIT)
            xge_assert(memblock_index <= 0xFFFF);
            xge_assert(i <= 0xFFFF);
            /* store memblock's index */
            rxdp_5->host_control = (u32)memblock_index << 16;
            /* store index of memblock's private */
            rxdp_5->host_control |= (u32)(memblock_item_idx *
                                          ring->rxds_per_block + i);
#else
            /* 32-bit platform: the private pointer itself fits */
            rxdp_5->host_control = (u32)rxd_priv;
#endif
        } else {
            /* 1b and 3b modes */
            rxdp->host_control = (u64)(ulong_t)rxd_priv;
        }
#else
        /* 1b and 3b modes */
        rxdp->host_control = (u64)(ulong_t)rxd_priv;
#endif
    }

    __hal_ring_block_memblock_idx_set((xge_hal_ring_block_t *) item,
                                      memblock_index);

    if (is_last) {
        /* link last one with first one */
        __hal_ring_rxdblock_link(mempoolh, ring, 0, index);
    }

    if (index > 0) {
        /* link this RxD block with previous one */
        __hal_ring_rxdblock_link(mempoolh, ring, index, index - 1);
    }

    return XGE_HAL_OK;
}
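/* Reserve, initialize (via the ULD's dtr_init callback, if provided) and
 * post every available descriptor, so the ring is fully provisioned by the
 * time the channel opens. */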
xge_hal_status_e
__hal_ring_initial_replenish(xge_hal_channel_t *channel,
                             xge_hal_channel_reopen_e reopen)
{
    xge_hal_dtr_h dtr = NULL;

    while (xge_hal_channel_dtr_count(channel) > 0) {
        xge_hal_status_e status;

        status = xge_hal_ring_dtr_reserve(channel, &dtr);
        xge_assert(status == XGE_HAL_OK);

        if (channel->dtr_init) {
            status = channel->dtr_init(channel,
                                       dtr, channel->reserve_length,
                                       channel->userdata,
                                       reopen);
            if (status != XGE_HAL_OK) {
                xge_hal_ring_dtr_free(channel, dtr);
                xge_hal_channel_abort(channel,
                                      XGE_HAL_CHANNEL_OC_NORMAL);
                return status;
            }
        }

        xge_hal_ring_dtr_post(channel, dtr);
    }

    return XGE_HAL_OK;
}
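/* Open a ring channel: resolve the per-queue configuration, size the RxDs
 * and their private areas, allocate the reserve array and the RxD-block
 * mempool (whose item_alloc callback formats and links the blocks), then
 * initialize the channel and optionally pre-post all descriptors. */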
xge_hal_status_e
__hal_ring_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr)
{
    xge_hal_status_e status;
    xge_hal_device_t *hldev;
    xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
    xge_hal_ring_queue_t *queue;

    /* Note: at this point we have channel.devh and channel.pdev
     *       pre-set only! */

    hldev = (xge_hal_device_t *)ring->channel.devh;
    ring->config = &hldev->config.ring;
    queue = &ring->config->queue[attr->post_qid];
    ring->indicate_max_pkts = queue->indicate_max_pkts;
    ring->buffer_mode = queue->buffer_mode;

    xge_assert(queue->configured);

#if defined(XGE_HAL_RX_MULTI_RESERVE)
    xge_os_spin_lock_init(&ring->channel.reserve_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
    xge_os_spin_lock_init_irq(&ring->channel.reserve_lock, hldev->irqh);
#endif
#if defined(XGE_HAL_RX_MULTI_POST)
    xge_os_spin_lock_init(&ring->channel.post_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
    xge_os_spin_lock_init_irq(&ring->channel.post_lock, hldev->irqh);
#endif

    ring->rxd_size = XGE_HAL_RING_RXD_SIZEOF(queue->buffer_mode);
    ring->rxd_priv_size =
        sizeof(xge_hal_ring_rxd_priv_t) + attr->per_dtr_space;

    /* how many RxDs can fit into one block. Depends on configured
     * buffer_mode. */
    ring->rxds_per_block = XGE_HAL_RING_RXDS_PER_BLOCK(queue->buffer_mode);

    /* calculate actual RxD block private size */
    ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;

    ring->reserved_rxds_arr = (void **) xge_os_malloc(ring->channel.pdev,
        sizeof(void *) * queue->max * ring->rxds_per_block);

    if (ring->reserved_rxds_arr == NULL) {
        __hal_ring_close(channelh);
        return XGE_HAL_ERR_OUT_OF_MEMORY;
    }

    ring->mempool = __hal_mempool_create(
                        hldev->pdev,
                        ring->config->memblock_size,
                        XGE_HAL_RING_RXDBLOCK_SIZE,
                        ring->rxdblock_priv_size,
                        queue->initial, queue->max,
                        __hal_ring_mempool_item_alloc,
                        NULL, /* nothing to free */
                        ring);
    if (ring->mempool == NULL) {
        __hal_ring_close(channelh);
        return XGE_HAL_ERR_OUT_OF_MEMORY;
    }

    status = __hal_channel_initialize(channelh,
                                      attr,
                                      ring->reserved_rxds_arr,
                                      queue->initial * ring->rxds_per_block,
                                      queue->max * ring->rxds_per_block,
                                      0 /* no threshold for ring! */);
    if (status != XGE_HAL_OK) {
        __hal_ring_close(channelh);
        return status;
    }

    /* sanity check that everything formatted ok */
    xge_assert(ring->reserved_rxds_arr[0] ==
               (char *)ring->mempool->items_arr[0] +
               (ring->rxds_per_block * ring->rxd_size - ring->rxd_size));

    /* Note:
     * Specifying dtr_init callback means two things:
     * 1) dtrs need to be initialized by ULD at channel-open time;
     * 2) dtrs need to be posted at channel-open time
     *    (that's what the initial_replenish() below does)
     * Currently we don't have a case when the 1) is done without the 2).
     */
    if (ring->channel.dtr_init) {
        if ((status = __hal_ring_initial_replenish(
                          (xge_hal_channel_t *) channelh,
                          XGE_HAL_CHANNEL_OC_NORMAL))
                      != XGE_HAL_OK) {
            __hal_ring_close(channelh);
            return status;
        }
    }

    /* initial replenish will increment the counter in its post() routine,
     * we have to reset it */
    ring->channel.usage_cnt = 0;

    return XGE_HAL_OK;
}
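/* Tear down a ring channel: destroy the RxD-block mempool, free the reserve
 * array, terminate the channel and release the spin locks created at open
 * time. The NULL checks make this safe to call from the error paths of
 * __hal_ring_open() above. */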
void
__hal_ring_close(xge_hal_channel_h channelh)
{
    xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
    xge_hal_ring_queue_t *queue;
#if defined(XGE_HAL_RX_MULTI_RESERVE)||defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)||\
    defined(XGE_HAL_RX_MULTI_POST) || defined(XGE_HAL_RX_MULTI_POST_IRQ)
    xge_hal_device_t *hldev = (xge_hal_device_t *)ring->channel.devh;
#endif

    xge_assert(ring->channel.pdev);

    queue = &ring->config->queue[ring->channel.post_qid];

    if (ring->mempool) {
        __hal_mempool_destroy(ring->mempool);
    }

    if (ring->reserved_rxds_arr) {
        xge_os_free(ring->channel.pdev,
                    ring->reserved_rxds_arr,
                    sizeof(void *) * queue->max * ring->rxds_per_block);
    }

    __hal_channel_terminate(channelh);

#if defined(XGE_HAL_RX_MULTI_RESERVE)
    xge_os_spin_lock_destroy(&ring->channel.reserve_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
    xge_os_spin_lock_destroy_irq(&ring->channel.reserve_lock, hldev->pdev);
#endif
#if defined(XGE_HAL_RX_MULTI_POST)
    xge_os_spin_lock_destroy(&ring->channel.post_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
    xge_os_spin_lock_destroy_irq(&ring->channel.post_lock, hldev->pdev);
#endif
}
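/* Program and start the PRC (Packet Receive Controller) for this ring:
 * point prc_rxd0_n at the first RxD block, select the buffer mode, RxD
 * backoff interval and no-snoop behavior in prc_ctrl_n, and configure the
 * Receive Protocol Assist options in rx_pa_cfg. */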
void
__hal_ring_prc_enable(xge_hal_channel_h channelh)
{
    xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
    xge_hal_device_t *hldev = (xge_hal_device_t *)ring->channel.devh;
    xge_hal_pci_bar0_t *bar0;
    u64 val64;
    void *first_block;
    int block_num;
    xge_hal_ring_queue_t *queue;
    pci_dma_h dma_handle;

    xge_assert(ring);
    xge_assert(ring->channel.pdev);
    bar0 = (xge_hal_pci_bar0_t *) (void *)
        ((xge_hal_device_t *)ring->channel.devh)->bar0;

    queue = &ring->config->queue[ring->channel.post_qid];
    xge_assert(queue->buffer_mode == 1 ||
               queue->buffer_mode == 3 ||
               queue->buffer_mode == 5);

    /* last block in fact becomes first. This is just the way it
     * is filled up and linked by item_alloc() */

    block_num = queue->initial;
    first_block = __hal_mempool_item(ring->mempool, block_num - 1);
    val64 = __hal_ring_item_dma_addr(ring->mempool,
                                     first_block, &dma_handle);
    xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
                           val64, &bar0->prc_rxd0_n[ring->channel.post_qid]);

    xge_debug_ring(XGE_TRACE,
        "ring%d PRC DMA addr 0x"XGE_OS_LLXFMT" initialized",
        ring->channel.post_qid, (unsigned long long)val64);

    val64 = xge_os_pio_mem_read64(ring->channel.pdev,
        ring->channel.regh0, &bar0->prc_ctrl_n[ring->channel.post_qid]);
    if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC &&
        !queue->rth_en) {
        val64 |= XGE_HAL_PRC_CTRL_RTH_DISABLE;
    }
    val64 |= XGE_HAL_PRC_CTRL_RC_ENABLED;

    val64 |= vBIT((queue->buffer_mode >> 1), 14, 2); /* 1,3 or 5 => 0,1 or 2 */
    val64 &= ~XGE_HAL_PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
    val64 |= XGE_HAL_PRC_CTRL_RXD_BACKOFF_INTERVAL(
        (hldev->config.pci_freq_mherz * queue->backoff_interval_us));

    /* Beware: no snoop by the bridge if (no_snoop_bits) */
    val64 |= XGE_HAL_PRC_CTRL_NO_SNOOP(queue->no_snoop_bits);

    /* Herc: always use group_reads */
    if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
        val64 |= XGE_HAL_PRC_CTRL_GROUP_READS;

    if (hldev->config.bimodal_interrupts)
        if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
            val64 |= XGE_HAL_PRC_CTRL_BIMODAL_INTERRUPT;

    xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
                           val64, &bar0->prc_ctrl_n[ring->channel.post_qid]);

    /* Configure Receive Protocol Assist */
    val64 = xge_os_pio_mem_read64(ring->channel.pdev,
                                  ring->channel.regh0, &bar0->rx_pa_cfg);
    val64 |= XGE_HAL_RX_PA_CFG_SCATTER_MODE(ring->config->scatter_mode);
    val64 |= (XGE_HAL_RX_PA_CFG_IGNORE_SNAP_OUI |
              XGE_HAL_RX_PA_CFG_IGNORE_LLC_CTRL);
    /* Clean STRIP_VLAN_TAG bit and set as config from upper layer */
    val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
    val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(ring->config->strip_vlan_tag);

    xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
                           val64, &bar0->rx_pa_cfg);

    xge_debug_ring(XGE_TRACE, "ring%d enabled in buffer_mode %d",
                   ring->channel.post_qid, queue->buffer_mode);
}
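/* Stop the PRC for this ring by clearing RC_ENABLED in prc_ctrl_n; the rest
 * of the control register is left untouched. */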
void
__hal_ring_prc_disable(xge_hal_channel_h channelh)
{
    xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
    xge_hal_pci_bar0_t *bar0;
    u64 val64;

    xge_assert(ring);
    xge_assert(ring->channel.pdev);
    bar0 = (xge_hal_pci_bar0_t *) (void *)
        ((xge_hal_device_t *)ring->channel.devh)->bar0;

    val64 = xge_os_pio_mem_read64(ring->channel.pdev,
                                  ring->channel.regh0,
                                  &bar0->prc_ctrl_n[ring->channel.post_qid]);
    val64 &= ~((u64) XGE_HAL_PRC_CTRL_RC_ENABLED);
    xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
                           val64, &bar0->prc_ctrl_n[ring->channel.post_qid]);
}
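/* One-time Rx-side hardware bring-up: distribute priorities and on-chip
 * DRAM among the configured queues, activate default QoS-based steering
 * when no explicit RTS steering is enabled, zero the frame length of
 * unconfigured queues, enable MC-RLDRAM, and bind MSI-X vectors to the
 * ring channels. */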
void
__hal_ring_hw_initialize(xge_hal_device_h devh)
{
    xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
    xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
    u64 val64;
    int i, j;

    /* Rx DMA initialization. */

    /* Configure the priority of each configured receive queue */
    val64 = 0;
    for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
        if (!hldev->config.ring.queue[i].configured)
            continue;
        val64 |= vBIT(hldev->config.ring.queue[i].priority,
                      (5 + (i * 8)), 3);
    }
    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
                           &bar0->rx_queue_priority);
    xge_debug_ring(XGE_TRACE, "Rings priority configured to 0x"XGE_OS_LLXFMT,
                   (unsigned long long)val64);

    /* Configuring ring queues according to per-ring configuration */
    val64 = 0;
    for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
        if (!hldev->config.ring.queue[i].configured)
            continue;
        val64 |= vBIT(hldev->config.ring.queue[i].dram_size_mb, (i*8), 8);
    }
    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
                           &bar0->rx_queue_cfg);
    xge_debug_ring(XGE_TRACE, "DRAM configured to 0x"XGE_OS_LLXFMT,
                   (unsigned long long)val64);

    if (!hldev->config.rts_qos_en &&
        !hldev->config.rts_port_en &&
        !hldev->config.rts_mac_en) {

        /*
         * Activate default (QoS-based) Rx steering
         */

        val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
                                      &bar0->rts_qos_steering);
        for (j = 0; j < 8 /* QoS max */; j++) {
            for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
                if (!hldev->config.ring.queue[i].configured)
                    continue;
                if (!hldev->config.ring.queue[i].rth_en)
                    val64 |= (BIT(i) >> (j*8));
            }
        }
        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
                               &bar0->rts_qos_steering);
        xge_debug_ring(XGE_TRACE,
            "QoS steering configured to 0x"XGE_OS_LLXFMT,
            (unsigned long long)val64);
    }

    /* Note: If a queue does not exist, it should be assigned a maximum
     * length of zero. Otherwise, packet loss could occur.
     *
     * All configured rings will be properly set at device open time
     * by utilizing device_mtu_set() API call. */
    for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
        if (hldev->config.ring.queue[i].configured)
            continue;
        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
                               &bar0->rts_frm_len_n[i]);
    }
#ifdef XGE_HAL_HERC_EMULATION
    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
                                  ((u8 *)bar0 + 0x2e60)); /* mc_rldram_mrs_herc */
    val64 |= 0x0000000000010000ULL;
    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
                           ((u8 *)bar0 + 0x2e60));

    val64 |= 0x003a000000000000ULL;
    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
                           ((u8 *)bar0 + 0x2e40)); /* mc_rldram_ref_herc */
#endif

    /* now enabling MC-RLDRAM after setting MC_QUEUE sizes */
    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
                                  &bar0->mc_rldram_mrs);
    val64 |= XGE_HAL_MC_RLDRAM_QUEUE_SIZE_ENABLE |
             XGE_HAL_MC_RLDRAM_MRS_ENABLE;
    __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64>>32),
                                &bar0->mc_rldram_mrs);
    xge_os_wmb();
    __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, (u32)val64,
                                &bar0->mc_rldram_mrs);

    /* RLDRAM initialization procedure requires 500us to complete */
    xge_os_mdelay(1);

    /* Temporary fixes for Herc RLDRAM */
    if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
        val64 = XGE_HAL_MC_RLDRAM_SET_REF_PERIOD(0x0279);
        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
                               &bar0->mc_rldram_ref_per_herc);

        val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
                                      &bar0->mc_rldram_mrs_herc);
        xge_debug_ring(XGE_TRACE,
            "default mc_rldram_mrs_herc 0x"XGE_OS_LLXFMT,
            (unsigned long long)val64);

        val64 = 0x0003570003010300ULL;
        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
                               &bar0->mc_rldram_mrs_herc);
    }

    /*
     * Assign MSI-X vectors
     */
    for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
        xge_list_t *item;
        xge_hal_channel_t *channel = NULL;

        if (!hldev->config.ring.queue[i].configured ||
            !hldev->config.ring.queue[i].intr_vector ||
            hldev->config.intr_mode != XGE_HAL_INTR_MODE_MSIX)
            continue;

        /* find the ring channel that posts to this queue */
        xge_list_for_each(item, &hldev->free_channels) {
            xge_hal_channel_t *tmp;
            tmp = xge_container_of(item, xge_hal_channel_t,
                                   item);
            if (tmp->type == XGE_HAL_CHANNEL_TYPE_RING &&
                tmp->post_qid == i) {
                channel = tmp;
                break;
            }
        }

        if (channel) {
            xge_hal_channel_msix_set(channel,
                hldev->config.ring.queue[i].intr_vector);
        }
    }

    xge_debug_ring(XGE_TRACE, "%s", "ring channels initialized");
}
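/* Propagate a new MTU to the hardware: every configured queue gets either
 * its fixed per-queue max_frm_len or the new frame length, and the RMAC
 * maximum payload length is updated to match. */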
void
__hal_ring_mtu_set(xge_hal_device_h devh, int new_frmlen)
{
    int i;
    xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
    xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

    for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
        if (!hldev->config.ring.queue[i].configured)
            continue;
        if (hldev->config.ring.queue[i].max_frm_len !=
            XGE_HAL_RING_USE_MTU) {
            xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
                XGE_HAL_MAC_RTS_FRM_LEN_SET(
                    hldev->config.ring.queue[i].max_frm_len),
                &bar0->rts_frm_len_n[i]);
        } else {
            xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
                XGE_HAL_MAC_RTS_FRM_LEN_SET(new_frmlen),
                &bar0->rts_frm_len_n[i]);
        }
    }

    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
                           XGE_HAL_RMAC_MAX_PYLD_LEN(new_frmlen),
                           &bar0->rmac_max_pyld_len);
}