2 * Copyright (c) 2002-2007 Neterion, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <dev/nxge/include/xgehal-fifo.h>
30 #include <dev/nxge/include/xgehal-device.h>
/*
 * __hal_fifo_mempool_item_alloc - mempool per-item allocation callback for
 * the Tx descriptor-list (TxDL) pool.  Pre-formats the HAL-private area of
 * a freshly carved TxDL (DMA offset/address/handle, owning memblock, chain
 * links), optionally pre-allocates the XGE_HAL_ALIGN_XMIT bounce buffer,
 * and finally hands the new descriptor to the ULD via dtr_init().
 * NOTE(review): this view of the file is missing intermediate source lines
 * (see gaps in the embedded numbering); parameters such as memblock, item
 * and index are declared on lines not visible here.
 */
32 static xge_hal_status_e
33 __hal_fifo_mempool_item_alloc(xge_hal_mempool_h mempoolh,
36 xge_hal_mempool_dma_t *dma_object,
42 int memblock_item_idx;
43 xge_hal_fifo_txdl_priv_t *txdl_priv;
44 xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)item;
45 xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)userdata;
/* Locate the HAL-private area the mempool reserved alongside this item. */
48 txdl_priv = (xge_hal_fifo_txdl_priv_t *) \
49 __hal_mempool_item_priv((xge_hal_mempool_t *) mempoolh,
53 xge_assert(txdl_priv);
55 /* pre-format HAL's TxDL's private */
/* dma_offset is the item's byte offset inside its memblock; the bus
 * address is derived from the memblock's base DMA address. */
56 txdl_priv->dma_offset = (char*)item - (char*)memblock;
57 txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
58 txdl_priv->dma_handle = dma_object->handle;
59 txdl_priv->memblock = memblock;
60 txdl_priv->first_txdp = (xge_hal_fifo_txd_t *)item;
/* Chain/dangling bookkeeping starts out empty for a new TxDL. */
61 txdl_priv->next_txdl_priv = NULL;
62 txdl_priv->dang_txdl = NULL;
63 txdl_priv->dang_frags = 0;
64 txdl_priv->alloc_frags = 0;
66 #ifdef XGE_DEBUG_ASSERT
67 txdl_priv->dma_object = dma_object;
/* host_control carries the private-area pointer back from the hardware
 * descriptor to the HAL on completion. */
69 txdp->host_control = (u64)(ulong_t)txdl_priv;
71 #ifdef XGE_HAL_ALIGN_XMIT
72 txdl_priv->align_vaddr = NULL;
73 txdl_priv->align_dma_addr = (dma_addr_t)0;
75 #ifndef XGE_HAL_ALIGN_XMIT_ALLOC_RT
77 xge_hal_status_e status;
/* Unless runtime allocation is configured, map the alignment bounce
 * buffer up front so the hot Tx path never allocates. */
78 if (fifo->config->alignment_size) {
79 status =__hal_fifo_dtr_align_alloc_map(fifo, txdp);
80 if (status != XGE_HAL_OK) {
82 "align buffer[%d] %d bytes, status %d",
/* Let the upper-layer driver initialize its part of the descriptor. */
93 if (fifo->channel.dtr_init) {
94 fifo->channel.dtr_init(fifo, (xge_hal_dtr_h)txdp, index,
95 fifo->channel.userdata, XGE_HAL_CHANNEL_OC_NORMAL);
/*
 * __hal_fifo_mempool_item_free - mempool per-item destruction callback for
 * the TxDL pool.  Mirror of __hal_fifo_mempool_item_alloc(): when
 * XGE_HAL_ALIGN_XMIT is enabled it unmaps and frees the per-TxDL alignment
 * bounce buffer.  Interior lines are missing from this view.
 */
102 static xge_hal_status_e
103 __hal_fifo_mempool_item_free(xge_hal_mempool_h mempoolh,
106 xge_hal_mempool_dma_t *dma_object,
112 int memblock_item_idx;
113 xge_hal_fifo_txdl_priv_t *txdl_priv;
114 #ifdef XGE_HAL_ALIGN_XMIT
115 xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)userdata;
/* Recover the HAL-private area that item_alloc() formatted. */
120 txdl_priv = (xge_hal_fifo_txdl_priv_t *) \
121 __hal_mempool_item_priv((xge_hal_mempool_t *) mempoolh,
125 xge_assert(txdl_priv);
127 #ifdef XGE_HAL_ALIGN_XMIT
128 if (fifo->config->alignment_size) {
/* Unmap before freeing; clear the fields so a double free/unmap of
 * the same TxDL is harmless. */
129 if (txdl_priv->align_dma_addr != 0) {
130 xge_os_dma_unmap(fifo->channel.pdev,
131 txdl_priv->align_dma_handle,
132 txdl_priv->align_dma_addr,
134 XGE_OS_DMA_DIR_TODEVICE);
136 txdl_priv->align_dma_addr = 0;
139 if (txdl_priv->align_vaddr != NULL) {
140 xge_os_dma_free(fifo->channel.pdev,
141 txdl_priv->align_vaddr,
143 &txdl_priv->align_dma_acch,
144 &txdl_priv->align_dma_handle);
146 txdl_priv->align_vaddr = NULL;
/*
 * __hal_fifo_open - open a Tx FIFO channel.
 * Binds the channel to its device/queue configuration, initializes the
 * reserve/post spinlocks, computes cacheline-aligned TxDL and private-area
 * sizes, creates the backing mempool (which pre-formats every TxDL via the
 * item_alloc callback), initializes the channel, then reverses the reserve
 * array so descriptors are handed out in ascending order.
 * NOTE(review): interior lines (error paths, some assignments) are missing
 * from this view; comments below describe only what is visible.
 */
155 __hal_fifo_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr)
157 xge_hal_device_t *hldev;
158 xge_hal_status_e status;
159 xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
160 xge_hal_fifo_queue_t *queue;
161 int i, txdl_size, max_arr_index, mid_point;
164 hldev = (xge_hal_device_t *)fifo->channel.devh;
165 fifo->config = &hldev->config.fifo;
166 queue = &fifo->config->queue[attr->post_qid];
168 #if defined(XGE_HAL_TX_MULTI_RESERVE)
169 xge_os_spin_lock_init(&fifo->channel.reserve_lock, hldev->pdev);
170 #elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
171 xge_os_spin_lock_init_irq(&fifo->channel.reserve_lock, hldev->irqh);
/* Xena shares one device-wide post lock across FIFOs; otherwise each
 * channel gets its own post lock. */
173 #if defined(XGE_HAL_TX_MULTI_POST)
174 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
175 fifo->post_lock_ptr = &hldev->xena_post_lock;
177 xge_os_spin_lock_init(&fifo->channel.post_lock, hldev->pdev);
178 fifo->post_lock_ptr = &fifo->channel.post_lock;
180 #elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
181 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
182 fifo->post_lock_ptr = &hldev->xena_post_lock;
184 xge_os_spin_lock_init_irq(&fifo->channel.post_lock,
186 fifo->post_lock_ptr = &fifo->channel.post_lock;
/* NOTE(review): the assignment target for this expression is on a
 * missing line (presumably the total alignment-buffer size). */
191 fifo->config->alignment_size * fifo->config->max_aligned_frags;
193 /* Initializing the BAR1 address as the start of
194 * the FIFO queue pointer and as a location of FIFO control
197 (xge_hal_fifo_hw_pair_t *) (void *)(hldev->bar1 +
198 (attr->post_qid * XGE_HAL_FIFO_HW_PAIR_OFFSET));
200 /* apply "interrupts per txdl" attribute */
201 fifo->interrupt_type = XGE_HAL_TXD_INT_TYPE_UTILZ;
203 fifo->interrupt_type = XGE_HAL_TXD_INT_TYPE_PER_LIST;
205 fifo->no_snoop_bits =
206 (int)(XGE_HAL_TX_FIFO_NO_SNOOP(queue->no_snoop_bits));
209 * FIFO memory management strategy:
211 * TxDL splitted into three independent parts:
213 * - TxD HAL private part
214 * - upper layer private part
216 * Adaptative memory allocation used. i.e. Memory allocated on
217 * demand with the size which will fit into one memory block.
218 * One memory block may contain more than one TxDL. In simple case
219 * memory block size can be equal to CPU page size. On more
220 * sophisticated OS's memory block can be contigious across
223 * During "reserve" operations more memory can be allocated on demand
224 * for example due to FIFO full condition.
226 * Pool of memory memblocks never shrinks except __hal_fifo_close
227 * routine which will essentially stop channel and free the resources.
230 /* TxDL common private size == TxDL private + ULD private */
231 fifo->priv_size = sizeof(xge_hal_fifo_txdl_priv_t) +
/* Round the private size up to a whole number of cachelines. */
233 fifo->priv_size = ((fifo->priv_size + __xge_os_cacheline_size -1) /
234 __xge_os_cacheline_size) *
235 __xge_os_cacheline_size;
237 /* recompute txdl size to be cacheline aligned */
238 fifo->txdl_size = fifo->config->max_frags * sizeof(xge_hal_fifo_txd_t);
239 txdl_size = ((fifo->txdl_size + __xge_os_cacheline_size - 1) /
240 __xge_os_cacheline_size) * __xge_os_cacheline_size;
242 if (fifo->txdl_size != txdl_size)
243 xge_debug_fifo(XGE_ERR, "cacheline > 128 ( ?? ): %d, %d, %d, %d",
244 fifo->config->max_frags, fifo->txdl_size, txdl_size,
245 __xge_os_cacheline_size);
247 fifo->txdl_size = txdl_size;
249 /* since dtr_init() callback will be called from item_alloc(),
250 * the same way channels userdata might be used prior to
251 * channel_initialize() */
252 fifo->channel.dtr_init = attr->dtr_init;
253 fifo->channel.userdata = attr->userdata;
254 fifo->txdl_per_memblock = fifo->config->memblock_size /
/* Mempool creation formats every TxDL via the item callbacks above. */
257 fifo->mempool = __hal_mempool_create(hldev->pdev,
258 fifo->config->memblock_size,
263 __hal_fifo_mempool_item_alloc,
264 __hal_fifo_mempool_item_free,
266 if (fifo->mempool == NULL) {
267 return XGE_HAL_ERR_OUT_OF_MEMORY;
270 status = __hal_channel_initialize(channelh, attr,
271 (void **) __hal_mempool_items_arr(fifo->mempool),
272 queue->initial, queue->max,
273 fifo->config->reserve_threshold);
274 if (status != XGE_HAL_OK) {
/* Roll back everything allocated so far on failure. */
275 __hal_fifo_close(channelh);
278 xge_debug_fifo(XGE_TRACE,
279 "DTR reserve_length:%d reserve_top:%d\n"
280 "max_frags:%d reserve_threshold:%d\n"
281 "memblock_size:%d alignment_size:%d max_aligned_frags:%d",
282 fifo->channel.reserve_length, fifo->channel.reserve_top,
283 fifo->config->max_frags, fifo->config->reserve_threshold,
284 fifo->config->memblock_size, fifo->config->alignment_size,
285 fifo->config->max_aligned_frags);
287 #ifdef XGE_DEBUG_ASSERT
288 for ( i = 0; i < fifo->channel.reserve_length; i++) {
289 xge_debug_fifo(XGE_TRACE, "DTR before reversing index:%d"
290 " handle:%p", i, fifo->channel.reserve_arr[i]);
294 xge_assert(fifo->channel.reserve_length);
295 /* reverse the FIFO dtr array */
/* In-place swap of [0..max_arr_index]; reserve_top entries at the tail
 * are excluded from the reversal. */
296 max_arr_index = fifo->channel.reserve_length - 1;
297 max_arr_index -=fifo->channel.reserve_top;
298 xge_assert(max_arr_index);
299 mid_point = (fifo->channel.reserve_length - fifo->channel.reserve_top)/2;
300 for (i = 0; i < mid_point; i++) {
301 dtrh = fifo->channel.reserve_arr[i];
302 fifo->channel.reserve_arr[i] =
303 fifo->channel.reserve_arr[max_arr_index - i];
304 fifo->channel.reserve_arr[max_arr_index - i] = dtrh;
307 #ifdef XGE_DEBUG_ASSERT
308 for ( i = 0; i < fifo->channel.reserve_length; i++) {
309 xge_debug_fifo(XGE_TRACE, "DTR after reversing index:%d"
310 " handle:%p", i, fifo->channel.reserve_arr[i]);
/*
 * __hal_fifo_close - close a Tx FIFO channel.
 * Destroys the TxDL mempool (which runs the item_free callback for every
 * descriptor), terminates the channel, and tears down the reserve/post
 * spinlocks created by __hal_fifo_open().  Herc-only post locks are
 * per-channel and destroyed here; on Xena the shared device lock is not.
 */
318 __hal_fifo_close(xge_hal_channel_h channelh)
320 xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
321 xge_hal_device_t *hldev = (xge_hal_device_t *)fifo->channel.devh;
324 __hal_mempool_destroy(fifo->mempool);
327 __hal_channel_terminate(channelh);
329 #if defined(XGE_HAL_TX_MULTI_RESERVE)
330 xge_os_spin_lock_destroy(&fifo->channel.reserve_lock, hldev->pdev);
331 #elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
332 xge_os_spin_lock_destroy_irq(&fifo->channel.reserve_lock, hldev->pdev);
/* Only Herc uses a per-channel post lock (Xena shares a device lock). */
334 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
335 #if defined(XGE_HAL_TX_MULTI_POST)
336 xge_os_spin_lock_destroy(&fifo->channel.post_lock, hldev->pdev);
337 #elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
338 xge_os_spin_lock_destroy_irq(&fifo->channel.post_lock,
/*
 * __hal_fifo_hw_initialize - program the Tx FIFO hardware.
 * Configures the WRR (weighted round-robin) calendar and the four FIFO
 * partition registers, enables partition 0, relaxes Tx_PA_CONFIG packet
 * integrity checks, and assigns MSI-X vectors to configured FIFO queues.
 *
 * FIX(review): the MSI-X guard below read
 *     !hldev->config.intr_mode != XGE_HAL_INTR_MODE_MSIX
 * Unary '!' binds tighter than '!=', so this compared the 0/1 result of
 * !intr_mode against the MSI-X enum constant, inverting the intended
 * "skip unless MSI-X" check and effectively disabling vector assignment.
 * The '!' has been removed; all other code is unchanged.
 */
345 __hal_fifo_hw_initialize(xge_hal_device_h devh)
347 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
348 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
349 u64* tx_fifo_partitions[4];
351 u64 tx_fifo_wrr_value[5];
355 /* Tx DMA Initialization */
357 tx_fifo_partitions[0] = &bar0->tx_fifo_partition_0;
358 tx_fifo_partitions[1] = &bar0->tx_fifo_partition_1;
359 tx_fifo_partitions[2] = &bar0->tx_fifo_partition_2;
360 tx_fifo_partitions[3] = &bar0->tx_fifo_partition_3;
362 tx_fifo_wrr[0] = &bar0->tx_w_round_robin_0;
363 tx_fifo_wrr[1] = &bar0->tx_w_round_robin_1;
364 tx_fifo_wrr[2] = &bar0->tx_w_round_robin_2;
365 tx_fifo_wrr[3] = &bar0->tx_w_round_robin_3;
366 tx_fifo_wrr[4] = &bar0->tx_w_round_robin_4;
368 tx_fifo_wrr_value[0] = XGE_HAL_FIFO_WRR_0;
369 tx_fifo_wrr_value[1] = XGE_HAL_FIFO_WRR_1;
370 tx_fifo_wrr_value[2] = XGE_HAL_FIFO_WRR_2;
371 tx_fifo_wrr_value[3] = XGE_HAL_FIFO_WRR_3;
372 tx_fifo_wrr_value[4] = XGE_HAL_FIFO_WRR_4;
374 /* Note: WRR calendar must be configured before the transmit
375 * FIFOs are enabled! page 6-77 user guide */
377 if (!hldev->config.rts_qos_en) {
378 /* all zeroes for Round-Robin */
379 for (i = 0; i < XGE_HAL_FIFO_MAX_WRR; i++) {
380 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0,
384 /* reset all of them but '0' */
385 for (i=1; i < XGE_HAL_FIFO_MAX_PARTITION; i++) {
386 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
387 tx_fifo_partitions[i]);
389 } else { /* Change the default settings */
391 for (i = 0; i < XGE_HAL_FIFO_MAX_WRR; i++) {
392 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
393 tx_fifo_wrr_value[i], tx_fifo_wrr[i]);
397 /* configure only configured FIFOs */
398 val64 = 0; part0 = 0;
399 for (i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) {
/* Two FIFO entries are packed per 64-bit partition register. */
400 int reg_half = i % 2;
403 if (hldev->config.fifo.queue[i].configured) {
404 int priority = hldev->config.fifo.queue[i].priority;
/* Length field at bit 19/51 (13 bits), priority at bit 5/37 (3 bits). */
406 vBIT((hldev->config.fifo.queue[i].max-1),
407 (((reg_half) * 32) + 19),
408 13) | vBIT(priority, (((reg_half)*32) + 5), 3);
411 /* NOTE: do write operation for each second u64 half
412 * or force for first one if configured number
416 /* skip partition '0', must write it once at
420 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
421 val64, tx_fifo_partitions[reg_num]);
422 xge_debug_fifo(XGE_TRACE,
423 "fifo partition_%d at: "
424 "0x"XGE_OS_LLXFMT" is: 0x"XGE_OS_LLXFMT,
425 reg_num, (unsigned long long)(ulong_t)
426 tx_fifo_partitions[reg_num],
427 (unsigned long long)val64);
433 part0 |= BIT(0); /* to enable the FIFO partition. */
/* Partition 0 written last, in two 32-bit halves, lower first. */
434 __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, (u32)part0,
435 tx_fifo_partitions[0]);
437 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(part0>>32),
438 tx_fifo_partitions[0]);
439 xge_debug_fifo(XGE_TRACE, "fifo partition_0 at: "
440 "0x"XGE_OS_LLXFMT" is: 0x"XGE_OS_LLXFMT,
441 (unsigned long long)(ulong_t)
442 tx_fifo_partitions[0],
443 (unsigned long long) part0);
446 * Initialization of Tx_PA_CONFIG register to ignore packet
447 * integrity checking.
449 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
451 val64 |= XGE_HAL_TX_PA_CFG_IGNORE_FRM_ERR |
452 XGE_HAL_TX_PA_CFG_IGNORE_SNAP_OUI |
453 XGE_HAL_TX_PA_CFG_IGNORE_LLC_CTRL |
454 XGE_HAL_TX_PA_CFG_IGNORE_L2_ERR;
455 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
459 * Assign MSI-X vectors
461 for (i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) {
463 xge_hal_channel_t *channel = NULL;
/* Skip queues that are unconfigured, have no vector, or when the
 * device is not in MSI-X interrupt mode (precedence bug fixed here). */
465 if (!hldev->config.fifo.queue[i].configured ||
466 !hldev->config.fifo.queue[i].intr_vector ||
467 hldev->config.intr_mode != XGE_HAL_INTR_MODE_MSIX)
/* Find the free channel bound to this post queue id. */
471 xge_list_for_each(item, &hldev->free_channels) {
472 xge_hal_channel_t *tmp;
473 tmp = xge_container_of(item, xge_hal_channel_t,
475 if (tmp->type == XGE_HAL_CHANNEL_TYPE_FIFO &&
476 tmp->post_qid == i) {
483 xge_hal_channel_msix_set(channel,
484 hldev->config.fifo.queue[i].intr_vector);
488 xge_debug_fifo(XGE_TRACE, "%s", "fifo channels initialized");
491 #ifdef XGE_HAL_ALIGN_XMIT
/*
 * __hal_fifo_dtr_align_free_unmap - release a descriptor's alignment
 * bounce buffer: DMA-unmap it if mapped, then free the backing memory.
 * Fields are cleared so repeated calls on the same descriptor are benign.
 * NOTE(review): the size arguments to unmap/free are on lines missing
 * from this view.
 */
493 __hal_fifo_dtr_align_free_unmap(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
495 xge_hal_fifo_txdl_priv_t *txdl_priv;
496 xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;
497 xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
499 txdl_priv = __hal_fifo_txdl_priv(txdp);
501 if (txdl_priv->align_dma_addr != 0) {
502 xge_os_dma_unmap(fifo->channel.pdev,
503 txdl_priv->align_dma_handle,
504 txdl_priv->align_dma_addr,
506 XGE_OS_DMA_DIR_TODEVICE);
508 txdl_priv->align_dma_addr = 0;
511 if (txdl_priv->align_vaddr != NULL) {
512 xge_os_dma_free(fifo->channel.pdev,
513 txdl_priv->align_vaddr,
515 &txdl_priv->align_dma_acch,
516 &txdl_priv->align_dma_handle);
519 txdl_priv->align_vaddr = NULL;
524 __hal_fifo_dtr_align_alloc_map(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
526 xge_hal_fifo_txdl_priv_t *txdl_priv;
527 xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;
528 xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
532 txdl_priv = __hal_fifo_txdl_priv(txdp);
534 /* allocate alignment DMA-buffer */
535 txdl_priv->align_vaddr = (char *)xge_os_dma_malloc(fifo->channel.pdev,
537 XGE_OS_DMA_CACHELINE_ALIGNED |
538 XGE_OS_DMA_STREAMING,
539 &txdl_priv->align_dma_handle,
540 &txdl_priv->align_dma_acch);
541 if (txdl_priv->align_vaddr == NULL) {
542 return XGE_HAL_ERR_OUT_OF_MEMORY;
546 txdl_priv->align_dma_addr = xge_os_dma_map(fifo->channel.pdev,
547 txdl_priv->align_dma_handle, txdl_priv->align_vaddr,
549 XGE_OS_DMA_DIR_TODEVICE, XGE_OS_DMA_STREAMING);
551 if (txdl_priv->align_dma_addr == XGE_OS_INVALID_DMA_ADDR) {
552 __hal_fifo_dtr_align_free_unmap(channelh, dtrh);
553 return XGE_HAL_ERR_OUT_OF_MAPPING;