2 * Copyright(c) 2002-2011 Exar Corp.
5 * Redistribution and use in source and binary forms, with or without
6 * modification are permitted provided the following conditions are met:
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the Exar Corporation nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
33 #include <dev/vxge/vxgehal/vxgehal.h>
36 * __hal_fifo_mempool_item_alloc - Allocate List blocks for TxD list callback
37 * @mempoolh: Handle to memory pool
38 * @memblock: Address of this memory block
39 * @memblock_index: Index of this memory block
40 * @dma_object: dma object for this block
41 * @item: Pointer to this item
42 * @index: Index of this item in memory block
43 * @is_last: If this is last item in the block
44 * @userdata: Specific data of user
46 * This function is callback passed to __hal_mempool_create to create memory
/*
 * NOTE(review): this extract of the file is missing source lines (several
 * parameter declarations and closing braces are not visible).  The comments
 * below describe only the statements that are visible here.
 */
49 static vxge_hal_status_e
50 __hal_fifo_mempool_item_alloc(
51 vxge_hal_mempool_h mempoolh,
54 vxge_hal_mempool_dma_t *dma_object,
62 u32 memblock_item_idx;
64 __hal_fifo_t *fifo = (__hal_fifo_t *) userdata;
66 vxge_assert(fifo != NULL);
69 #if (VXGE_COMPONENT_HAL_POOL & VXGE_DEBUG_MODULE_MASK)
71 __hal_device_t *hldev = (__hal_device_t *) fifo->channel.devh;
73 vxge_hal_trace_log_pool("==> %s:%s:%d",
74 __FILE__, __func__, __LINE__);
76 vxge_hal_trace_log_pool(
77 "mempoolh = 0x"VXGE_OS_STXFMT", "
78 "memblock = 0x"VXGE_OS_STXFMT", memblock_index = %d, "
79 "dma_object = 0x"VXGE_OS_STXFMT", \
80 item = 0x"VXGE_OS_STXFMT", "
81 "item_index = %d, is_last = %d, userdata = 0x"VXGE_OS_STXFMT,
82 (ptr_t) mempoolh, (ptr_t) memblock, memblock_index,
83 (ptr_t) dma_object, (ptr_t) item, item_index, is_last,
/* Per-memblock private area that holds the HAL/ULD private space of
 * every TxDL packed into this memory block. */
88 block_priv = __hal_mempool_item_priv((vxge_hal_mempool_t *) mempoolh,
89 memblock_index, item, &memblock_item_idx);
91 vxge_assert(block_priv != NULL);
/* Walk every TxDL carved out of this memory block and wire it into the
 * channel's descriptor array. */
93 for (i = 0; i < fifo->txdl_per_memblock; i++) {
95 __hal_fifo_txdl_priv_t *txdl_priv;
96 vxge_hal_fifo_txd_t *txdp;
/* Global descriptor index: memblocks are laid out consecutively,
 * txdl_per_memblock descriptors per block. */
98 int dtr_index = item_index * fifo->txdl_per_memblock + i;
100 txdp = (vxge_hal_fifo_txd_t *) ((void *)
101 ((char *) item + i * fifo->txdl_size));
/* host_control carries the descriptor index so it can be recovered
 * from a completed TxD later. */
103 txdp->host_control = dtr_index;
105 fifo->channel.dtr_arr[dtr_index].dtr = txdp;
/* ULD private space comes first, HAL private space follows it at
 * offset per_txdl_space within each txdl_priv_size slot. */
107 fifo->channel.dtr_arr[dtr_index].uld_priv = (void *)
108 ((char *) block_priv + fifo->txdl_priv_size * i);
110 fifo->channel.dtr_arr[dtr_index].hal_priv = (void *)
111 (((char *) fifo->channel.dtr_arr[dtr_index].uld_priv) +
112 fifo->per_txdl_space);
114 txdl_priv = (__hal_fifo_txdl_priv_t *)
115 fifo->channel.dtr_arr[dtr_index].hal_priv;
117 vxge_assert(txdl_priv);
119 /* pre-format HAL's TxDL's private */
121 txdl_priv->dma_offset = (char *) txdp - (char *) memblock;
122 txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
123 txdl_priv->dma_handle = dma_object->handle;
124 txdl_priv->memblock = memblock;
125 txdl_priv->first_txdp = (vxge_hal_fifo_txd_t *) txdp;
126 txdl_priv->next_txdl_priv = NULL;
127 txdl_priv->dang_txdl = NULL;
128 txdl_priv->dang_frags = 0;
129 txdl_priv->alloc_frags = 0;
131 #if defined(VXGE_DEBUG_ASSERT)
132 txdl_priv->dma_object = dma_object;
135 #if defined(VXGE_HAL_ALIGN_XMIT)
136 txdl_priv->align_vaddr = NULL;
137 txdl_priv->align_dma_addr = (dma_addr_t) 0;
/* When not allocating alignment buffers at runtime, pre-allocate the
 * per-TxDL alignment DMA buffer here. */
139 #ifndef VXGE_HAL_ALIGN_XMIT_ALLOC_RT
142 vxge_hal_status_e status;
144 if (fifo->config->alignment_size) {
145 status = __hal_fifo_txdl_align_alloc_map(fifo,
147 if (status != VXGE_HAL_OK) {
149 #if (VXGE_COMPONENT_HAL_POOL & VXGE_DEBUG_MODULE_MASK)
150 __hal_device_t *hldev;
151 hldev = (__hal_device_t *)
154 vxge_hal_err_log_pool(
155 "align buffer[%d] %d bytes, \
157 (item_index * fifo->txdl_per_memblock + i),
158 fifo->align_size, status);
160 vxge_hal_trace_log_pool(
161 "<== %s:%s:%d Result: 0",
162 __FILE__, __func__, __LINE__);
/* Give the ULD a chance to initialize its per-TxDL private area. */
170 if (fifo->txdl_init) {
171 fifo->txdl_init(fifo->channel.vph,
172 (vxge_hal_txdl_h) txdp,
173 VXGE_HAL_FIFO_ULD_PRIV(fifo, txdp),
174 VXGE_HAL_FIFO_TXDL_INDEX(txdp),
175 fifo->channel.userdata, VXGE_HAL_OPEN_NORMAL);
179 #if (VXGE_COMPONENT_HAL_POOL & VXGE_DEBUG_MODULE_MASK)
181 __hal_device_t *hldev = (__hal_device_t *) fifo->channel.devh;
183 vxge_hal_trace_log_pool("<== %s:%s:%d Result: 0",
184 __FILE__, __func__, __LINE__);
188 return (VXGE_HAL_OK);
193 * __hal_fifo_mempool_item_free - Free List blocks for TxD list callback
194 * @mempoolh: Handle to memory pool
195 * @memblock: Address of this memory block
196 * @memblock_index: Index of this memory block
197 * @dma_object: dma object for this block
198 * @item: Pointer to this item
199 * @index: Index of this item in memory block
200 * @is_last: If this is last item in the block
201 * @userdata: Specific data of user
203 * This function is callback passed to __hal_mempool_free to destroy memory
/*
 * NOTE(review): lines are missing from this extract (parameter list,
 * closing braces/#endif's); comments cover the visible statements only.
 */
206 static vxge_hal_status_e
207 __hal_fifo_mempool_item_free(
208 vxge_hal_mempool_h mempoolh,
211 vxge_hal_mempool_dma_t *dma_object,
219 #if (VXGE_COMPONENT_HAL_POOL & VXGE_DEBUG_MODULE_MASK)
221 __hal_fifo_t *fifo = (__hal_fifo_t *) userdata;
223 vxge_assert(fifo != NULL);
225 __hal_device_t *hldev = (__hal_device_t *) fifo->channel.devh;
227 vxge_hal_trace_log_pool("==> %s:%s:%d",
228 __FILE__, __func__, __LINE__);
230 vxge_hal_trace_log_pool("mempoolh = 0x"VXGE_OS_STXFMT", "
231 "memblock = 0x"VXGE_OS_STXFMT", memblock_index = %d, "
232 "dma_object = 0x"VXGE_OS_STXFMT", \
233 item = 0x"VXGE_OS_STXFMT", "
234 "item_index = %d, is_last = %d, userdata = 0x"VXGE_OS_STXFMT,
235 (ptr_t) mempoolh, (ptr_t) memblock, memblock_index,
236 (ptr_t) dma_object, (ptr_t) item, item_index, is_last,
/* Release any pre-allocated alignment DMA buffers for every TxDL in
 * this memory block (mirror of the allocation in item_alloc). */
241 #if defined(VXGE_HAL_ALIGN_XMIT)
243 __hal_fifo_t *fifo = (__hal_fifo_t *) userdata;
245 vxge_assert(fifo != NULL);
246 if (fifo->config->alignment_size) {
249 vxge_hal_fifo_txd_t *txdp;
251 for (i = 0; i < fifo->txdl_per_memblock; i++) {
253 ((char *) item + i * fifo->txdl_size);
254 __hal_fifo_txdl_align_free_unmap(fifo, txdp);
260 #if (VXGE_COMPONENT_HAL_POOL & VXGE_DEBUG_MODULE_MASK)
262 __hal_fifo_t *fifo = (__hal_fifo_t *) userdata;
264 vxge_assert(fifo != NULL);
266 __hal_device_t *hldev = (__hal_device_t *) fifo->channel.devh;
268 vxge_hal_trace_log_pool("<== %s:%s:%d Result: 0",
269 __FILE__, __func__, __LINE__);
273 return (VXGE_HAL_OK);
277 * __hal_fifo_create - Create a FIFO
278 * @vpath_handle: Handle returned by virtual path open
279 * @attr: FIFO configuration parameters structure
281 * This function creates FIFO and initializes it.
/*
 * NOTE(review): the function's return-type/name line and some statements
 * are missing from this extract; comments describe the visible flow:
 * size TxDLs, allocate the channel, create the backing mempool, then
 * initialize the channel — unwinding via __hal_fifo_delete() on error.
 */
286 vxge_hal_vpath_h vpath_handle,
287 vxge_hal_fifo_attr_t *attr)
289 vxge_hal_status_e status;
291 vxge_hal_fifo_config_t *config;
292 u32 txdl_size, memblock_size, txdl_per_memblock;
293 __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
294 __hal_device_t *hldev;
296 vxge_assert((vpath_handle != NULL) && (attr != NULL));
298 hldev = (__hal_device_t *) vp->vpath->hldev;
300 vxge_hal_trace_log_fifo("==> %s:%s:%d",
301 __FILE__, __func__, __LINE__);
303 vxge_hal_trace_log_fifo(
304 "vpath_handle = 0x"VXGE_OS_STXFMT", attr = 0x"VXGE_OS_STXFMT,
305 (ptr_t) vpath_handle, (ptr_t) attr);
307 if ((vpath_handle == NULL) || (attr == NULL)) {
308 vxge_hal_err_log_fifo("null pointer passed == > %s : %d",
310 vxge_hal_trace_log_fifo("<== %s:%s:%d Result: %d",
311 __FILE__, __func__, __LINE__,
312 VXGE_HAL_ERR_INVALID_HANDLE);
313 return (VXGE_HAL_ERR_INVALID_HANDLE);
317 &vp->vpath->hldev->header.config.vp_config[vp->vpath->vp_id].fifo;
/* One TxDL holds max_frags TxDs; pick a memblock size of at least one
 * host page so several TxDLs can share a block when they fit. */
319 txdl_size = config->max_frags * sizeof(vxge_hal_fifo_txd_t);
321 if (txdl_size <= VXGE_OS_HOST_PAGE_SIZE)
322 memblock_size = VXGE_OS_HOST_PAGE_SIZE;
324 memblock_size = txdl_size;
326 txdl_per_memblock = memblock_size / txdl_size;
/* Round fifo_length up to a whole number of memory blocks. */
328 config->fifo_length = ((config->fifo_length + txdl_per_memblock - 1) /
329 txdl_per_memblock) * txdl_per_memblock;
331 fifo = (__hal_fifo_t *) vxge_hal_channel_allocate(
332 (vxge_hal_device_h) vp->vpath->hldev,
334 VXGE_HAL_CHANNEL_TYPE_FIFO,
336 attr->per_txdl_space,
340 vxge_hal_err_log_fifo("Memory allocation failed == > %s : %d",
342 vxge_hal_trace_log_fifo("<== %s:%s:%d Result: %d",
343 __FILE__, __func__, __LINE__,
344 VXGE_HAL_ERR_OUT_OF_MEMORY);
345 return (VXGE_HAL_ERR_OUT_OF_MEMORY);
348 vp->vpath->fifoh = fifo;
350 fifo->stats = &vp->vpath->sw_stats->fifo_stats;
352 fifo->config = config;
354 fifo->memblock_size = memblock_size;
356 #if defined(VXGE_HAL_TX_MULTI_POST)
357 vxge_os_spin_lock_init(&fifo->channel.post_lock,
358 vp->vpath->hldev->header.pdev);
359 #elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
360 vxge_os_spin_lock_init_irq(&fifo->channel.post_lock,
361 vp->vpath->hldev->header.irqh);
365 fifo->config->alignment_size * fifo->config->max_aligned_frags;
367 /* apply "interrupts per txdl" attribute */
368 fifo->interrupt_type = VXGE_HAL_FIFO_TXD_INT_TYPE_UTILZ;
369 if (fifo->config->intr) {
370 fifo->interrupt_type = VXGE_HAL_FIFO_TXD_INT_TYPE_PER_LIST;
373 fifo->no_snoop_bits = config->no_snoop_bits;
376 * FIFO memory management strategy:
378 * TxDL splitted into three independent parts:
380 * - TxD HAL private part
381 * - upper layer private part
383 * Adaptative memory allocation used. i.e. Memory allocated on
384 * demand with the size which will fit into one memory block.
385 * One memory block may contain more than one TxDL. In simple case
386 * memory block size can be equal to CPU page size. On more
387 * sophisticated OS's memory block can be contigious across
390 * During "reserve" operations more memory can be allocated on demand
391 * for example due to FIFO full condition.
393 * Pool of memory memblocks never shrinks except __hal_fifo_close
394 * routine which will essentially stop channel and free the resources.
397 /* TxDL common private size == TxDL private + ULD private */
398 fifo->txdl_priv_size =
399 sizeof(__hal_fifo_txdl_priv_t) + attr->per_txdl_space;
/* Cacheline-align the per-TxDL private slot size. */
400 fifo->txdl_priv_size =
401 ((fifo->txdl_priv_size + __vxge_os_cacheline_size - 1) /
402 __vxge_os_cacheline_size) * __vxge_os_cacheline_size;
404 fifo->per_txdl_space = attr->per_txdl_space;
406 /* recompute txdl size to be cacheline aligned */
407 fifo->txdl_size = txdl_size;
408 fifo->txdl_per_memblock = txdl_per_memblock;
411 * since txdl_init() callback will be called from item_alloc(),
412 * the same way channels userdata might be used prior to
413 * channel_initialize()
415 fifo->txdl_init = attr->txdl_init;
416 fifo->txdl_term = attr->txdl_term;
417 fifo->callback = attr->callback;
419 if (fifo->txdl_per_memblock == 0) {
420 __hal_fifo_delete(vpath_handle);
421 vxge_hal_trace_log_fifo("<== %s:%s:%d Result: %d",
422 __FILE__, __func__, __LINE__,
423 VXGE_HAL_ERR_INVALID_BLOCK_SIZE);
424 return (VXGE_HAL_ERR_INVALID_BLOCK_SIZE);
427 /* calculate actual TxDL block private size */
428 fifo->txdlblock_priv_size =
429 fifo->txdl_priv_size * fifo->txdl_per_memblock;
/* Mempool creation invokes __hal_fifo_mempool_item_alloc for every
 * memory block, which wires TxDLs into the channel. */
432 vxge_hal_mempool_create((vxge_hal_device_h) vp->vpath->hldev,
435 fifo->txdlblock_priv_size,
436 fifo->config->fifo_length /
437 fifo->txdl_per_memblock,
438 fifo->config->fifo_length /
439 fifo->txdl_per_memblock,
440 __hal_fifo_mempool_item_alloc,
441 __hal_fifo_mempool_item_free,
444 if (fifo->mempool == NULL) {
445 __hal_fifo_delete(vpath_handle);
446 vxge_hal_trace_log_fifo("<== %s:%s:%d Result: %d",
447 __FILE__, __func__, __LINE__, VXGE_HAL_ERR_OUT_OF_MEMORY);
448 return (VXGE_HAL_ERR_OUT_OF_MEMORY);
451 status = vxge_hal_channel_initialize(&fifo->channel);
452 if (status != VXGE_HAL_OK) {
453 __hal_fifo_delete(vpath_handle);
454 vxge_hal_trace_log_fifo("<== %s:%s:%d Result: %d",
455 __FILE__, __func__, __LINE__, status);
459 vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0",
460 __FILE__, __func__, __LINE__);
461 return (VXGE_HAL_OK);
465 * __hal_fifo_abort - Returns the TxD
466 * @fifoh: Fifo to be reset
467 * @reopen: See vxge_hal_reopen_e {}.
469 * This function terminates the TxDs of fifo
/*
 * NOTE(review): lines are missing in this extract.  Visible flow: first
 * invoke the ULD txdl_term callback with STATE_FREED for descriptors that
 * are not posted, then drain posted descriptors (try_complete/complete),
 * terminating each with STATE_POSTED and freeing its channel slot.
 */
473 vxge_hal_fifo_h fifoh,
474 vxge_hal_reopen_e reopen)
477 __hal_fifo_t *fifo = (__hal_fifo_t *) fifoh;
478 __hal_device_t *hldev;
479 vxge_hal_txdl_h txdlh;
481 vxge_assert(fifoh != NULL);
483 hldev = (__hal_device_t *) fifo->channel.devh;
485 vxge_hal_trace_log_fifo("==> %s:%s:%d",
486 __FILE__, __func__, __LINE__);
488 vxge_hal_trace_log_fifo("fifo = 0x"VXGE_OS_STXFMT", reopen = %d",
489 (ptr_t) fifoh, reopen);
491 if (fifo->txdl_term) {
492 __hal_channel_for_each_dtr(&fifo->channel, txdlh, i) {
493 if (!__hal_channel_is_posted_dtr(&fifo->channel,
495 fifo->txdl_term(fifo->channel.vph, txdlh,
496 VXGE_HAL_FIFO_ULD_PRIV(fifo, txdlh),
497 VXGE_HAL_TXDL_STATE_FREED,
498 fifo->channel.userdata,
505 __hal_channel_dtr_try_complete(&fifo->channel, &txdlh);
510 __hal_channel_dtr_complete(&fifo->channel);
512 if (fifo->txdl_term) {
513 fifo->txdl_term(fifo->channel.vph, txdlh,
514 VXGE_HAL_FIFO_ULD_PRIV(fifo, txdlh),
515 VXGE_HAL_TXDL_STATE_POSTED,
516 fifo->channel.userdata,
520 __hal_channel_dtr_free(&fifo->channel,
521 VXGE_HAL_FIFO_TXDL_INDEX(txdlh));
524 vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0",
525 __FILE__, __func__, __LINE__);
529 * __hal_fifo_reset - Resets the fifo
530 * @fifoh: Fifo to be reset
532 * This function resets the fifo during vpath reset operation
/*
 * Aborts all outstanding TxDLs (VXGE_HAL_RESET_ONLY) and then resets the
 * underlying channel state.  Returns VXGE_HAL_OK on success, otherwise the
 * status from __hal_channel_reset().
 * NOTE(review): some lines are missing from this extract.
 */
536 vxge_hal_fifo_h fifoh)
538 vxge_hal_status_e status;
539 __hal_device_t *hldev;
540 __hal_fifo_t *fifo = (__hal_fifo_t *) fifoh;
542 vxge_assert(fifoh != NULL);
544 hldev = (__hal_device_t *) fifo->channel.devh;
546 vxge_hal_trace_log_fifo("==> %s:%s:%d",
547 __FILE__, __func__, __LINE__);
549 vxge_hal_trace_log_fifo("fifo = 0x"VXGE_OS_STXFMT,
552 __hal_fifo_abort(fifoh, VXGE_HAL_RESET_ONLY);
554 status = __hal_channel_reset(&fifo->channel);
556 if (status != VXGE_HAL_OK) {
558 vxge_hal_trace_log_fifo("<== %s:%s:%d Result: %d",
559 __FILE__, __func__, __LINE__, status);
564 vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0",
565 __FILE__, __func__, __LINE__);
567 return (VXGE_HAL_OK);
571 * vxge_hal_fifo_doorbell_reset - Resets the doorbell fifo
572 * @vapth_handle: Vpath Handle
574 * This function resets the doorbell fifo during if fifo error occurs
/*
 * Resets the non-offload doorbell FIFO and then re-posts every descriptor
 * that was already posted on the channel, re-issuing its doorbell with the
 * saved DMA address, fragment count and no-snoop bits.
 * NOTE(review): some lines (return statements, closing braces) are missing
 * from this extract.
 */
577 vxge_hal_fifo_doorbell_reset(
578 vxge_hal_vpath_h vpath_handle)
581 vxge_hal_txdl_h txdlh;
583 __hal_virtualpath_t *vpath;
584 __hal_fifo_txdl_priv_t *txdl_priv;
585 __hal_device_t *hldev;
586 __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
587 vxge_hal_status_e status = VXGE_HAL_OK;
589 vxge_assert(vpath_handle != NULL);
591 hldev = vp->vpath->hldev;
593 vxge_hal_trace_log_fifo("==> %s:%s:%d",
594 __FILE__, __func__, __LINE__);
596 vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT,
597 (ptr_t) vpath_handle);
599 fifo = (__hal_fifo_t *) vp->vpath->fifoh;
601 vpath = ((__hal_vpath_handle_t *) fifo->channel.vph)->vpath;
603 status = __hal_non_offload_db_reset(fifo->channel.vph);
605 if (status != VXGE_HAL_OK) {
606 vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0",
607 __FILE__, __func__, __LINE__);
/* Re-post every already-posted TxDL; the low bit of the DMA address is
 * set when the TxD requests no bandwidth limiting. */
611 __hal_channel_for_each_posted_dtr(&fifo->channel, txdlh, i) {
613 txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdlh);
615 __hal_non_offload_db_post(fifo->channel.vph,
616 ((VXGE_HAL_FIFO_TXD_NO_BW_LIMIT_GET(
617 ((vxge_hal_fifo_txd_t *) txdlh)->control_1)) ?
618 (((u64) txdl_priv->dma_addr) | 0x1) :
619 (u64) txdl_priv->dma_addr),
620 txdl_priv->frags - 1,
621 vpath->vp_config->fifo.no_snoop_bits);
624 vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0",
625 __FILE__, __func__, __LINE__);
631 * __hal_fifo_delete - Removes the FIFO
632 * @vpath_handle: Virtual path handle to which this queue belongs
634 * This function freeup the memory pool and removes the FIFO
/*
 * Teardown order: abort outstanding TxDLs, destroy the TxDL mempool,
 * terminate the channel, destroy the post lock, then free the channel.
 * NOTE(review): some lines are missing from this extract (e.g. NULL
 * checks / assignments between the visible statements).
 */
638 vxge_hal_vpath_h vpath_handle)
640 __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
642 __hal_device_t *hldev;
644 vxge_assert(vpath_handle != NULL);
646 hldev = vp->vpath->hldev;
648 vxge_hal_trace_log_fifo("==> %s:%s:%d",
649 __FILE__, __func__, __LINE__);
651 vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT,
652 (ptr_t) vpath_handle);
654 fifo = (__hal_fifo_t *) vp->vpath->fifoh;
656 vxge_assert(fifo != NULL);
659 __hal_fifo_abort(vp->vpath->fifoh, VXGE_HAL_OPEN_NORMAL);
660 vxge_hal_mempool_destroy(fifo->mempool);
663 vxge_hal_channel_terminate(&fifo->channel);
665 #if defined(VXGE_HAL_TX_MULTI_POST)
666 vxge_os_spin_lock_destroy(&fifo->channel.post_lock,
667 vp->vpath->hldev->header.pdev)
668 #elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
669 vxge_os_spin_lock_destroy_irq(&fifo->channel.post_lock,
670 vp->vpath->hldev->header.pdev);
673 vxge_hal_channel_free(&fifo->channel);
675 vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0",
676 __FILE__, __func__, __LINE__);
679 #if defined(VXGE_HAL_ALIGN_XMIT)
681 * __hal_fifo_txdl_align_free_unmap - Unmap the alignement buffers
685 * This function unmaps dma memory for the alignment buffers
/*
 * Releases the per-TxDL alignment DMA buffer (if one was allocated) back
 * to the block pool and clears the TxDL-private alignment fields so the
 * free is idempotent.
 * NOTE(review): some lines are missing from this extract.
 */
688 __hal_fifo_txdl_align_free_unmap(
690 vxge_hal_fifo_txd_t *txdp)
692 __hal_device_t *hldev;
693 __hal_fifo_txdl_priv_t *txdl_priv;
695 vxge_assert((fifo != NULL) && (txdp != NULL));
697 hldev = (__hal_device_t *) fifo->channel.devh;
699 vxge_hal_trace_log_fifo("==> %s:%s:%d",
700 __FILE__, __func__, __LINE__);
702 vxge_hal_trace_log_fifo(
703 "fifo = 0x"VXGE_OS_STXFMT", txdp = 0x"VXGE_OS_STXFMT,
704 (ptr_t) fifo, (ptr_t) txdp);
706 txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdp);
708 if (txdl_priv->align_vaddr != NULL) {
709 __hal_blockpool_free(fifo->channel.devh,
710 txdl_priv->align_vaddr,
712 &txdl_priv->align_dma_addr,
713 &txdl_priv->align_dma_handle,
714 &txdl_priv->align_dma_acch);
716 txdl_priv->align_vaddr = NULL;
717 txdl_priv->align_dma_addr = 0;
720 vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0",
721 __FILE__, __func__, __LINE__);
725 * __hal_fifo_txdl_align_alloc_map - Maps the alignement buffers
729 * This function maps dma memory for the alignment buffers
/*
 * Allocates and DMA-maps the per-TxDL alignment buffer from the block
 * pool, storing the virtual address, DMA address and handles in the
 * TxDL-private area.  Returns VXGE_HAL_ERR_OUT_OF_MEMORY on allocation
 * failure, VXGE_HAL_OK otherwise.
 * NOTE(review): some lines (e.g. the size argument to
 * __hal_blockpool_malloc) are missing from this extract.
 */
732 __hal_fifo_txdl_align_alloc_map(
734 vxge_hal_fifo_txd_t *txdp)
736 __hal_device_t *hldev;
737 __hal_fifo_txdl_priv_t *txdl_priv;
739 vxge_assert((fifo != NULL) && (txdp != NULL));
741 hldev = (__hal_device_t *) fifo->channel.devh;
743 vxge_hal_trace_log_fifo("==> %s:%s:%d",
744 __FILE__, __func__, __LINE__);
746 vxge_hal_trace_log_fifo(
747 "fifo = 0x"VXGE_OS_STXFMT", txdp = 0x"VXGE_OS_STXFMT,
748 (ptr_t) fifo, (ptr_t) txdp);
750 txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdp);
752 /* allocate alignment DMA-buffer */
753 txdl_priv->align_vaddr =
754 (u8 *) __hal_blockpool_malloc(fifo->channel.devh,
756 &txdl_priv->align_dma_addr,
757 &txdl_priv->align_dma_handle,
758 &txdl_priv->align_dma_acch);
759 if (txdl_priv->align_vaddr == NULL) {
760 vxge_hal_trace_log_fifo("<== %s:%s:%d Result: %d",
761 __FILE__, __func__, __LINE__, VXGE_HAL_ERR_OUT_OF_MEMORY);
762 return (VXGE_HAL_ERR_OUT_OF_MEMORY);
765 vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0",
766 __FILE__, __func__, __LINE__);
767 return (VXGE_HAL_OK);
771 * vxge_hal_fifo_free_txdl_count_get - returns the number of txdls
772 * available in the fifo
773 * @vpath_handle: Virtual path handle.
/* Thin accessor: delegates to the channel's free-descriptor counter for
 * the fifo attached to this vpath. */
776 vxge_hal_fifo_free_txdl_count_get(vxge_hal_vpath_h vpath_handle)
778 return __hal_channel_free_dtr_count(&((__hal_fifo_t *)
779 ((__hal_vpath_handle_t *) vpath_handle)->vpath->fifoh)->channel);
783 * vxge_hal_fifo_txdl_private_get - Retrieve per-descriptor private data.
784 * @vpath_handle: Virtual path handle.
785 * @txdlh: Descriptor handle.
787 * Retrieve per-descriptor private data.
788 * Note that ULD requests per-descriptor space via
789 * vxge_hal_fifo_attr_t passed to
790 * vxge_hal_vpath_open().
792 * Returns: private ULD data associated with the descriptor.
/* Thin accessor over the VXGE_HAL_FIFO_ULD_PRIV lookup macro. */
795 vxge_hal_fifo_txdl_private_get(
796 vxge_hal_vpath_h vpath_handle,
797 vxge_hal_txdl_h txdlh)
799 return (VXGE_HAL_FIFO_ULD_PRIV(((__hal_fifo_t *)
800 ((__hal_vpath_handle_t *) vpath_handle)->vpath->fifoh), txdlh));
804 * vxge_hal_fifo_txdl_reserve - Reserve fifo descriptor.
805 * @vapth_handle: virtual path handle.
806 * @txdlh: Reserved descriptor. On success HAL fills this "out" parameter
807 * with a valid handle.
808 * @txdl_priv: Buffer to return the pointer to per txdl space
810 * Reserve a single TxDL (that is, fifo descriptor)
811 * for the subsequent filling-in by upper layerdriver (ULD))
812 * and posting on the corresponding channel (@channelh)
813 * via vxge_hal_fifo_txdl_post().
815 * Note: it is the responsibility of ULD to reserve multiple descriptors
816 * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
817 * carries up to configured number (fifo.max_frags) of contiguous buffers.
819 * Returns: VXGE_HAL_OK - success;
820 * VXGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
/*
 * NOTE(review): some lines are missing from this extract.  Visible flow:
 * reserve a descriptor under the post lock, then (outside the lock)
 * reset its HAL-private bookkeeping and zero each TxD's control words.
 */
824 vxge_hal_fifo_txdl_reserve(
825 vxge_hal_vpath_h vpath_handle,
826 vxge_hal_txdl_h *txdlh,
830 __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
831 __hal_device_t *hldev;
833 vxge_hal_status_e status;
835 #if defined(VXGE_HAL_TX_MULTI_POST_IRQ)
836 unsigned long flags = 0;
840 vxge_assert((vpath_handle != NULL) && (txdlh != NULL));
842 hldev = vp->vpath->hldev;
844 vxge_hal_trace_log_fifo("==> %s:%s:%d",
845 __FILE__, __func__, __LINE__);
847 vxge_hal_trace_log_fifo(
848 "vpath_handle = 0x"VXGE_OS_STXFMT", txdlh = 0x"VXGE_OS_STXFMT,
849 (ptr_t) vpath_handle, (ptr_t) txdlh);
851 fifo = (__hal_fifo_t *) vp->vpath->fifoh;
853 vxge_assert(fifo != NULL);
/* The descriptor reservation itself is the only step that needs the
 * channel post lock. */
855 #if defined(VXGE_HAL_TX_MULTI_POST)
856 vxge_os_spin_lock(&fifo->channel.post_lock);
857 #elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
858 vxge_os_spin_lock_irq(&fifo->channel.post_lock, flags);
861 status = __hal_channel_dtr_reserve(&fifo->channel, txdlh);
863 #if defined(VXGE_HAL_TX_MULTI_POST)
864 vxge_os_spin_unlock(&fifo->channel.post_lock);
865 #elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
866 vxge_os_spin_unlock_irq(&fifo->channel.post_lock, flags);
869 if (status == VXGE_HAL_OK) {
870 vxge_hal_fifo_txd_t *txdp = (vxge_hal_fifo_txd_t *)*txdlh;
871 __hal_fifo_txdl_priv_t *priv;
873 priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdp);
875 /* reset the TxDL's private */
876 priv->align_dma_offset = 0;
877 priv->align_vaddr_start = priv->align_vaddr;
878 priv->align_used_frags = 0;
880 priv->alloc_frags = fifo->config->max_frags;
881 priv->dang_txdl = NULL;
882 priv->dang_frags = 0;
883 priv->next_txdl_priv = NULL;
884 priv->bytes_sent = 0;
/* Hand the ULD its per-TxDL private area back through @txdl_priv. */
886 *txdl_priv = VXGE_HAL_FIFO_ULD_PRIV(fifo, txdp);
/* Clear every TxD's control words so stale flags from the previous
 * use of this descriptor cannot leak into the new transmit. */
888 for (i = 0; i < fifo->config->max_frags; i++) {
889 txdp = ((vxge_hal_fifo_txd_t *)*txdlh) + i;
890 txdp->control_0 = txdp->control_1 = 0;
893 #if defined(VXGE_OS_MEMORY_CHECK)
898 vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0",
899 __FILE__, __func__, __LINE__);
904 * vxge_hal_fifo_txdl_buffer_set - Set transmit buffer pointer in the
906 * @vpath_handle: virtual path handle.
907 * @txdlh: Descriptor handle.
908 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
910 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
911 * @size: Size of the data buffer (in bytes).
913 * This API is part of the preparation of the transmit descriptor for posting
914 * (via vxge_hal_fifo_txdl_post()). The related "preparation" APIs include
915 * vxge_hal_fifo_txdl_mss_set() and vxge_hal_fifo_txdl_cksum_set_bits().
916 * All three APIs fill in the fields of the fifo descriptor,
917 * in accordance with the X3100 specification.
/*
 * NOTE(review): some lines are missing from this extract (e.g. the
 * txdl_priv->frags increment is presumably after L555 — confirm against
 * the full source).
 */
921 vxge_hal_fifo_txdl_buffer_set(
922 vxge_hal_vpath_h vpath_handle,
923 vxge_hal_txdl_h txdlh,
925 dma_addr_t dma_pointer,
928 __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
930 __hal_device_t *hldev;
931 __hal_fifo_txdl_priv_t *txdl_priv;
932 vxge_hal_fifo_txd_t *txdp;
934 vxge_assert((vpath_handle != NULL) && (txdlh != NULL) &&
935 (dma_pointer != 0) && (size != 0));
937 hldev = vp->vpath->hldev;
939 vxge_hal_trace_log_fifo("==> %s:%s:%d",
940 __FILE__, __func__, __LINE__);
942 vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT", "
943 "txdlh = 0x"VXGE_OS_STXFMT", frag_idx = %d, "
944 "dma_pointer = 0x"VXGE_OS_LLXFMT", size = %lu",
945 (ptr_t) vpath_handle, (ptr_t) txdlh,
946 frag_idx, (u64) dma_pointer, size);
948 fifo = (__hal_fifo_t *) vp->vpath->fifoh;
950 vxge_assert(fifo != NULL);
952 txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdlh);
/* Next unfilled TxD within this TxDL. */
954 txdp = (vxge_hal_fifo_txd_t *) txdlh + txdl_priv->frags;
958 * it is the responsibility of upper layers and not HAL
959 * detect it and skip zero-size fragment
961 vxge_assert(size > 0);
962 vxge_assert(frag_idx < txdl_priv->alloc_frags);
964 txdp->buffer_pointer = (u64) dma_pointer;
965 txdp->control_0 |= VXGE_HAL_FIFO_TXD_BUFFER_SIZE(size);
966 txdl_priv->bytes_sent += size;
967 fifo->stats->total_buffers++;
970 vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0",
971 __FILE__, __func__, __LINE__);
975 * vxge_hal_fifo_txdl_buffer_set_aligned - Align transmit buffer and fill
976 * in fifo descriptor.
977 * @vpath_handle: Virtual path handle.
978 * @txdlh: Descriptor handle.
979 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
981 * @vaddr: Virtual address of the data buffer.
982 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
983 * @size: Size of the data buffer (in bytes).
984 * @misaligned_size: Size (in bytes) of the misaligned portion of the
985 * data buffer. Calculated by the caller, based on the platform/OS/other
986 * specific criteria, which is outside of HAL's domain. See notes below.
988 * This API is part of the transmit descriptor preparation for posting
989 * (via vxge_hal_fifo_txdl_post()). The related "preparation" APIs include
990 * vxge_hal_fifo_txdl_mss_set() and vxge_hal_fifo_txdl_cksum_set_bits().
991 * All three APIs fill in the fields of the fifo descriptor,
992 * in accordance with the X3100 specification.
993 * On the PCI-X based systems aligning transmit data typically provides better
994 * transmit performance. The typical alignment granularity: L2 cacheline size.
995 * However, HAL does not make assumptions in terms of the alignment granularity;
996 * this is specified via additional @misaligned_size parameter described above.
997 * Prior to calling vxge_hal_fifo_txdl_buffer_set_aligned(),
998 * ULD is supposed to check alignment of a given fragment/buffer. For this HAL
999 * provides a separate vxge_hal_check_alignment() API sufficient to cover
1000 * most (but not all) possible alignment criteria.
1001 * If the buffer appears to be aligned, the ULD calls
1002 * vxge_hal_fifo_txdl_buffer_set().
1003 * Otherwise, ULD calls vxge_hal_fifo_txdl_buffer_set_aligned().
1005 * Note; This API is a "superset" of vxge_hal_fifo_txdl_buffer_set(). In
1006 * addition to filling in the specified descriptor it aligns transmit data on
1007 * the specified boundary.
1008 * Note: Decision on whether to align or not to align a given contiguous
1009 * transmit buffer is outside of HAL's domain. To this end ULD can use any
1010 * programmable criteria, which can help to 1) boost transmit performance,
1011 * and/or 2) provide a workaround for PCI bridge bugs, if any.
/*
 * NOTE(review): lines are missing from this extract.  Visible flow: copy
 * the misaligned head of the fragment into the TxDL's alignment buffer,
 * point one TxD at that bounce copy, then (if anything remains) point the
 * next TxD directly at the already-aligned remainder of the caller's
 * buffer.
 * NOTE(review): the memcpy at the "1070" line happens before the
 * align_used_frags limit check — confirm ordering against the full
 * source; as visible here a copy occurs even on the error return path.
 */
1015 vxge_hal_fifo_txdl_buffer_set_aligned(
1016 vxge_hal_vpath_h vpath_handle,
1017 vxge_hal_txdl_h txdlh,
1020 dma_addr_t dma_pointer,
1022 u32 misaligned_size)
1024 __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1026 __hal_device_t *hldev;
1027 __hal_fifo_txdl_priv_t *txdl_priv;
1028 vxge_hal_fifo_txd_t *txdp;
1030 ptrdiff_t prev_boff;
1032 vxge_assert((vpath_handle != NULL) && (txdlh != NULL) &&
1033 (vaddr != 0) && (dma_pointer != 0) &&
1034 (size != 0) && (misaligned_size != 0));
1036 hldev = vp->vpath->hldev;
1038 vxge_hal_trace_log_fifo("==> %s:%s:%d",
1039 __FILE__, __func__, __LINE__);
1041 vxge_hal_trace_log_fifo(
1042 "vpath_handle = 0x"VXGE_OS_STXFMT", txdlh = 0x"VXGE_OS_STXFMT", "
1043 "frag_idx = %d, vaddr = 0x"VXGE_OS_STXFMT", "
1044 "dma_pointer = 0x"VXGE_OS_LLXFMT", size = %d, "
1045 "misaligned_size = %d", (ptr_t) vpath_handle,
1046 (ptr_t) txdlh, frag_idx, (ptr_t) vaddr, (u64) dma_pointer, size,
1049 fifo = (__hal_fifo_t *) vp->vpath->fifoh;
1051 vxge_assert(fifo != NULL);
1053 txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdlh);
1055 txdp = (vxge_hal_fifo_txd_t *) txdlh + txdl_priv->frags;
1058 * On some systems buffer size could be zero.
1059 * It is the responsibility of ULD and *not HAL* to
1060 * detect it and skip it.
1062 vxge_assert(size > 0);
1063 vxge_assert(frag_idx < txdl_priv->alloc_frags);
1064 vxge_assert(misaligned_size != 0 &&
1065 misaligned_size <= fifo->config->alignment_size);
1067 remaining_size = size - misaligned_size;
1068 vxge_assert(remaining_size >= 0);
/* Bounce the misaligned head into the alignment buffer. */
1070 vxge_os_memcpy((char *) txdl_priv->align_vaddr_start,
1071 vaddr, misaligned_size);
1073 if (txdl_priv->align_used_frags >= fifo->config->max_aligned_frags) {
1074 return (VXGE_HAL_ERR_OUT_ALIGNED_FRAGS);
1077 /* setup new buffer */
1079 prev_boff = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr;
1080 txdp->buffer_pointer = (u64) txdl_priv->align_dma_addr + prev_boff;
1081 txdp->control_0 |= VXGE_HAL_FIFO_TXD_BUFFER_SIZE(misaligned_size);
1082 txdl_priv->bytes_sent += misaligned_size;
1083 fifo->stats->total_buffers++;
1085 txdl_priv->align_used_frags++;
1086 txdl_priv->align_vaddr_start += fifo->config->alignment_size;
1087 txdl_priv->align_dma_offset = 0;
1089 #if defined(VXGE_OS_DMA_REQUIRES_SYNC)
1090 /* sync new buffer */
1091 vxge_os_dma_sync(fifo->channel.pdev,
1092 txdl_priv->align_dma_handle,
1093 txdp->buffer_pointer,
1096 VXGE_OS_DMA_DIR_TODEVICE);
/* The tail of the fragment is already aligned; DMA it in place. */
1099 if (remaining_size) {
1100 vxge_assert(frag_idx < txdl_priv->alloc_frags);
1102 txdp->buffer_pointer = (u64) dma_pointer + misaligned_size;
1104 VXGE_HAL_FIFO_TXD_BUFFER_SIZE(remaining_size);
1105 txdl_priv->bytes_sent += remaining_size;
1106 fifo->stats->total_buffers++;
1110 vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0",
1111 __FILE__, __func__, __LINE__);
1112 return (VXGE_HAL_OK);
1116 * vxge_hal_fifo_txdl_buffer_append - Append the contents of virtually
1117 * contiguous data buffer to a single physically contiguous buffer.
1118 * @vpath_handle: Virtual path handle.
1119 * @txdlh: Descriptor handle.
1120 * @vaddr: Virtual address of the data buffer.
1121 * @size: Size of the data buffer (in bytes).
1123 * This API is part of the transmit descriptor preparation for posting
1124 * (via vxge_hal_fifo_txdl_post()).
1125 * The main difference of this API wrt to the APIs
1126 * vxge_hal_fifo_txdl_buffer_set_aligned() is that this API appends the
1127 * contents of virtually contiguous data buffers received from
1128 * upper layer into a single physically contiguous data buffer and the
1129 * device will do a DMA from this buffer.
1131 * See Also: vxge_hal_fifo_txdl_buffer_finalize(),
1132 * vxge_hal_fifo_txdl_buffer_set(),
1133 * vxge_hal_fifo_txdl_buffer_set_aligned().
/*
 * NOTE(review): some lines are missing from this extract.  Visible flow:
 * bounds-check against the fifo's alignment buffer, memcpy the caller's
 * data at the current append offset, and advance align_dma_offset.
 * Returns VXGE_HAL_ERR_OUT_ALIGNED_FRAGS when the buffer would overflow.
 */
1136 vxge_hal_fifo_txdl_buffer_append(
1137 vxge_hal_vpath_h vpath_handle,
1138 vxge_hal_txdl_h txdlh,
1142 __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1144 __hal_device_t *hldev;
1145 __hal_fifo_txdl_priv_t *txdl_priv;
1148 vxge_assert((vpath_handle != NULL) && (txdlh != NULL) && (vaddr != 0) &&
1151 hldev = vp->vpath->hldev;
1153 vxge_hal_trace_log_fifo("==> %s:%s:%d",
1154 __FILE__, __func__, __LINE__);
1156 vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT", "
1157 "txdlh = 0x"VXGE_OS_STXFMT", vaddr = 0x"VXGE_OS_STXFMT", "
1158 "size = %d", (ptr_t) vpath_handle, (ptr_t) txdlh,
1159 (ptr_t) vaddr, size);
1161 fifo = (__hal_fifo_t *) vp->vpath->fifoh;
1163 vxge_assert(fifo != NULL);
1165 txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdlh);
/* Bytes already consumed in the alignment buffer: completed aligned
 * fragments plus the partially-filled current one. */
1168 used = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr;
1169 used += txdl_priv->align_dma_offset;
1171 if (used + (unsigned int)size > (unsigned int)fifo->align_size)
1172 return (VXGE_HAL_ERR_OUT_ALIGNED_FRAGS);
1174 vxge_os_memcpy((char *) txdl_priv->align_vaddr_start +
1175 txdl_priv->align_dma_offset, vaddr, size);
1177 fifo->stats->copied_frags++;
1179 txdl_priv->align_dma_offset += size;
1181 vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0",
1182 __FILE__, __func__, __LINE__);
1183 return (VXGE_HAL_OK);
1187 * vxge_hal_fifo_txdl_buffer_finalize - Prepares a descriptor that contains the
1188 * single physically contiguous buffer.
1190 * @vpath_handle: Virtual path handle.
1191 * @txdlh: Descriptor handle.
1192 * @frag_idx: Index of the data buffer in the Txdl list.
1194 * This API in conjunction with vxge_hal_fifo_txdl_buffer_append() prepares
1195 * a descriptor that consists of a single physically contiguous buffer
1196 * which in turn contains the contents of one or more virtually contiguous
1197 * buffers received from the upper layer.
1199 * See Also: vxge_hal_fifo_txdl_buffer_append().
1202 vxge_hal_fifo_txdl_buffer_finalize(
1203 vxge_hal_vpath_h vpath_handle,
1204 vxge_hal_txdl_h txdlh,
1207 __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1209 __hal_device_t *hldev;
1210 __hal_fifo_txdl_priv_t *txdl_priv;
1211 vxge_hal_fifo_txd_t *txdp;
1212 ptrdiff_t prev_boff;
1214 vxge_assert((vpath_handle != NULL) &&
1215 (txdlh != NULL) && (frag_idx != 0));
1217 hldev = vp->vpath->hldev;
1219 vxge_hal_trace_log_fifo("==> %s:%s:%d",
1220 __FILE__, __func__, __LINE__);
1222 vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT", "
1223 "txdlh = 0x"VXGE_OS_STXFMT", frag_idx = %d", (ptr_t) vpath_handle,
1224 (ptr_t) txdlh, frag_idx);
1226 fifo = (__hal_fifo_t *) vp->vpath->fifoh;
1228 vxge_assert(fifo != NULL);
1230 txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdlh);
/* Current (next unused) TxD in this TxDL. */
1231 txdp = (vxge_hal_fifo_txd_t *) txdlh + txdl_priv->frags;
/*
 * Offset of this appended region within the aligned area; point the
 * TxD's DMA buffer pointer at the corresponding device address.
 */
1234 prev_boff = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr;
1235 txdp->buffer_pointer = (u64) txdl_priv->align_dma_addr + prev_boff;
1237 VXGE_HAL_FIFO_TXD_BUFFER_SIZE(txdl_priv->align_dma_offset);
1238 txdl_priv->bytes_sent += (unsigned int)txdl_priv->align_dma_offset;
1239 fifo->stats->total_buffers++;
1240 fifo->stats->copied_buffers++;
1242 txdl_priv->align_used_frags++;
1244 #if defined(VXGE_OS_DMA_REQUIRES_SYNC)
1245 /* sync pre-mapped buffer */
1246 vxge_os_dma_sync(fifo->channel.pdev,
1247 txdl_priv->align_dma_handle,
1248 txdp->buffer_pointer,
1250 txdl_priv->align_dma_offset,
1251 VXGE_OS_DMA_DIR_TODEVICE);
1254 /* increment vaddr_start for the next buffer_append() iteration */
1255 txdl_priv->align_vaddr_start += txdl_priv->align_dma_offset;
1256 txdl_priv->align_dma_offset = 0;
1258 vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0",
1259 __FILE__, __func__, __LINE__);
1263 * vxge_hal_fifo_txdl_new_frame_set - Start the new packet by setting TXDL flags
1264 * @vpath_handle: virtual path handle.
1265 * @txdlh: Descriptor handle.
1266 * @tagged: Is the frame tagged
1268 * This API is part of the preparation of the transmit descriptor for posting
1269 * (via vxge_hal_fifo_txdl_post()). This api is used to mark the end of previous
1270 * frame and start of a new frame.
1274 vxge_hal_fifo_txdl_new_frame_set(
1275 vxge_hal_vpath_h vpath_handle,
1276 vxge_hal_txdl_h txdlh,
1279 __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1281 __hal_device_t *hldev;
1282 __hal_fifo_txdl_priv_t *txdl_priv;
1283 vxge_hal_fifo_txd_t *txdp;
1285 vxge_assert((vpath_handle != NULL) && (txdlh != NULL));
1287 hldev = vp->vpath->hldev;
1289 vxge_hal_trace_log_fifo("==> %s:%s:%d",
1290 __FILE__, __func__, __LINE__);
1292 vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT", "
1293 "txdlh = 0x"VXGE_OS_STXFMT", tagged = %d",
1294 (ptr_t) vpath_handle, (ptr_t) txdlh, tagged);
1296 fifo = (__hal_fifo_t *) vp->vpath->fifoh;
1298 vxge_assert(fifo != NULL);
1300 txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdlh);
/* TxD that will carry the first fragment of the new frame. */
1302 txdp = (vxge_hal_fifo_txd_t *) txdlh + txdl_priv->frags;
/* Mark start of frame: steering, FIRST gather code, interrupt routing. */
1305 VXGE_HAL_FIFO_TXD_HOST_STEER(vp->vpath->vp_config->wire_port);
1306 txdp->control_0 |= VXGE_HAL_FIFO_TXD_GATHER_CODE(
1307 VXGE_HAL_FIFO_TXD_GATHER_CODE_FIRST);
1308 txdp->control_1 |= fifo->interrupt_type;
1309 txdp->control_1 |= VXGE_HAL_FIFO_TXD_INT_NUMBER(
1310 vp->vpath->tx_intr_num);
1312 txdp->control_1 |= VXGE_HAL_FIFO_TXD_NO_BW_LIMIT;
/*
 * If fragments were already filled in, the previous TxD terminates the
 * preceding frame: tag it with the LAST gather code.
 */
1313 if (txdl_priv->frags) {
1315 txdp = (vxge_hal_fifo_txd_t *) txdlh + (txdl_priv->frags - 1);
1317 txdp->control_0 |= VXGE_HAL_FIFO_TXD_GATHER_CODE(
1318 VXGE_HAL_FIFO_TXD_GATHER_CODE_LAST);
1322 vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0",
1323 __FILE__, __func__, __LINE__);
1327 * vxge_hal_fifo_txdl_post - Post descriptor on the fifo channel.
1328 * @vpath_handle: Virtual path handle.
1329 * @txdlh: Descriptor obtained via vxge_hal_fifo_txdl_reserve()
1330 * @tagged: Is the frame tagged
1332 * Post descriptor on the 'fifo' type channel for transmission.
1333 * Prior to posting the descriptor should be filled in accordance with
1334 * Host/X3100 interface specification for a given service (LL, etc.).
1338 vxge_hal_fifo_txdl_post(
1339 vxge_hal_vpath_h vpath_handle,
1340 vxge_hal_txdl_h txdlh,
1344 __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1346 __hal_device_t *hldev;
1347 __hal_fifo_txdl_priv_t *txdl_priv;
1348 vxge_hal_fifo_txd_t *txdp_last;
1349 vxge_hal_fifo_txd_t *txdp_first;
1351 #if defined(VXGE_HAL_TX_MULTI_POST_IRQ)
1352 unsigned long flags = 0;
1356 vxge_assert((vpath_handle != NULL) && (txdlh != NULL));
1358 hldev = vp->vpath->hldev;
1360 vxge_hal_trace_log_fifo("==> %s:%s:%d",
1361 __FILE__, __func__, __LINE__);
1363 vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT", "
1364 "txdlh = 0x"VXGE_OS_STXFMT", tagged = %d",
1365 (ptr_t) vpath_handle, (ptr_t) txdlh, tagged);
1367 fifo = (__hal_fifo_t *) vp->vpath->fifoh;
1369 vxge_assert(fifo != NULL);
1371 txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdlh);
/* First TxD of the list: steering, FIRST gather code, interrupt routing. */
1373 txdp_first = (vxge_hal_fifo_txd_t *) txdlh;
1374 txdp_first->control_0 |=
1375 VXGE_HAL_FIFO_TXD_HOST_STEER(vp->vpath->vp_config->wire_port);
1376 txdp_first->control_0 |=
1377 VXGE_HAL_FIFO_TXD_GATHER_CODE(VXGE_HAL_FIFO_TXD_GATHER_CODE_FIRST);
1378 txdp_first->control_1 |=
1379 VXGE_HAL_FIFO_TXD_INT_NUMBER(vp->vpath->tx_intr_num);
1380 txdp_first->control_1 |= fifo->interrupt_type;
1381 list_ptr = (u64) txdl_priv->dma_addr;
1383 txdp_first->control_1 |= VXGE_HAL_FIFO_TXD_NO_BW_LIMIT;
/* Last filled TxD closes the gather list with the LAST gather code. */
1388 (vxge_hal_fifo_txd_t *) txdlh + (txdl_priv->frags - 1);
1389 txdp_last->control_0 |=
1390 VXGE_HAL_FIFO_TXD_GATHER_CODE(VXGE_HAL_FIFO_TXD_GATHER_CODE_LAST);
/* Serialize posting against other posters on this fifo channel. */
1392 #if defined(VXGE_HAL_TX_MULTI_POST)
1393 vxge_os_spin_lock(&fifo->channel.post_lock);
1394 #elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
1395 vxge_os_spin_lock_irq(&fifo->channel.post_lock, flags);
/* Hand ownership of the TxDL to the adapter; must precede the doorbell. */
1398 txdp_first->control_0 |= VXGE_HAL_FIFO_TXD_LIST_OWN_ADAPTER;
1400 #if defined(VXGE_DEBUG_ASSERT)
1401 /* make sure device overwrites the t_code value on completion */
1402 txdp_first->control_0 |=
1403 VXGE_HAL_FIFO_TXD_T_CODE(VXGE_HAL_FIFO_TXD_T_CODE_UNUSED);
1406 #if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_TXDL_STREAMING)
1407 /* sync the TxDL to device */
1408 vxge_os_dma_sync(fifo->channel.pdev,
1409 txdl_priv->dma_handle,
1410 txdl_priv->dma_addr,
1411 txdl_priv->dma_offset,
1412 txdl_priv->frags << 5, /* sizeof(vxge_hal_fifo_txd_t) */
1413 VXGE_OS_DMA_DIR_TODEVICE);
1416 * we want touch dtr_arr in order with ownership bit set to HW
1418 __hal_channel_dtr_post(&fifo->channel, VXGE_HAL_FIFO_TXDL_INDEX(txdlh));
/* Ring the non-offload doorbell to notify the device of the new TxDL. */
1420 __hal_non_offload_db_post(vpath_handle,
1422 txdl_priv->frags - 1,
1423 vp->vpath->vp_config->fifo.no_snoop_bits);
1425 #if defined(VXGE_HAL_FIFO_DUMP_TXD)
1426 vxge_hal_info_log_fifo(
1427 ""VXGE_OS_LLXFMT":"VXGE_OS_LLXFMT":"VXGE_OS_LLXFMT":"
1428 VXGE_OS_LLXFMT" dma "VXGE_OS_LLXFMT,
1429 txdp_first->control_0, txdp_first->control_1,
1430 txdp_first->buffer_pointer, VXGE_HAL_FIFO_TXDL_INDEX(txdp_first),
1431 txdl_priv->dma_addr);
/* Book-keeping: posts counter plus high-water mark of in-flight TxDLs. */
1434 fifo->stats->total_posts++;
1435 fifo->stats->common_stats.usage_cnt++;
1436 if (fifo->stats->common_stats.usage_max <
1437 fifo->stats->common_stats.usage_cnt)
1438 fifo->stats->common_stats.usage_max =
1439 fifo->stats->common_stats.usage_cnt;
1441 #if defined(VXGE_HAL_TX_MULTI_POST)
1442 vxge_os_spin_unlock(&fifo->channel.post_lock);
1443 #elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
1444 vxge_os_spin_unlock_irq(&fifo->channel.post_lock, flags);
1447 vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0",
1448 __FILE__, __func__, __LINE__);
1452 * vxge_hal_fifo_is_next_txdl_completed - Checks if the next txdl is completed
1453 * @vpath_handle: Virtual path handle.
1456 vxge_hal_fifo_is_next_txdl_completed(vxge_hal_vpath_h vpath_handle)
1458 __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1460 __hal_device_t *hldev;
1461 vxge_hal_fifo_txd_t *txdp;
1462 vxge_hal_txdl_h txdlh;
/* Default: nothing completed unless the ownership check below passes. */
1463 vxge_hal_status_e status = VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1465 #if defined(VXGE_HAL_TX_MULTI_POST_IRQ)
1466 unsigned long flags = 0;
1471 vxge_assert(vpath_handle != NULL);
1473 hldev = vp->vpath->hldev;
1475 vxge_hal_trace_log_fifo("==> %s:%s:%d",
1476 __FILE__, __func__, __LINE__);
1478 vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT,
1479 (ptr_t) vpath_handle);
1481 fifo = (__hal_fifo_t *) vp->vpath->fifoh;
1483 vxge_assert(fifo != NULL);
1485 #if defined(VXGE_HAL_TX_MULTI_POST)
1486 vxge_os_spin_lock(&fifo->channel.post_lock);
1487 #elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
1488 vxge_os_spin_lock_irq(&fifo->channel.post_lock, flags);
/* Peek (do not consume) the next candidate TxDL on the channel. */
1491 __hal_channel_dtr_try_complete(&fifo->channel, &txdlh);
1493 txdp = (vxge_hal_fifo_txd_t *) txdlh;
/* Completed iff the adapter has cleared the LIST_OWN bit. */
1494 if ((txdp != NULL) &&
1495 (!(txdp->control_0 & VXGE_HAL_FIFO_TXD_LIST_OWN_ADAPTER))) {
1496 status = VXGE_HAL_OK;
1499 #if defined(VXGE_HAL_TX_MULTI_POST)
1500 vxge_os_spin_unlock(&fifo->channel.post_lock);
1501 #elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
1502 vxge_os_spin_unlock_irq(&fifo->channel.post_lock, flags);
1505 vxge_hal_trace_log_fifo("<== %s:%s:%d Result: %d",
1506 __FILE__, __func__, __LINE__, status);
1508 /* no more completions */
1513 * vxge_hal_fifo_txdl_next_completed - Retrieve next completed descriptor.
1514 * @vpath_handle: Virtual path handle.
1515 * @txdlh: Descriptor handle. Returned by HAL.
1516 * @txdl_priv: Buffer to return the pointer to per txdl space
1517 * @t_code: Transfer code, as per X3100 User Guide,
1518 * Transmit Descriptor Format.
1521 * Retrieve the _next_ completed descriptor.
1522 * HAL uses channel callback (*vxge_hal_channel_callback_f) to notify
1523 * upper-layer driver (ULD) of new completed descriptors. After that
1524 * the ULD can use vxge_hal_fifo_txdl_next_completed to retrieve the rest
1525 * completions (the very first completion is passed by HAL via
1526 * vxge_hal_channel_callback_f).
1528 * Implementation-wise, the upper-layer driver is free to call
1529 * vxge_hal_fifo_txdl_next_completed either immediately from inside the
1530 * channel callback, or in a deferred fashion and separate (from HAL)
1533 * Non-zero @t_code means failure to process the descriptor.
1534 * The failure could happen, for instance, when the link is
1535 * down, in which case X3100 completes the descriptor because it
1536 * is not able to send the data out.
1538 * For details please refer to X3100 User Guide.
1540 * Returns: VXGE_HAL_OK - success.
1541 * VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1542 * are currently available for processing.
1546 vxge_hal_fifo_txdl_next_completed(
1547 vxge_hal_vpath_h vpath_handle,
1548 vxge_hal_txdl_h * txdlh,
1550 vxge_hal_fifo_tcode_e * t_code)
1553 __hal_device_t *hldev;
1554 vxge_hal_fifo_txd_t *txdp;
1556 #if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_TXDL_STREAMING)
1557 __hal_fifo_txdl_priv_t *priv;
1560 #if defined(VXGE_HAL_TX_MULTI_POST_IRQ)
1561 unsigned long flags = 0;
1565 __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
/* Default result until a host-owned TxDL is found below. */
1566 vxge_hal_status_e status = VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1568 vxge_assert((vpath_handle != NULL) &&
1569 (txdlh != NULL) && (t_code != NULL));
1571 hldev = vp->vpath->hldev;
1573 vxge_hal_trace_log_fifo("==> %s:%s:%d",
1574 __FILE__, __func__, __LINE__);
1576 vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT", "
1577 "txdlh = 0x"VXGE_OS_STXFMT", t_code = 0x"VXGE_OS_STXFMT,
1578 (ptr_t) vpath_handle, (ptr_t) txdlh, (ptr_t) t_code);
1580 fifo = (__hal_fifo_t *) vp->vpath->fifoh;
1582 vxge_assert(fifo != NULL);
1586 #if defined(VXGE_HAL_TX_MULTI_POST)
1587 vxge_os_spin_lock(&fifo->channel.post_lock);
1588 #elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
1589 vxge_os_spin_lock_irq(&fifo->channel.post_lock, flags);
/* Peek at the oldest outstanding TxDL on the channel. */
1592 __hal_channel_dtr_try_complete(&fifo->channel, txdlh);
1594 txdp = (vxge_hal_fifo_txd_t *) * txdlh;
1597 #if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_TXDL_STREAMING)
1598 priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdp);
1601 * sync TxDL to read the ownership
1603 * Note: 16bytes means Control_1 & Control_2
1605 vxge_os_dma_sync(fifo->channel.pdev,
1610 VXGE_OS_DMA_DIR_FROMDEVICE);
1613 /* check whether host owns it */
1614 if (!(txdp->control_0 & VXGE_HAL_FIFO_TXD_LIST_OWN_ADAPTER)) {
/* Consume the descriptor and report its ULD-private area and t_code. */
1616 __hal_channel_dtr_complete(&fifo->channel);
1618 *txdl_priv = VXGE_HAL_FIFO_ULD_PRIV(fifo, txdp);
1620 *t_code = (vxge_hal_fifo_tcode_e)
1621 VXGE_HAL_FIFO_TXD_T_CODE_GET(txdp->control_0);
1623 if (fifo->stats->common_stats.usage_cnt > 0)
1624 fifo->stats->common_stats.usage_cnt--;
1626 status = VXGE_HAL_OK;
1630 /* no more completions */
1631 #if defined(VXGE_HAL_TX_MULTI_POST)
1632 vxge_os_spin_unlock(&fifo->channel.post_lock);
1633 #elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
1634 vxge_os_spin_unlock_irq(&fifo->channel.post_lock, flags);
1637 vxge_hal_trace_log_fifo("<== %s:%s:%d Result: %d",
1638 __FILE__, __func__, __LINE__, status);
1644 * vxge_hal_fifo_handle_tcode - Handle transfer code.
1645 * @vpath_handle: Virtual Path handle.
1646 * @txdlh: Descriptor handle.
1647 * @t_code: One of the enumerated (and documented in the X3100 user guide)
1650 * Handle descriptor's transfer code. The latter comes with each completed
1653 * Returns: one of the vxge_hal_status_e {} enumerated types.
1654 * VXGE_HAL_OK - for success.
1655 * VXGE_HAL_ERR_CRITICAL - when encounters critical error.
1658 vxge_hal_fifo_handle_tcode(
1659 vxge_hal_vpath_h vpath_handle,
1660 vxge_hal_txdl_h txdlh,
1661 vxge_hal_fifo_tcode_e t_code)
1663 __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1664 __hal_device_t *hldev;
1666 vxge_assert((vpath_handle != NULL) && (txdlh != NULL));
1668 hldev = vp->vpath->hldev;
1670 vxge_hal_trace_log_fifo("==> %s:%s:%d",
1671 __FILE__, __func__, __LINE__);
1673 vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT", "
1674 "txdlh = 0x"VXGE_OS_STXFMT", t_code = 0x%d",
1675 (ptr_t) vpath_handle, (ptr_t) txdlh, t_code);
/* Dispatch on the 3-bit transfer code reported by the device. */
1677 switch ((t_code & 0x7)) {
1679 /* 000: Transfer operation completed successfully. */
1683 * 001: a PCI read transaction (either TxD or frame data)
1684 * returned with corrupt data.
1688 /* 010: a PCI read transaction was returned with no data. */
1692 * 011: The host attempted to send either a frame or LSO
1693 * MSS that was too long (>9800B).
1698 * 100: Error detected during TCP/UDP Large Send
1699 * Offload operation, due to improper header template,
1700 * unsupported protocol, etc.
/* Unrecognized transfer code: report and bail out. */
1704 vxge_hal_trace_log_fifo("<== %s:%s:%d Result: %d",
1705 __FILE__, __func__, __LINE__, VXGE_HAL_ERR_INVALID_TCODE);
1706 return (VXGE_HAL_ERR_INVALID_TCODE);
/*
 * Per-tcode error counter, indexed by the raw t_code value.
 * NOTE(review): assumes txd_t_code_err_cnt[] is sized for every
 * reachable t_code value — verify against the stats struct definition.
 */
1709 vp->vpath->sw_stats->fifo_stats.txd_t_code_err_cnt[t_code]++;
1711 vxge_hal_trace_log_fifo("<== %s:%s:%d Result: %d",
1712 __FILE__, __func__, __LINE__, VXGE_HAL_OK);
1713 return (VXGE_HAL_OK);
1717 * __hal_fifo_txdl_free_many - Free the fragments
1719 * @txdp: Pointer to a TxD
1720 * @list_size: List size
1721 * @frags: Number of fragments
1723 * This routine frees the fragments in a txdl
1726 __hal_fifo_txdl_free_many(
1728 vxge_hal_fifo_txd_t * txdp,
1732 __hal_fifo_txdl_priv_t *current_txdl_priv;
1733 __hal_fifo_txdl_priv_t *next_txdl_priv;
/* frags must be an exact multiple of list_size; anything else is corrupt. */
1734 u32 invalid_frags = frags % list_size;
1735 __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) fifo->channel.vph;
1736 __hal_device_t *hldev;
1738 vxge_assert((fifo != NULL) && (txdp != NULL));
1740 hldev = vp->vpath->hldev;
1742 vxge_hal_trace_log_fifo("==> %s:%s:%d",
1743 __FILE__, __func__, __LINE__);
1745 vxge_hal_trace_log_fifo(
1746 "fifo = 0x"VXGE_OS_STXFMT", txdp = 0x"VXGE_OS_STXFMT", "
1747 "list_size = %d, frags = %d", (ptr_t) fifo, (ptr_t) txdp,
1750 if (invalid_frags) {
1751 vxge_hal_trace_log_fifo(
1752 "freeing corrupt txdlh 0x"VXGE_OS_STXFMT", "
1753 "fragments %d list size %d",
1754 (ptr_t) txdp, frags, list_size);
1755 vxge_assert(invalid_frags == 0);
/* Walk the linked chain of TxDLs, returning each to the channel. */
1758 vxge_hal_trace_log_fifo("freeing linked txdlh 0x"VXGE_OS_STXFMT
1759 ", " "fragments %d list size %d",
1760 (ptr_t) txdp, frags, list_size);
1761 current_txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdp);
1762 #if defined(VXGE_DEBUG_ASSERT) && defined(VXGE_OS_MEMORY_CHECK)
1763 current_txdl_priv->allocated = 0;
1765 __hal_channel_dtr_free(&fifo->channel,
1766 VXGE_HAL_FIFO_TXDL_INDEX(txdp));
/* Detach and advance to the next TxDL in the chain, if any. */
1767 next_txdl_priv = current_txdl_priv->next_txdl_priv;
1770 if (next_txdl_priv) {
1771 current_txdl_priv->next_txdl_priv = NULL;
1772 txdp = next_txdl_priv->first_txdp;
1774 vxge_hal_trace_log_fifo(
1775 "freed linked txdlh fragments %d list size %d",
/* All fragments must be accounted for once the chain is exhausted. */
1781 vxge_assert(frags == 0);
1783 vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0",
1784 __FILE__, __func__, __LINE__);
1788 * vxge_hal_fifo_txdl_free - Free descriptor.
1789 * @vpath_handle: Virtual path handle.
1790 * @txdlh: Descriptor handle.
1792 * Free the reserved descriptor. This operation is "symmetrical" to
1793 * vxge_hal_fifo_txdl_reserve. The "free-ing" completes the descriptor's
1796 * After free-ing (see vxge_hal_fifo_txdl_free()) the descriptor again can
1799 * - reserved (vxge_hal_fifo_txdl_reserve);
1801 * - posted (vxge_hal_fifo_txdl_post);
1803 * - completed (vxge_hal_fifo_txdl_next_completed);
1805 * - and recycled again (vxge_hal_fifo_txdl_free).
1807 * For alternative state transitions and more details please refer to
1812 vxge_hal_fifo_txdl_free(
1813 vxge_hal_vpath_h vpath_handle,
1814 vxge_hal_txdl_h txdlh)
1816 __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
1818 __hal_device_t *hldev;
1819 __hal_fifo_txdl_priv_t *txdl_priv;
1822 #if defined(VXGE_HAL_TX_MULTI_POST_IRQ)
1826 vxge_assert((vpath_handle != NULL) && (txdlh != NULL));
1828 hldev = vp->vpath->hldev;
1830 vxge_hal_trace_log_fifo("==> %s:%s:%d",
1831 __FILE__, __func__, __LINE__);
1833 vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT", "
1834 "txdlh = 0x"VXGE_OS_STXFMT, (ptr_t) vpath_handle, (ptr_t) txdlh);
1836 fifo = (__hal_fifo_t *) vp->vpath->fifoh;
1838 vxge_assert(fifo != NULL);
1840 txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdlh);
1842 max_frags = fifo->config->max_frags;
1844 #if defined(VXGE_HAL_TX_MULTI_POST)
1845 vxge_os_spin_lock(&fifo->channel.post_lock);
1846 #elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
1847 vxge_os_spin_lock_irq(&fifo->channel.post_lock, flags);
1850 if (txdl_priv->alloc_frags > max_frags) {
1851 vxge_hal_fifo_txd_t *dang_txdp = (vxge_hal_fifo_txd_t *)
1852 txdl_priv->dang_txdl;
1853 u32 dang_frags = txdl_priv->dang_frags;
1854 u32 alloc_frags = txdl_priv->alloc_frags;
1855 txdl_priv->dang_txdl = NULL;
1856 txdl_priv->dang_frags = 0;
1857 txdl_priv->alloc_frags = 0;
1858 /* txdlh must have a linked list of txdlh */
1859 vxge_assert(txdl_priv->next_txdl_priv);
1861 /* free any dangling txdlh first */
1863 vxge_hal_info_log_fifo(
1864 "freeing dangled txdlh 0x"VXGE_OS_STXFMT" for %d "
1865 "fragments", (ptr_t) dang_txdp, dang_frags);
1866 __hal_fifo_txdl_free_many(fifo, dang_txdp,
1867 max_frags, dang_frags);
1870 /* now free the reserved txdlh list */
1871 vxge_hal_info_log_fifo(
1872 "freeing txdlh 0x"VXGE_OS_STXFMT" list of %d fragments",
1873 (ptr_t) txdlh, alloc_frags);
1874 __hal_fifo_txdl_free_many(fifo,
1875 (vxge_hal_fifo_txd_t *) txdlh, max_frags,
1878 __hal_channel_dtr_free(&fifo->channel,
1879 VXGE_HAL_FIFO_TXDL_INDEX(txdlh));
1882 fifo->channel.poll_bytes += txdl_priv->bytes_sent;
1884 #if defined(VXGE_DEBUG_ASSERT) && defined(VXGE_OS_MEMORY_CHECK)
1885 txdl_priv->allocated = 0;
1888 #if defined(VXGE_HAL_TX_MULTI_POST)
1889 vxge_os_spin_unlock(&fifo->channel.post_lock);
1890 #elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
1891 vxge_os_spin_unlock_irq(&fifo->channel.post_lock, flags);
1894 vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0",
1895 __FILE__, __func__, __LINE__);