1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
3 * Copyright (C) 2003-2014, 2018-2022 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
7 #include <linux/sched.h>
8 #include <linux/wait.h>
14 #include "iwl-op-mode.h"
15 #include "iwl-context-info-gen3.h"
17 /******************************************************************************
 *
 * RX path functions
 *
21 ******************************************************************************/
24 * Rx theory of operation
26 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
27 * each of which points to a Receive Buffer to be filled by the NIC. These get
28 * used not only for Rx frames, but for any command response or notification
29 * from the NIC. The driver and NIC manage the Rx buffers by means
30 * of indexes into the circular buffer.
33 * The host/firmware share two index registers for managing the Rx buffers.
35 * The READ index maps to the first position that the firmware may be writing
36 * to -- the driver can read up to (but not including) this position and get good data.
38 * The READ index is managed by the firmware once the card is enabled.
40 * The WRITE index maps to the last position the driver has read from -- the
41 * position preceding WRITE is the last slot the firmware can place a packet.
43 * The queue is empty (no good data) if WRITE = READ - 1, and is full if WRITE = READ.
46 * During initialization, the host sets up the READ queue position to the first
47 * INDEX position, and WRITE to the last (READ - 1 wrapped)
49 * When the firmware places a packet in a buffer, it will advance the READ index
50 * and fire the RX interrupt. The driver can then query the READ index and
51 * process as many packets as possible, moving the WRITE index forward as it
52 * restocks the Rx queue buffers with new memory.
54 * The management in the driver is as follows:
55 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
56 * When the interrupt handler is called, the request is processed.
57 * The page is either stolen - transferred to the upper layer
58 * or reused - added immediately to the iwl->rxq->rx_free list.
59 * + When the page is stolen - the driver updates the matching queue's used
60 * count, detaches the RBD and transfers it to the queue used list.
61 * When there are two used RBDs - they are transferred to the allocator empty
62 * list. Work is then scheduled for the allocator to start allocating
64 * When there are another 6 used RBDs - they are transferred to the allocator
65 * empty list and the driver tries to claim the pre-allocated buffers and
66 * add them to iwl->rxq->rx_free. If it fails - it continues to claim them until ready.
68 * When there are 8+ buffers in the free list - either from allocation or from
69 * 8 reused unstolen pages - restock is called to update the FW and indexes.
70 * + In order to make sure the allocator always has RBDs to use for allocation
71 *   the allocator has an initial pool of num_queues*(8-2) RBDs - the maximum
72 *   number of RBDs that can be missing per allocation request (a request is
73 *   posted with 2 empty RBDs; there is no guarantee when the other 6 RBDs are
74 *   supplied). The queues supply the recycling of the remaining RBDs.
 *   (See the worked example after this overview.)
75 * + A received packet is processed and handed to the kernel network stack,
76 * detached from the iwl->rxq. The driver 'processed' index is updated.
77 * + If there are no allocated buffers in iwl->rxq->rx_free,
78 * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
79 * If there were enough free buffers and RX_STALLED is set it is cleared.
84 * iwl_rxq_alloc() Allocates rx_free
85 * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls
86 * iwl_pcie_rxq_restock.
87 * Used only during initialization.
88 * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx
89 * queue, updates firmware pointers, and updates
91 * iwl_pcie_rx_allocator() Background work for allocating pages.
93 * -- enable interrupts --
94 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
95 * READ INDEX, detaching the SKB from the pool.
96 * Moves the packet buffer from queue to rx_used.
97 * Posts and claims requests to the allocator.
98 * Calls iwl_pcie_rxq_restock to refill any empty
104 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
106 * Regular Receive interrupt:
108 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
109 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
111 * rxq.queue -> rxq.rx_free -> rxq.queue
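 */

/*
 * Worked example (added for illustration; not part of the original comment):
 * with RX_CLAIM_REQ_ALLOC == 8 and RX_POST_REQ_ALLOC == 2 as used by this
 * driver, a transport with 4 RX queues keeps an allocator pool of
 *
 *	4 * (8 - 2) == 24 RBDs
 *
 * covering the worst case in which every queue has posted a request carrying
 * only its 2 empty RBDs while the other 6 are still missing.
 */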
117 * iwl_rxq_space - Return number of free slots available in queue.
119 static int iwl_rxq_space(const struct iwl_rxq *rxq)
121 /* Make sure rx queue size is a power of 2 */
122 WARN_ON(rxq->queue_size & (rxq->queue_size - 1));
125 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
126 * between empty and completely full queues.
127 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
128 * defined for negative dividends.
130 return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
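/*
 * Worked example (illustration only): with queue_size == 256, read == 2 and
 * write == 250, the subtraction (2 - 250 - 1) wraps around in unsigned
 * arithmetic, but masking with (256 - 1) still yields 7 free slots - the
 * same result as a mathematical modulo with a negative dividend, which the
 * C '%' operator would not provide.
 */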
134 * iwl_pcie_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
136 static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
138 return cpu_to_le32((u32)(dma_addr >> 8));
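/*
 * Example (illustration only): the device stores RB pointers as
 * byte-address / 256, so a page at DMA address 0x12345600 is written to the
 * RBD as 0x123456; the hardware shifts it back left by 8 when fetching the
 * buffer. This is why RBs must be at least 256-byte aligned.
 */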
142 * iwl_pcie_rx_stop - stops the Rx DMA
144 int iwl_pcie_rx_stop(struct iwl_trans *trans)
146 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
147 /* TODO: remove this once fw does it */
148 iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
149 return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3,
150 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
151 } else if (trans->trans_cfg->mq_rx_supported) {
152 iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
153 return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
154 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
156 iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
157 return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
157 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
164 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
166 static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
171 lockdep_assert_held(&rxq->lock);
174 * explicitly wake up the NIC if:
175 * 1. shadow registers aren't enabled
176 * 2. there is a chance that the NIC is asleep
178 if (!trans->trans_cfg->base_params->shadow_reg_enable &&
179 test_bit(STATUS_TPOWER_PMI, &trans->status)) {
180 reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
182 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
183 IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
185 iwl_set_bit(trans, CSR_GP_CNTRL,
186 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
187 rxq->need_update = true;
192 rxq->write_actual = round_down(rxq->write, 8);
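/*
 * Example (illustration only): with rxq->write == 13, round_down(13, 8) == 8,
 * so the device is told about 8 fresh buffers now; the remaining 5 are only
 * advertised once write reaches the next multiple of 8.
 */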
193 if (!trans->trans_cfg->mq_rx_supported)
194 iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
195 else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
196 iwl_write32(trans, HBUS_TARG_WRPTR, rxq->write_actual |
197 HBUS_TARG_WRPTR_RX_Q(rxq->id));
199 iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id), rxq->write_actual);
203 static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
205 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
208 for (i = 0; i < trans->num_rx_queues; i++) {
209 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
211 if (!rxq->need_update)
213 spin_lock_bh(&rxq->lock);
214 iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
215 rxq->need_update = false;
216 spin_unlock_bh(&rxq->lock);
220 static void iwl_pcie_restock_bd(struct iwl_trans *trans,
222 struct iwl_rx_mem_buffer *rxb)
224 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
225 struct iwl_rx_transfer_desc *bd = rxq->bd;
227 BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));
229 bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
230 bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
232 __le64 *bd = rxq->bd;
234 bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
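/*
 * Example (illustration only): the pre-AX210 multi-queue descriptor packs
 * the buffer address and the virtual buffer ID into one 64-bit word. With a
 * page at DMA address 0x1f2a4000 (low bits clear, see the WARN_ON in the
 * caller) and vid 0x005, the descriptor is 0x1f2a4005; the 12-bit vid comes
 * back in the completion descriptor so the driver can look the buffer up in
 * global_table.
 */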
237 #if defined(__linux__)
238 IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n",
    (u32)rxb->vid, rxq->id, rxq->write);
239 #elif defined(__FreeBSD__)
240 IWL_DEBUG_PCI_RW(trans, "Assigned virtual RB ID %u to queue %d index %d\n",
241 (u32)rxb->vid, rxq->id, rxq->write);
#endif
246 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
248 static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
251 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
252 struct iwl_rx_mem_buffer *rxb;
255 * If the device isn't enabled - no need to try to add buffers...
256 * This can happen when we stop the device and still have an interrupt
257 * pending. We stop the APM before we sync the interrupts because we
258 * have to (see comment there). On the other hand, since the APM is
259 * stopped, we cannot access the HW (in particular not prph).
260 * So don't try to restock if the APM has been already stopped.
262 if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
265 spin_lock_bh(&rxq->lock);
266 while (rxq->free_count) {
267 /* Get next free Rx buffer, remove from free list */
268 rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, list);
270 list_del(&rxb->list);
271 rxb->invalid = false;
272 /* some low bits are expected to be unset (depending on hw) */
273 WARN_ON(rxb->page_dma & trans_pcie->supported_dma_mask);
274 /* Point to Rx buffer via next RBD in circular buffer */
275 iwl_pcie_restock_bd(trans, rxq, rxb);
276 rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);
279 spin_unlock_bh(&rxq->lock);
282 * If we've added more space for the firmware to place data, tell it.
283 * Increment device's write pointer in multiples of 8.
285 if (rxq->write_actual != (rxq->write & ~0x7)) {
286 spin_lock_bh(&rxq->lock);
287 iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
288 spin_unlock_bh(&rxq->lock);
293 * iwl_pcie_rxsq_restock - restock implementation for single queue rx
295 static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
298 struct iwl_rx_mem_buffer *rxb;
301 * If the device isn't enabled - no need to try to add buffers...
302 * This can happen when we stop the device and still have an interrupt
303 * pending. We stop the APM before we sync the interrupts because we
304 * have to (see comment there). On the other hand, since the APM is
305 * stopped, we cannot access the HW (in particular not prph).
306 * So don't try to restock if the APM has been already stopped.
308 if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
311 spin_lock_bh(&rxq->lock);
312 while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
313 __le32 *bd = (__le32 *)rxq->bd;
314 /* The overwritten rxb must be a used one */
315 rxb = rxq->queue[rxq->write];
316 BUG_ON(rxb && rxb->page);
318 /* Get next free Rx buffer, remove from free list */
319 rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, list);
321 list_del(&rxb->list);
322 rxb->invalid = false;
324 /* Point to Rx buffer via next RBD in circular buffer */
325 bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
326 rxq->queue[rxq->write] = rxb;
327 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
330 spin_unlock_bh(&rxq->lock);
332 /* If we've added more space for the firmware to place data, tell it.
333 * Increment device's write pointer in multiples of 8. */
334 if (rxq->write_actual != (rxq->write & ~0x7)) {
335 spin_lock_bh(&rxq->lock);
336 iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
337 spin_unlock_bh(&rxq->lock);
342 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
344 * If there are slots in the RX queue that need to be restocked,
345 * and we have free pre-allocated buffers, fill the ranks as much
346 * as we can, pulling from rx_free.
348 * This moves the 'write' index forward to catch up with 'processed', and
349 * also updates the memory address in the firmware to reference the new target buffer.
353 void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
355 if (trans->trans_cfg->mq_rx_supported)
356 iwl_pcie_rxmq_restock(trans, rxq);
358 iwl_pcie_rxsq_restock(trans, rxq);
362 * iwl_pcie_rx_alloc_page - allocates and returns a page.
365 static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
366 u32 *offset, gfp_t priority)
368 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
369 unsigned int rbsize = iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
370 unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order;
372 gfp_t gfp_mask = priority;
374 if (trans_pcie->rx_page_order > 0)
375 gfp_mask |= __GFP_COMP;
377 if (trans_pcie->alloc_page) {
378 spin_lock_bh(&trans_pcie->alloc_page_lock);
380 if (trans_pcie->alloc_page) {
381 *offset = trans_pcie->alloc_page_used;
382 page = trans_pcie->alloc_page;
383 trans_pcie->alloc_page_used += rbsize;
384 if (trans_pcie->alloc_page_used >= allocsize)
385 trans_pcie->alloc_page = NULL;
388 spin_unlock_bh(&trans_pcie->alloc_page_lock);
391 spin_unlock_bh(&trans_pcie->alloc_page_lock);
394 /* Alloc a new receive buffer */
395 page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
398 IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
399 trans_pcie->rx_page_order);
401 * Issue an error if we don't have enough pre-allocated buffers.
404 if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
406 "Failed to alloc_pages\n");
410 if (2 * rbsize <= allocsize) {
411 spin_lock_bh(&trans_pcie->alloc_page_lock);
412 if (!trans_pcie->alloc_page) {
414 trans_pcie->alloc_page = page;
415 trans_pcie->alloc_page_used = rbsize;
417 spin_unlock_bh(&trans_pcie->alloc_page_lock);
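/*
 * Example (illustration only): with PAGE_SIZE == 4096 and a 2 KiB RB size,
 * 2 * rbsize <= allocsize holds, so the second half of the page is parked in
 * trans_pcie->alloc_page and the next call hands out the same page again at
 * offset 2048 instead of allocating a fresh one.
 */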
425 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
427 * A used RBD is an Rx buffer that has been given to the stack. To use it again
428 * a page must be allocated and the RBD must point to the page. This function
429 * doesn't change the HW pointer but handles the list of pages that is used by
430 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly allocated buffers.
433 void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
436 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
437 struct iwl_rx_mem_buffer *rxb;
443 spin_lock_bh(&rxq->lock);
444 if (list_empty(&rxq->rx_used)) {
445 spin_unlock_bh(&rxq->lock);
448 spin_unlock_bh(&rxq->lock);
450 page = iwl_pcie_rx_alloc_page(trans, &offset, priority);
454 spin_lock_bh(&rxq->lock);
456 if (list_empty(&rxq->rx_used)) {
457 spin_unlock_bh(&rxq->lock);
458 __free_pages(page, trans_pcie->rx_page_order);
461 rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer, list);
463 list_del(&rxb->list);
464 spin_unlock_bh(&rxq->lock);
468 rxb->offset = offset;
469 /* Get physical address of the RB */
471 rxb->page_dma = dma_map_page(trans->dev, page, rxb->offset,
472 trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE);
474 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
476 spin_lock_bh(&rxq->lock);
477 list_add(&rxb->list, &rxq->rx_used);
478 spin_unlock_bh(&rxq->lock);
479 __free_pages(page, trans_pcie->rx_page_order);
483 spin_lock_bh(&rxq->lock);
485 list_add_tail(&rxb->list, &rxq->rx_free);
488 spin_unlock_bh(&rxq->lock);
492 void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
494 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
497 if (!trans_pcie->rx_pool)
500 for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) {
501 if (!trans_pcie->rx_pool[i].page)
503 dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
504 trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE);
505 __free_pages(trans_pcie->rx_pool[i].page,
506 trans_pcie->rx_page_order);
507 trans_pcie->rx_pool[i].page = NULL;
512 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
514 * Allocates 8 pages for each received request.
515 * Called as a scheduled work item.
517 static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
519 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
520 struct iwl_rb_allocator *rba = &trans_pcie->rba;
521 struct list_head local_empty;
522 int pending = atomic_read(&rba->req_pending);
524 IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending);
526 /* If we were scheduled - there is at least one request */
527 spin_lock_bh(&rba->lock);
528 /* swap out the rba->rbd_empty to a local list */
529 list_replace_init(&rba->rbd_empty, &local_empty);
530 spin_unlock_bh(&rba->lock);
534 LIST_HEAD(local_allocated);
535 gfp_t gfp_mask = GFP_KERNEL;
537 /* Do not post a warning if there are only a few requests */
538 if (pending < RX_PENDING_WATERMARK)
539 gfp_mask |= __GFP_NOWARN;
541 for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
542 struct iwl_rx_mem_buffer *rxb;
545 /* List should never be empty - each reused RBD is
546 * returned to the list, and initial pool covers any
547 * possible gap between the time the page is allocated
548 * to the time the RBD is added.
550 BUG_ON(list_empty(&local_empty));
551 /* Get the first rxb from the rbd list */
552 rxb = list_first_entry(&local_empty,
553 struct iwl_rx_mem_buffer, list);
556 /* Alloc a new receive buffer */
557 page = iwl_pcie_rx_alloc_page(trans, &rxb->offset,
563 /* Get physical address of the RB */
564 rxb->page_dma = dma_map_page(trans->dev, page,
566 rxb->offset, trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE);
568 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
570 __free_pages(page, trans_pcie->rx_page_order);
574 /* move the allocated entry to the out list */
575 list_move(&rxb->list, &local_allocated);
579 atomic_dec(&rba->req_pending);
583 pending = atomic_read(&rba->req_pending);
586 "Got more pending allocation requests = %d\n",
590 spin_lock_bh(&rba->lock);
591 /* add the allocated rbds to the allocator allocated list */
592 list_splice_tail(&local_allocated, &rba->rbd_allocated);
593 /* get more empty RBDs for current pending requests */
594 list_splice_tail_init(&rba->rbd_empty, &local_empty);
595 spin_unlock_bh(&rba->lock);
597 atomic_inc(&rba->req_ready);
601 spin_lock_bh(&rba->lock);
602 /* return unused rbds to the allocator empty list */
603 list_splice_tail(&local_empty, &rba->rbd_empty);
604 spin_unlock_bh(&rba->lock);
606 IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__);
610 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
612 * Called by the queue when it has posted an allocation request and
613 * has freed 8 RBDs in order to restock itself.
614 * This function directly moves the allocated RBs to the queue's ownership
615 * and updates the relevant counters.
617 static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
620 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
621 struct iwl_rb_allocator *rba = &trans_pcie->rba;
624 lockdep_assert_held(&rxq->lock);
627 * atomic_dec_if_positive returns req_ready - 1 in any scenario.
628 * If req_ready is 0, atomic_dec_if_positive will return -1 and this
629 * function will return early, as there are no ready requests.
630 * atomic_dec_if_positive will perform the *actual* decrement only if
631 * req_ready > 0, i.e. there are ready requests and the function
632 * hands one request to the caller.
634 if (atomic_dec_if_positive(&rba->req_ready) < 0)
637 spin_lock(&rba->lock);
638 for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
639 /* Get next free Rx buffer, remove it from free list */
640 struct iwl_rx_mem_buffer *rxb =
641 list_first_entry(&rba->rbd_allocated,
642 struct iwl_rx_mem_buffer, list);
644 list_move(&rxb->list, &rxq->rx_free);
646 spin_unlock(&rba->lock);
648 rxq->used_count -= RX_CLAIM_REQ_ALLOC;
649 rxq->free_count += RX_CLAIM_REQ_ALLOC;
652 void iwl_pcie_rx_allocator_work(struct work_struct *data)
654 struct iwl_rb_allocator *rba_p =
655 container_of(data, struct iwl_rb_allocator, rx_alloc);
656 struct iwl_trans_pcie *trans_pcie =
657 container_of(rba_p, struct iwl_trans_pcie, rba);
659 iwl_pcie_rx_allocator(trans_pcie->trans);
662 static int iwl_pcie_free_bd_size(struct iwl_trans *trans)
664 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
665 return sizeof(struct iwl_rx_transfer_desc);
667 return trans->trans_cfg->mq_rx_supported ?
668 sizeof(__le64) : sizeof(__le32);
671 static int iwl_pcie_used_bd_size(struct iwl_trans *trans)
673 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
674 return sizeof(struct iwl_rx_completion_desc_bz);
676 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
677 return sizeof(struct iwl_rx_completion_desc);
679 return sizeof(__le32);
682 static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
685 int free_size = iwl_pcie_free_bd_size(trans);
688 dma_free_coherent(trans->dev,
689 free_size * rxq->queue_size,
690 rxq->bd, rxq->bd_dma);
694 rxq->rb_stts_dma = 0;
698 dma_free_coherent(trans->dev,
699 iwl_pcie_used_bd_size(trans) * rxq->queue_size,
701 rxq->used_bd, rxq->used_bd_dma);
702 rxq->used_bd_dma = 0;
706 static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
709 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
710 struct device *dev = trans->dev;
713 bool use_rx_td = (trans->trans_cfg->device_family >=
714 IWL_DEVICE_FAMILY_AX210);
715 size_t rb_stts_size = use_rx_td ? sizeof(__le16) :
716 sizeof(struct iwl_rb_status);
718 spin_lock_init(&rxq->lock);
719 if (trans->trans_cfg->mq_rx_supported)
720 rxq->queue_size = trans->cfg->num_rbds;
722 rxq->queue_size = RX_QUEUE_SIZE;
724 free_size = iwl_pcie_free_bd_size(trans);
727 * Allocate the circular buffer of Read Buffer Descriptors
730 rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size,
731 &rxq->bd_dma, GFP_KERNEL);
735 if (trans->trans_cfg->mq_rx_supported) {
736 rxq->used_bd = dma_alloc_coherent(dev,
737 iwl_pcie_used_bd_size(trans) * rxq->queue_size, &rxq->used_bd_dma, GFP_KERNEL);
745 rxq->rb_stts = (u8 *)trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
747 rxq->rb_stts_dma = trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;
752 for (i = 0; i < trans->num_rx_queues; i++) {
753 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
755 iwl_pcie_free_rxq_dma(trans, rxq);
761 static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
763 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
764 struct iwl_rb_allocator *rba = &trans_pcie->rba;
766 size_t rb_stts_size = trans->trans_cfg->device_family >=
767 IWL_DEVICE_FAMILY_AX210 ?
768 sizeof(__le16) : sizeof(struct iwl_rb_status);
770 if (WARN_ON(trans_pcie->rxq))
773 trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
775 trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
776 sizeof(trans_pcie->rx_pool[0]),
778 trans_pcie->global_table =
779 kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
780 sizeof(trans_pcie->global_table[0]),
782 if (!trans_pcie->rxq || !trans_pcie->rx_pool ||
783 !trans_pcie->global_table) {
788 spin_lock_init(&rba->lock);
791 * Allocate the driver's pointer to receive buffer status.
792 * Allocate for all queues continuously (HW requirement).
794 trans_pcie->base_rb_stts =
795 dma_alloc_coherent(trans->dev,
796 rb_stts_size * trans->num_rx_queues,
797 &trans_pcie->base_rb_stts_dma,
799 if (!trans_pcie->base_rb_stts) {
804 for (i = 0; i < trans->num_rx_queues; i++) {
805 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
808 ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
815 if (trans_pcie->base_rb_stts) {
816 dma_free_coherent(trans->dev,
817 rb_stts_size * trans->num_rx_queues,
818 trans_pcie->base_rb_stts,
819 trans_pcie->base_rb_stts_dma);
820 trans_pcie->base_rb_stts = NULL;
821 trans_pcie->base_rb_stts_dma = 0;
823 kfree(trans_pcie->rx_pool);
824 trans_pcie->rx_pool = NULL;
825 kfree(trans_pcie->global_table);
826 trans_pcie->global_table = NULL;
827 kfree(trans_pcie->rxq);
828 trans_pcie->rxq = NULL;
833 static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
835 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
837 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
839 switch (trans_pcie->rx_buf_size) {
841 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
844 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
847 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
851 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
854 if (!iwl_trans_grab_nic_access(trans))
858 iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
859 /* reset and flush pointers */
860 iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
861 iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
862 iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
864 /* Reset driver's Rx queue write index */
865 iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
867 /* Tell device where to find RBD circular buffer in DRAM */
868 iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
869 (u32)(rxq->bd_dma >> 8));
871 /* Tell device where in DRAM to update its Rx status */
872 iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
873 rxq->rb_stts_dma >> 4);
876 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
877 * the credit mechanism in 5000 HW RX FIFO
878 * Direct rx interrupts to hosts
879 * Rx buffer size 4k or 8k or 12k
883 iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
884 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
885 FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
886 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | rb_size |
888 (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
889 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
891 iwl_trans_release_nic_access(trans);
893 /* Set interrupt coalescing timer to default (2048 usecs) */
894 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
896 /* W/A for interrupt coalescing bug in 7260 and 3160 */
897 if (trans->cfg->host_interrupt_operation_mode)
898 iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
901 static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
903 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
904 u32 rb_size, enabled = 0;
907 switch (trans_pcie->rx_buf_size) {
909 rb_size = RFH_RXF_DMA_RB_SIZE_2K;
912 rb_size = RFH_RXF_DMA_RB_SIZE_4K;
915 rb_size = RFH_RXF_DMA_RB_SIZE_8K;
918 rb_size = RFH_RXF_DMA_RB_SIZE_12K;
922 rb_size = RFH_RXF_DMA_RB_SIZE_4K;
925 if (!iwl_trans_grab_nic_access(trans))
929 iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
930 /* disable free and used rx queue operation */
931 iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);
933 for (i = 0; i < trans->num_rx_queues; i++) {
934 /* Tell device where to find RBD free table in DRAM */
935 iwl_write_prph64_no_grab(trans,
936 RFH_Q_FRBDCB_BA_LSB(i),
937 trans_pcie->rxq[i].bd_dma);
938 /* Tell device where to find RBD used table in DRAM */
939 iwl_write_prph64_no_grab(trans,
940 RFH_Q_URBDCB_BA_LSB(i),
941 trans_pcie->rxq[i].used_bd_dma);
942 /* Tell device where in DRAM to update its Rx status */
943 iwl_write_prph64_no_grab(trans,
944 RFH_Q_URBD_STTS_WPTR_LSB(i),
945 trans_pcie->rxq[i].rb_stts_dma);
946 /* Reset device index tables */
947 iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
948 iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
949 iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);
951 enabled |= BIT(i) | BIT(i + 16);
956 * Rx buffer size 4k or 8k or 12k
958 * Drop frames that exceed RB size
961 iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
962 RFH_DMA_EN_ENABLE_VAL | rb_size |
963 RFH_RXF_DMA_MIN_RB_4_8 |
964 RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
965 RFH_RXF_DMA_RBDCB_SIZE_512);
968 * Activate DMA snooping.
969 * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
972 iwl_write_prph_no_grab(trans, RFH_GEN_CFG,
973 RFH_GEN_CFG_RFH_DMA_SNOOP |
974 RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
975 RFH_GEN_CFG_SERVICE_DMA_SNOOP |
976 RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
977 trans->trans_cfg->integrated ?
978 RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
979 RFH_GEN_CFG_RB_CHUNK_SIZE_128));
980 /* Enable the relevant rx queues */
981 iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);
983 iwl_trans_release_nic_access(trans);
985 /* Set interrupt coalescing timer to default (2048 usecs) */
986 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
989 void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
991 lockdep_assert_held(&rxq->lock);
993 INIT_LIST_HEAD(&rxq->rx_free);
994 INIT_LIST_HEAD(&rxq->rx_used);
999 static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget);
1001 static int iwl_pcie_napi_poll(struct napi_struct *napi, int budget)
1003 struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
1004 struct iwl_trans_pcie *trans_pcie;
1005 struct iwl_trans *trans;
1008 trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev);
1009 trans = trans_pcie->trans;
1011 ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
1013 IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n",
1014 rxq->id, ret, budget);
1017 spin_lock(&trans_pcie->irq_lock);
1018 if (test_bit(STATUS_INT_ENABLED, &trans->status))
1019 _iwl_enable_interrupts(trans);
1020 spin_unlock(&trans_pcie->irq_lock);
1022 napi_complete_done(&rxq->napi, ret);
1028 static int iwl_pcie_napi_poll_msix(struct napi_struct *napi, int budget)
1030 struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
1031 struct iwl_trans_pcie *trans_pcie;
1032 struct iwl_trans *trans;
1035 trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev);
1036 trans = trans_pcie->trans;
1038 ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
1039 IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n", rxq->id, ret,
1043 int irq_line = rxq->id;
1045 /* FIRST_RSS is shared with line 0 */
1046 if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS &&
 rxq->id == 1)
 irq_line = 0;
1050 spin_lock(&trans_pcie->irq_lock);
1051 iwl_pcie_clear_irq(trans, irq_line);
1052 spin_unlock(&trans_pcie->irq_lock);
1054 napi_complete_done(&rxq->napi, ret);
1060 static int _iwl_pcie_rx_init(struct iwl_trans *trans)
1062 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1063 struct iwl_rxq *def_rxq;
1064 struct iwl_rb_allocator *rba = &trans_pcie->rba;
1065 int i, err, queue_size, allocator_pool_size, num_alloc;
1067 if (!trans_pcie->rxq) {
1068 err = iwl_pcie_rx_alloc(trans);
1072 def_rxq = trans_pcie->rxq;
1074 cancel_work_sync(&rba->rx_alloc);
1076 spin_lock_bh(&rba->lock);
1077 atomic_set(&rba->req_pending, 0);
1078 atomic_set(&rba->req_ready, 0);
1079 INIT_LIST_HEAD(&rba->rbd_allocated);
1080 INIT_LIST_HEAD(&rba->rbd_empty);
1081 spin_unlock_bh(&rba->lock);
1083 /* free all first - we overwrite everything here */
1084 iwl_pcie_free_rbs_pool(trans);
1086 for (i = 0; i < RX_QUEUE_SIZE; i++)
1087 def_rxq->queue[i] = NULL;
1089 for (i = 0; i < trans->num_rx_queues; i++) {
1090 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
1092 spin_lock_bh(&rxq->lock);
1094 * Set read write pointer to reflect that we have processed
1095 * and used all buffers, but have not restocked the Rx queue
1096 * with fresh buffers
1100 rxq->write_actual = 0;
1101 memset(rxq->rb_stts, 0,
1102 (trans->trans_cfg->device_family >=
1103 IWL_DEVICE_FAMILY_AX210) ?
1104 sizeof(__le16) : sizeof(struct iwl_rb_status));
1106 iwl_pcie_rx_init_rxb_lists(rxq);
1108 spin_unlock_bh(&rxq->lock);
1110 if (!rxq->napi.poll) {
1111 int (*poll)(struct napi_struct *, int) = iwl_pcie_napi_poll;
1113 if (trans_pcie->msix_enabled)
1114 poll = iwl_pcie_napi_poll_msix;
1116 netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
1117 poll, NAPI_POLL_WEIGHT);
1118 napi_enable(&rxq->napi);
1123 /* move the pool to the default queue and allocator ownerships */
1124 queue_size = trans->trans_cfg->mq_rx_supported ?
1125 trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE;
1126 allocator_pool_size = trans->num_rx_queues *
1127 (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
1128 num_alloc = queue_size + allocator_pool_size;
1130 for (i = 0; i < num_alloc; i++) {
1131 struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
1133 if (i < allocator_pool_size)
1134 list_add(&rxb->list, &rba->rbd_empty);
1136 list_add(&rxb->list, &def_rxq->rx_used);
1137 trans_pcie->global_table[i] = rxb;
1138 rxb->vid = (u16)(i + 1);
1139 rxb->invalid = true;
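/*
 * Note (added for illustration): vid 0 is reserved as "invalid", which is
 * why the pool entry at index i is published as vid i + 1 and looked up
 * later as global_table[vid - 1] (see iwl_pcie_get_rxb()).
 */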
1142 iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
1147 int iwl_pcie_rx_init(struct iwl_trans *trans)
1149 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1150 int ret = _iwl_pcie_rx_init(trans);
1155 if (trans->trans_cfg->mq_rx_supported)
1156 iwl_pcie_rx_mq_hw_init(trans);
1158 iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
1160 iwl_pcie_rxq_restock(trans, trans_pcie->rxq);
1162 spin_lock_bh(&trans_pcie->rxq->lock);
1163 iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
1164 spin_unlock_bh(&trans_pcie->rxq->lock);
1169 int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
1171 /* Set interrupt coalescing timer to default (2048 usecs) */
1172 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
1175 * We don't configure the RFH.
1176 * Restock will be done at alive, after firmware configured the RFH.
1178 return _iwl_pcie_rx_init(trans);
1181 void iwl_pcie_rx_free(struct iwl_trans *trans)
1183 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1184 struct iwl_rb_allocator *rba = &trans_pcie->rba;
1186 size_t rb_stts_size = trans->trans_cfg->device_family >=
1187 IWL_DEVICE_FAMILY_AX210 ?
1188 sizeof(__le16) : sizeof(struct iwl_rb_status);
1191 * if rxq is NULL, it means that nothing has been allocated,
1194 if (!trans_pcie->rxq) {
1195 IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
1199 cancel_work_sync(&rba->rx_alloc);
1201 iwl_pcie_free_rbs_pool(trans);
1203 if (trans_pcie->base_rb_stts) {
1204 dma_free_coherent(trans->dev,
1205 rb_stts_size * trans->num_rx_queues,
1206 trans_pcie->base_rb_stts,
1207 trans_pcie->base_rb_stts_dma);
1208 trans_pcie->base_rb_stts = NULL;
1209 trans_pcie->base_rb_stts_dma = 0;
1212 for (i = 0; i < trans->num_rx_queues; i++) {
1213 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
1215 iwl_pcie_free_rxq_dma(trans, rxq);
1217 if (rxq->napi.poll) {
1218 napi_disable(&rxq->napi);
1219 netif_napi_del(&rxq->napi);
1222 kfree(trans_pcie->rx_pool);
1223 kfree(trans_pcie->global_table);
1224 kfree(trans_pcie->rxq);
1226 if (trans_pcie->alloc_page)
1227 __free_pages(trans_pcie->alloc_page, trans_pcie->rx_page_order);
1230 static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
1231 struct iwl_rb_allocator *rba)
1233 spin_lock(&rba->lock);
1234 list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
1235 spin_unlock(&rba->lock);
1239 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
1241 * Called when a RBD can be reused. The RBD is transferred to the allocator.
1242 * When there are 2 empty RBDs - a request for allocation is posted
1244 static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
1245 struct iwl_rx_mem_buffer *rxb,
1246 struct iwl_rxq *rxq, bool emergency)
1248 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1249 struct iwl_rb_allocator *rba = &trans_pcie->rba;
1251 /* Move the RBD to the used list; it will be moved to the allocator in
1252 * batches before claiming or posting a request. */
1253 list_add_tail(&rxb->list, &rxq->rx_used);
1255 if (unlikely(emergency))
1258 /* Count the allocator owned RBDs */
1261 /* If we have RX_POST_REQ_ALLOC new released rx buffers -
1262 * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is
1263 * used for the case where we failed to claim RX_CLAIM_REQ_ALLOC buffers,
1264 * but still need to post another request afterwards.
1266 if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
1267 /* Move the 2 RBDs to the allocator ownership.
1268 * Allocator has another 6 from the pool for the request completion. */
1269 iwl_pcie_rx_move_to_allocator(rxq, rba);
1271 atomic_inc(&rba->req_pending);
1272 queue_work(rba->alloc_wq, &rba->rx_alloc);
1276 static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
1277 struct iwl_rxq *rxq,
1278 struct iwl_rx_mem_buffer *rxb,
1282 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1283 struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
1284 bool page_stolen = false;
1285 int max_len = trans_pcie->rx_buf_bytes;
1291 dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
1293 while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
1294 struct iwl_rx_packet *pkt;
1297 struct iwl_rx_cmd_buffer rxcb = {
1298 ._offset = rxb->offset + offset,
1299 ._rx_page_order = trans_pcie->rx_page_order,
1301 ._page_stolen = false,
1302 .truesize = max_len,
1305 pkt = rxb_addr(&rxcb);
1307 if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
1309 "Q %d: RB end marker at offset %d\n",
1314 WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
1315 FH_RSCSR_RXQ_POS != rxq->id,
1316 "frame on invalid queue - is on %d and indicates %d\n",
1318 (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
1322 "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
1324 iwl_get_cmd_string(trans,
1325 WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)),
1326 pkt->hdr.group_id, pkt->hdr.cmd,
1327 le16_to_cpu(pkt->hdr.sequence));
1329 len = iwl_rx_packet_len(pkt);
1330 len += sizeof(u32); /* account for status word */
1332 offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
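/*
 * Example (illustration only, assuming the usual 64-byte
 * FH_RSCSR_FRAME_ALIGN): a packet whose total length (payload plus status
 * word) is 109 bytes advances offset by ALIGN(109, 64) == 128, so the next
 * packet in the same RB starts on a 64-byte boundary.
 */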
1334 /* check that what the device tells us made sense */
1335 if (len < sizeof(*pkt) || offset > max_len)
1338 trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
1339 trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
1341 /* Reclaim a command buffer only if this packet is a response
1342 * to a (driver-originated) command.
1343 * If the packet (e.g. Rx frame) originated from uCode,
1344 * there is no command buffer to reclaim.
1345 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
1346 * but apparently a few don't get set; catch them here. */
1347 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
1348 if (reclaim && !pkt->hdr.group_id) {
1351 for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
1352 if (trans_pcie->no_reclaim_cmds[i] ==
1360 if (rxq->id == trans_pcie->def_rx_queue)
1361 iwl_op_mode_rx(trans->op_mode, &rxq->napi,
1364 iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
1368 * After here, we should always check rxcb._page_stolen,
1369 * if it is true then one of the handlers took the page.
1373 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1374 int index = SEQ_TO_INDEX(sequence);
1375 int cmd_index = iwl_txq_get_cmd_index(txq, index);
1377 kfree_sensitive(txq->entries[cmd_index].free_buf);
1378 txq->entries[cmd_index].free_buf = NULL;
1380 /* Invoke any callbacks, transfer the buffer to caller,
1381 * and fire off the (possibly) blocking
1382 * iwl_trans_send_cmd()
1383 * as we reclaim the driver command queue */
1384 if (!rxcb._page_stolen)
1385 iwl_pcie_hcmd_complete(trans, &rxcb);
1387 IWL_WARN(trans, "Claim null rxb?\n");
1390 page_stolen |= rxcb._page_stolen;
1391 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
1395 /* page was stolen from us -- free our reference */
1397 __free_pages(rxb->page, trans_pcie->rx_page_order);
1401 /* Reuse the page if possible. For notification packets and
1402 * SKBs that fail to Rx correctly, add them back into the
1403 * rx_free list for reuse later. */
1404 if (rxb->page != NULL) {
1406 dma_map_page(trans->dev, rxb->page, rxb->offset,
1407 trans_pcie->rx_buf_bytes,
1409 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
1411 * free the page(s) as well to not break
1412 * the invariant that the items on the used
1413 * list have no page(s)
1415 __free_pages(rxb->page, trans_pcie->rx_page_order);
1417 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1419 list_add_tail(&rxb->list, &rxq->rx_free);
1423 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1426 static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
1427 struct iwl_rxq *rxq, int i,
1430 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1431 struct iwl_rx_mem_buffer *rxb;
1434 BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);
1435 BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc_bz) != 4);
1437 if (!trans->trans_cfg->mq_rx_supported) {
1438 rxb = rxq->queue[i];
1439 rxq->queue[i] = NULL;
1443 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
1444 struct iwl_rx_completion_desc_bz *cd = rxq->used_bd;
1446 vid = le16_to_cpu(cd[i].rbid);
1447 *join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
1448 } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
1449 struct iwl_rx_completion_desc *cd = rxq->used_bd;
1451 vid = le16_to_cpu(cd[i].rbid);
1452 *join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
1454 __le32 *cd = rxq->used_bd;
1456 vid = le32_to_cpu(cd[i]) & 0x0FFF; /* 12-bit VID */
1459 if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))
1462 rxb = trans_pcie->global_table[vid - 1];
1466 IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid);
1468 rxb->invalid = true;
1473 WARN(1, "Invalid rxb from HW %u\n", (u32)vid);
1474 iwl_force_nmi(trans);
1479 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
1481 static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget)
1483 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1484 struct iwl_rxq *rxq;
1485 u32 r, i, count = 0, handled = 0;
1486 bool emergency = false;
1488 if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
1491 rxq = &trans_pcie->rxq[queue];
1494 spin_lock(&rxq->lock);
1495 /* uCode's read index (stored in shared DRAM) indicates the last Rx
1496 * buffer that the driver may process (last buffer filled by ucode). */
1497 r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
1500 /* W/A 9000 device step A0 wrap-around bug */
1501 r &= (rxq->queue_size - 1);
1503 /* Rx interrupt, but nothing sent from uCode */
1505 IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
1507 while (i != r && ++handled < budget) {
1508 struct iwl_rb_allocator *rba = &trans_pcie->rba;
1509 struct iwl_rx_mem_buffer *rxb;
1510 /* number of RBDs still waiting for page allocation */
1511 u32 rb_pending_alloc =
1512 atomic_read(&trans_pcie->rba.req_pending) * RX_CLAIM_REQ_ALLOC;
1516 if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
1518 iwl_pcie_rx_move_to_allocator(rxq, rba);
1520 IWL_DEBUG_TPT(trans,
1521 "RX path is in emergency. Pending allocations %d\n",
1525 IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
1527 rxb = iwl_pcie_get_rxb(trans, rxq, i, &join);
1531 if (unlikely(join || rxq->next_rb_is_fragment)) {
1532 rxq->next_rb_is_fragment = join;
1534 * We can only get a multi-RB in the following cases:
1535 * - firmware issue, sending a too big notification
1536 * - sniffer mode with a large A-MSDU
1537 * - large MTU frames (>2k)
1538 * since the multi-RB functionality is limited to newer
1539 * hardware that cannot put multiple entries into a
1542 * Right now, the higher layers aren't set up to deal
1543 * with that, so discard all of these.
1545 list_add_tail(&rxb->list, &rxq->rx_free);
1548 iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
1551 i = (i + 1) & (rxq->queue_size - 1);
1554 * If we have RX_CLAIM_REQ_ALLOC released rx buffers -
1555 * try to claim the pre-allocated buffers from the allocator.
1556 * If not ready - will try to reclaim next time.
1557 * There is no need to reschedule work - allocator exits only
 * on success.
 */
1560 if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
1561 iwl_pcie_rx_allocator_get(trans, rxq);
1563 if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
1564 /* Add the remaining empty RBDs for allocator use */
1565 iwl_pcie_rx_move_to_allocator(rxq, rba);
1566 } else if (emergency) {
1570 if (rb_pending_alloc < rxq->queue_size / 3) {
1571 IWL_DEBUG_TPT(trans,
1572 "RX path exited emergency. Pending allocations %d\n",
1578 spin_unlock(&rxq->lock);
1579 iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1580 iwl_pcie_rxq_restock(trans, rxq);
1586 /* Backtrack one entry */
1588 spin_unlock(&rxq->lock);
1591 * Handle a case where in emergency there are some unallocated RBDs.
1592 * Those RBDs are in the used list, but are not tracked by the queue's
1593 * used_count, which only counts allocator-owned RBDs.
1594 * Unallocated emergency RBDs must be allocated on exit, otherwise
1595 * when called again the function may not be in emergency mode and
1596 * they will be handed to the allocator with no tracking in the RBD
1597 * allocator counters, which will lead to them never being claimed back
 * by the queue.
1599 * By allocating them here, they are now in the queue free list, and
1600 * will be restocked by the next call of iwl_pcie_rxq_restock.
1602 if (unlikely(emergency && count))
1603 iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1605 iwl_pcie_rxq_restock(trans, rxq);
1610 static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
1612 u8 queue = entry->entry;
1613 struct msix_entry *entries = entry - queue;
1615 return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
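/*
 * Example (illustration only): if dev_id points at msix_entries[3],
 * entry->entry == 3, so 'entry - queue' steps back to msix_entries[0],
 * from which container_of() recovers the enclosing iwl_trans_pcie.
 */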
1619 * iwl_pcie_irq_rx_msix_handler - Main entry function for receiving responses from fw
1620 * This interrupt handler should be used with RSS queue only.
1622 irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
1624 struct msix_entry *entry = dev_id;
1625 struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
1626 struct iwl_trans *trans = trans_pcie->trans;
1627 struct iwl_rxq *rxq = &trans_pcie->rxq[entry->entry];
1629 trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);
1631 if (WARN_ON(entry->entry >= trans->num_rx_queues))
1635 if (net_ratelimit())
1637 "[%d] Got MSI-X interrupt before we have Rx queues\n",
1642 lock_map_acquire(&trans->sync_cmd_lockdep_map);
1643 IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry);
1646 if (napi_schedule_prep(&rxq->napi))
1647 __napi_schedule(&rxq->napi);
1649 iwl_pcie_clear_irq(trans, entry->entry);
1652 lock_map_release(&trans->sync_cmd_lockdep_map);
1658 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
1660 static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
1664 /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
1665 if (trans->cfg->internal_wimax_coex &&
1666 !trans->cfg->apmg_not_supported &&
1667 (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
1668 APMS_CLK_VAL_MRB_FUNC_MODE) ||
1669 (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
1670 APMG_PS_CTRL_VAL_RESET_REQ))) {
1671 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1672 iwl_op_mode_wimax_active(trans->op_mode);
1673 wake_up(&trans->wait_command_queue);
1677 for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
1678 if (!trans->txqs.txq[i])
1680 del_timer(&trans->txqs.txq[i]->stuck_timer);
1683 /* The STATUS_FW_ERROR bit is set in this function. This must happen
1684 * before we wake up the command caller, to ensure a proper cleanup. */
1685 iwl_trans_fw_error(trans, false);
1687 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1688 wake_up(&trans->wait_command_queue);
1691 static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
1695 lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
1697 trace_iwlwifi_dev_irq(trans->dev);
1699 /* Discover which interrupts are active/pending */
1700 inta = iwl_read32(trans, CSR_INT);
1702 /* the thread will service interrupts and re-enable them */
1706 /* a device (PCI-E) page is 4096 bytes long */
1707 #define ICT_SHIFT 12
1708 #define ICT_SIZE (1 << ICT_SHIFT)
1709 #define ICT_COUNT (ICT_SIZE / sizeof(u32))
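/*
 * Example (illustration only): with a 4096-byte ICT page, ICT_COUNT is
 * 4096 / sizeof(u32) == 1024 entries, and the index wrap below uses
 * (ICT_COUNT - 1) == 1023 as a mask, so the table size must stay a power
 * of two.
 */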
1711 /* Interrupt handler using the ICT table. With this, the driver stops
1712 * using the INTA register to read the device's interrupts, since reading
1713 * that register is expensive. Instead, the device writes its interrupts
1714 * into the ICT table in DRAM and increments an index, then fires an
1715 * interrupt to the driver. The driver ORs all ICT table entries from the
1716 * current index up to the first entry with a 0 value; the result is the
 * interrupt(s) to service. The driver sets those entries back to 0 and
 * updates the index.
 */
1719 static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
1721 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1726 trace_iwlwifi_dev_irq(trans->dev);
1728 /* Ignore interrupt if there's nothing in NIC to service.
1729 * This may be due to IRQ shared with another device,
1730 * or due to sporadic interrupts thrown from our NIC. */
1731 read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1732 trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
1737 * Collect all entries up to the first 0, starting from ict_index;
1738 * note we already read at ict_index.
1742 IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
1743 trans_pcie->ict_index, read);
1744 trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
1745 trans_pcie->ict_index =
1746 ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
1748 read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1749 trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
1753 /* We should not get this value, just ignore it. */
1754 if (val == 0xffffffff)
1758 * This is a w/a for a h/w bug. The h/w bug may cause the Rx bit
1759 * (bit 15 before shifting it to 31) to clear when using interrupt
1760 * coalescing. Fortunately, bits 18 and 19 stay set when this happens,
1761 * so we use them to decide on the real state of the Rx bit.
1762 * In other words, bit 15 is set if bit 18 or bit 19 is set.
1767 inta = (0xff & val) | ((0xff00 & val) << 16);
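/*
 * Worked example (illustration only): an ICT value of 0x0000C003 yields
 * inta = (0xff & val) | ((0xff00 & val) << 16)
 *      = 0x03 | 0xC0000000 = 0xC0000003:
 * the low byte stays in place and the second byte moves to the top byte,
 * undoing the table's compressed layout.
 */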
1771 void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
1773 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1774 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1775 bool hw_rfkill, prev, report;
1777 mutex_lock(&trans_pcie->mutex);
1778 prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1779 hw_rfkill = iwl_is_rfkill_set(trans);
1781 set_bit(STATUS_RFKILL_OPMODE, &trans->status);
1782 set_bit(STATUS_RFKILL_HW, &trans->status);
1784 if (trans_pcie->opmode_down)
1787 report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1789 IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
1790 hw_rfkill ? "disable radio" : "enable radio");
1792 isr_stats->rfkill++;
1795 iwl_trans_pcie_rf_kill(trans, report);
1796 mutex_unlock(&trans_pcie->mutex);
1799 if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
1801 IWL_DEBUG_RF_KILL(trans,
1802 "Rfkill while SYNC HCMD in flight\n");
1803 wake_up(&trans->wait_command_queue);
1805 clear_bit(STATUS_RFKILL_HW, &trans->status);
1806 if (trans_pcie->opmode_down)
1807 clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
1811 irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
1813 struct iwl_trans *trans = dev_id;
1814 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1815 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1818 bool polling = false;
1820 lock_map_acquire(&trans->sync_cmd_lockdep_map);
1822 spin_lock_bh(&trans_pcie->irq_lock);
1824 /* dram interrupt table not set yet,
1825 * use legacy interrupt.
1827 if (likely(trans_pcie->use_ict))
1828 inta = iwl_pcie_int_cause_ict(trans);
1830 inta = iwl_pcie_int_cause_non_ict(trans);
1832 #ifdef CONFIG_IWLWIFI_DEBUG
1833 if (iwl_have_debug_level(IWL_DL_ISR)) {
1834 IWL_DEBUG_ISR(trans,
1835 "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
1836 inta, trans_pcie->inta_mask,
1837 iwl_read32(trans, CSR_INT_MASK),
1838 iwl_read32(trans, CSR_FH_INT_STATUS));
1839 if (inta & (~trans_pcie->inta_mask))
1840 IWL_DEBUG_ISR(trans,
1841 "We got a masked interrupt (0x%08x)\n",
1842 inta & (~trans_pcie->inta_mask));
1846 inta &= trans_pcie->inta_mask;
1849 * Ignore interrupt if there's nothing in NIC to service.
1850 * This may be due to IRQ shared with another device,
1851 * or due to sporadic interrupts thrown from our NIC.
1853 if (unlikely(!inta)) {
1854 IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
1856 * Re-enable interrupts here since we don't
1857 * have anything to service
1859 if (test_bit(STATUS_INT_ENABLED, &trans->status))
1860 _iwl_enable_interrupts(trans);
1861 spin_unlock_bh(&trans_pcie->irq_lock);
1862 lock_map_release(&trans->sync_cmd_lockdep_map);
1866 if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
1868 * Hardware disappeared. It might have
1869 * already raised an interrupt.
1871 IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
1872 spin_unlock_bh(&trans_pcie->irq_lock);
1876 /* Ack/clear/reset pending uCode interrupts.
1877 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
1879 /* There is a hardware bug in the interrupt mask function that some
1880 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
1881 * they are disabled in the CSR_INT_MASK register. Furthermore the
1882 * ICT interrupt handling mechanism has another bug that might cause
1883 * these unmasked interrupts fail to be detected. We workaround the
1884 * hardware bugs here by ACKing all the possible interrupts so that
1885 * interrupt coalescing can still be achieved.
1887 iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
1889 #ifdef CONFIG_IWLWIFI_DEBUG
1890 if (iwl_have_debug_level(IWL_DL_ISR))
1891 IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
1892 inta, iwl_read32(trans, CSR_INT_MASK));
1895 spin_unlock_bh(&trans_pcie->irq_lock);
1897 /* Now service all interrupt bits discovered above. */
1898 if (inta & CSR_INT_BIT_HW_ERR) {
1899 IWL_ERR(trans, "Hardware error detected. Restarting.\n");
1901 /* Tell the device to stop sending interrupts */
1902 iwl_disable_interrupts(trans);
1905 iwl_pcie_irq_handle_error(trans);
1907 handled |= CSR_INT_BIT_HW_ERR;
1912 /* NIC fires this, but we don't use it, redundant with WAKEUP */
1913 if (inta & CSR_INT_BIT_SCD) {
1914 IWL_DEBUG_ISR(trans,
1915 "Scheduler finished to transmit the frame/frames.\n");
1919 /* Alive notification via Rx interrupt will do the real work */
1920 if (inta & CSR_INT_BIT_ALIVE) {
1921 IWL_DEBUG_ISR(trans, "Alive interrupt\n");
1923 if (trans->trans_cfg->gen2) {
1925 * We can restock, since firmware configured
1928 iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
1931 handled |= CSR_INT_BIT_ALIVE;
1934 /* Safely ignore these bits for debug checks below */
1935 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
1937 /* HW RF KILL switch toggled */
1938 if (inta & CSR_INT_BIT_RF_KILL) {
1939 iwl_pcie_handle_rfkill_irq(trans);
1940 handled |= CSR_INT_BIT_RF_KILL;
1943 /* Chip got too hot and stopped itself */
1944 if (inta & CSR_INT_BIT_CT_KILL) {
1945 IWL_ERR(trans, "Microcode CT kill error detected.\n");
1946 isr_stats->ctkill++;
1947 handled |= CSR_INT_BIT_CT_KILL;
1950 /* Error detected by uCode */
1951 if (inta & CSR_INT_BIT_SW_ERR) {
1952 IWL_ERR(trans, "Microcode SW error detected. "
1953 " Restarting 0x%X.\n", inta);
1955 iwl_pcie_irq_handle_error(trans);
1956 handled |= CSR_INT_BIT_SW_ERR;
1959 /* uCode wakes up after power-down sleep */
1960 if (inta & CSR_INT_BIT_WAKEUP) {
1961 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
1962 iwl_pcie_rxq_check_wrptr(trans);
1963 iwl_pcie_txq_check_wrptrs(trans);
1965 isr_stats->wakeup++;
1967 handled |= CSR_INT_BIT_WAKEUP;
1970 /* All uCode command responses, including Tx command responses,
1971 * Rx "responses" (frame-received notification), and other
1972 * notifications from uCode come through here. */
1973 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
1974 CSR_INT_BIT_RX_PERIODIC)) {
1975 IWL_DEBUG_ISR(trans, "Rx interrupt\n");
1976 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
1977 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
1978 iwl_write32(trans, CSR_FH_INT_STATUS,
1979 CSR_FH_INT_RX_MASK);
1981 if (inta & CSR_INT_BIT_RX_PERIODIC) {
1982 handled |= CSR_INT_BIT_RX_PERIODIC;
1984 iwl_write32(trans, CSR_INT, CSR_INT_BIT_RX_PERIODIC);
1986 /* Sending an RX interrupt requires many steps to be done in the
 * device:
1988 * 1- write interrupt to current index in ICT table.
 * 2- dma RX frame.
1990 * 3- update RX shared data to indicate last write index.
1991 * 4- send interrupt.
1992 * This could lead to an RX race: the driver could receive an RX interrupt
1993 * while the shared data does not yet reflect the change;
1994 * the periodic interrupt will detect any such dangling Rx activity.
1997 /* Disable periodic interrupt; we use it as just a one-shot. */
1998 iwl_write8(trans, CSR_INT_PERIODIC_REG,
1999 CSR_INT_PERIODIC_DIS);
2002 * Enable periodic interrupt in 8 msec only if we received
2003 * real RX interrupt (instead of just periodic int), to catch
2004 * any dangling Rx interrupt. If it was just the periodic
2005 * interrupt, there was no dangling Rx activity, and no need
2006 * to extend the periodic interrupt; one-shot is enough.
2008 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
2009 iwl_write8(trans, CSR_INT_PERIODIC_REG,
2010 CSR_INT_PERIODIC_ENA);
2015 if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
2017 __napi_schedule(&trans_pcie->rxq[0].napi);
2022 /* This "Tx" DMA channel is used only for loading uCode */
2023 if (inta & CSR_INT_BIT_FH_TX) {
2024 iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
2025 IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
2027 handled |= CSR_INT_BIT_FH_TX;
2028 /* Wake up uCode load routine, now that load is complete */
2029 trans_pcie->ucode_write_complete = true;
2030 wake_up(&trans_pcie->ucode_write_waitq);
2031 /* Wake up IMR write routine, now that write to SRAM is complete */
2032 if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
2033 trans_pcie->imr_status = IMR_D2S_COMPLETED;
2034 wake_up(&trans_pcie->ucode_write_waitq);
	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

	if (!polling) {
		spin_lock_bh(&trans_pcie->irq_lock);
		/* only re-enable all interrupts if they were disabled by the irq */
		if (test_bit(STATUS_INT_ENABLED, &trans->status))
			_iwl_enable_interrupts(trans);
		/* we are loading the firmware, enable FH_TX interrupt only */
		else if (handled & CSR_INT_BIT_FH_TX)
			iwl_enable_fw_load_int(trans);
		/* Re-enable RF_KILL if it occurred */
		else if (handled & CSR_INT_BIT_RF_KILL)
			iwl_enable_rfkill_int(trans);
		/* Re-enable the ALIVE / Rx interrupt if it occurred */
		else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX))
			iwl_enable_fw_load_int_ctx_info(trans);
		spin_unlock_bh(&trans_pcie->irq_lock);
	}

out:
	lock_map_release(&trans->sync_cmd_lockdep_map);
	return IRQ_HANDLED;
}

/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/

/* Free DRAM table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->ict_tbl) {
		dma_free_coherent(trans->dev, ICT_SIZE,
				  trans_pcie->ict_tbl,
				  trans_pcie->ict_tbl_dma);
		trans_pcie->ict_tbl = NULL;
		trans_pcie->ict_tbl_dma = 0;
	}
}

/*
 * Allocate the DRAM shared table: an aligned memory block of ICT_SIZE.
 * Also reset all data related to the ICT table interrupt.
 */
int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->ict_tbl =
		dma_alloc_coherent(trans->dev, ICT_SIZE,
				   &trans_pcie->ict_tbl_dma, GFP_KERNEL);
	if (!trans_pcie->ict_tbl)
		return -ENOMEM;

	/* just an API sanity check ... it is guaranteed to be aligned */
	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
		iwl_pcie_free_ict(trans);
		return -EINVAL;
	}

	return 0;
}

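/*
 * Worked example (editorial, assuming ICT_SIZE == 1 << ICT_SHIFT == 4096
 * as defined earlier in this file): a 4096-byte dma_alloc_coherent()
 * buffer is page-aligned, so the low 12 bits of ict_tbl_dma are expected
 * to be zero, e.g. 0x7ffe1000 & 0xfff == 0. The WARN_ON() above only
 * guards against that API guarantee changing.
 */
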
/* Device is going up: inform it about using the ICT interrupt table,
 * and tell the driver to start using ICT interrupts.
 */
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock_bh(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE |
	       CSR_DRAM_INIT_TBL_WRAP_CHECK |
	       CSR_DRAM_INIT_TBL_WRITE_POINTER;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG = 0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	_iwl_enable_interrupts(trans);
	spin_unlock_bh(&trans_pcie->irq_lock);
}

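/*
 * Worked example (editorial, assuming ICT_SHIFT == 12): an ICT table at
 * DMA address 0x7ffe1000 yields val = 0x7ffe1000 >> 12 = 0x7ffe1; the
 * enable, wrap-check and write-pointer flags are then OR'd into the
 * upper bits, so a single 32-bit CSR write hands the hardware both the
 * table's page number and its control flags.
 */
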
/* Device is going down: disable ICT interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_bh(&trans_pcie->irq_lock);
	trans_pcie->use_ict = false;
	spin_unlock_bh(&trans_pcie->irq_lock);
}

irqreturn_t iwl_pcie_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;

	if (!trans)
		return IRQ_NONE;

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	return IRQ_WAKE_THREAD;
}

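/*
 * Usage sketch (editorial; the actual registration lives in the probe
 * path, e.g. pcie/trans.c): iwl_pcie_isr() is the hard half of a
 * threaded IRQ. It masks CSR_INT_MASK and returns IRQ_WAKE_THREAD so
 * the heavy lifting runs in the threaded handler:
 *
 *	ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
 *				   iwl_pcie_irq_handler, IRQF_SHARED,
 *				   DRV_NAME, trans);
 */
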
irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
{
	return IRQ_WAKE_THREAD;
}

irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
{
	struct msix_entry *entry = dev_id;
	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
	struct iwl_trans *trans = trans_pcie->trans;
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta_fh_msk = ~MSIX_FH_INT_CAUSES_DATA_QUEUE;
	u32 inta_fh, inta_hw;
	bool polling = false;
	bool sw_err;

	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
		inta_fh_msk |= MSIX_FH_INT_CAUSES_Q0;

	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
		inta_fh_msk |= MSIX_FH_INT_CAUSES_Q1;

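	/*
	 * Editorial note: with a shared MSI-X vector, this handler also
	 * fields the Q0 (and, with RSS, Q1) data-queue causes, so they are
	 * added to inta_fh_msk and acknowledged below; with dedicated
	 * vectors, only the non-data causes are cleared here.
	 */
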
	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock_bh(&trans_pcie->irq_lock);
	inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
	inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
	/*
	 * Clear the causes registers to avoid handling the same cause twice.
	 */
	iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh & inta_fh_msk);
	iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
	spin_unlock_bh(&trans_pcie->irq_lock);

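	/*
	 * Editorial note: writing back exactly the bits we just read
	 * acknowledges only those causes; a cause that asserts after the
	 * read stays pending and raises a fresh interrupt rather than
	 * being lost.
	 */
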
	trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);

	if (unlikely(!(inta_fh | inta_hw))) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		lock_map_release(&trans->sync_cmd_lockdep_map);
		return IRQ_NONE;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		IWL_DEBUG_ISR(trans,
			      "ISR[%d] inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
			      entry->entry, inta_fh, trans_pcie->fh_mask,
			      iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
		if (inta_fh & ~trans_pcie->fh_mask)
			IWL_DEBUG_ISR(trans,
				      "We got a masked interrupt (0x%08x)\n",
				      inta_fh & ~trans_pcie->fh_mask);
	}
#endif

	inta_fh &= trans_pcie->fh_mask;

	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
	    inta_fh & MSIX_FH_INT_CAUSES_Q0) {
		local_bh_disable();
		if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
			polling = true;
			__napi_schedule(&trans_pcie->rxq[0].napi);
		}
		local_bh_enable();
	}

	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
	    inta_fh & MSIX_FH_INT_CAUSES_Q1) {
		local_bh_disable();
		if (napi_schedule_prep(&trans_pcie->rxq[1].napi)) {
			polling = true;
			__napi_schedule(&trans_pcie->rxq[1].napi);
		}
		local_bh_enable();
	}

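	/*
	 * Editorial note: napi_schedule_prep() atomically claims the NAPI
	 * instance; only the winner calls __napi_schedule(). Setting
	 * "polling" defers the iwl_pcie_clear_irq() acknowledgment at the
	 * end of this handler to the NAPI poll routine, so the vector is
	 * not unmasked while Rx processing is still pending.
	 */
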
	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM &&
	    trans_pcie->imr_status == IMR_D2S_REQUESTED) {
		IWL_DEBUG_ISR(trans, "IMR Complete interrupt\n");
		isr_stats->tx++;

		/* Wake up IMR routine once write to SRAM is complete */
		if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
			trans_pcie->imr_status = IMR_D2S_COMPLETED;
			wake_up(&trans_pcie->ucode_write_waitq);
		}
	} else if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		/*
		 * Wake up uCode load routine,
		 * now that load is complete
		 */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);

		/* Wake up IMR routine once write to SRAM is complete */
		if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
			trans_pcie->imr_status = IMR_D2S_COMPLETED;
			wake_up(&trans_pcie->ucode_write_waitq);
		}
	}

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ;
	else
		sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR;

	/* Error detected by uCode */
	if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || sw_err) {
		IWL_ERR(trans,
			"Microcode SW error detected. Restarting 0x%X.\n",
			inta_fh);
		isr_stats->sw++;
		/* during FW reset flow report errors from there */
		if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
			trans_pcie->imr_status = IMR_D2S_ERROR;
			wake_up(&trans_pcie->imr_waitq);
		} else if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
			trans_pcie->fw_reset_state = FW_RESET_ERROR;
			wake_up(&trans_pcie->fw_reset_waitq);
		} else {
			iwl_pcie_irq_handle_error(trans);
		}
	}

	/* After checking FH register check HW register */
#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		IWL_DEBUG_ISR(trans,
			      "ISR[%d] inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
			      entry->entry, inta_hw, trans_pcie->hw_mask,
			      iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
		if (inta_hw & ~trans_pcie->hw_mask)
			IWL_DEBUG_ISR(trans,
				      "We got a masked interrupt 0x%08x\n",
				      inta_hw & ~trans_pcie->hw_mask);
	}
#endif

	inta_hw &= trans_pcie->hw_mask;

	/* Alive notification via Rx interrupt will do the real work */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
		isr_stats->alive++;
		if (trans->trans_cfg->gen2) {
			/* We can restock, since firmware configured the RFH */
			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
		}
	}

	/*
	 * In some rare cases when the HW is in a bad state, we may
	 * get this interrupt too early, when prph_info is still NULL.
	 * So make sure that it's not NULL to prevent crashing.
	 */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP && trans_pcie->prph_info) {
		u32 sleep_notif =
			le32_to_cpu(trans_pcie->prph_info->sleep_notif);

		if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND ||
		    sleep_notif == IWL_D3_SLEEP_STATUS_RESUME) {
			IWL_DEBUG_ISR(trans,
				      "Sx interrupt: sleep notification = 0x%x\n",
				      sleep_notif);
			trans_pcie->sx_complete = true;
			wake_up(&trans_pcie->sx_waitq);
		} else {
			/* uCode wakes up after power-down sleep */
			IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
			iwl_pcie_rxq_check_wrptr(trans);
			iwl_pcie_txq_check_wrptrs(trans);

			isr_stats->wakeup++;
		}
	}

	/* Chip got too hot and stopped itself */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
	}

	/* HW RF KILL switch toggled */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
		iwl_pcie_handle_rfkill_irq(trans);

	if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
		IWL_ERR(trans,
			"Hardware error detected. Restarting.\n");

		isr_stats->hw++;
		trans->dbg.hw_error = true;
		iwl_pcie_irq_handle_error(trans);
	}

	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE) {
		IWL_DEBUG_ISR(trans, "Reset flow completed\n");
		trans_pcie->fw_reset_state = FW_RESET_OK;
		wake_up(&trans_pcie->fw_reset_waitq);
	}

	if (!polling)
		iwl_pcie_clear_irq(trans, entry->entry);

	lock_map_release(&trans->sync_cmd_lockdep_map);

	return IRQ_HANDLED;
}