From b837332d0a4090781a3d7b01a092e164dd2a24ca Mon Sep 17 00:00:00 2001 From: Adrian Chadd Date: Sun, 24 Mar 2013 00:03:12 +0000 Subject: [PATCH] Overhaul the TXQ locking (again!) as part of some beacon/cabq timing related issues. Moving the TX locking under one lock made things easier to progress on but it had one important side-effect - it increased the latency when handling CABQ setup when sending beacons. This commit introduces a bunch of new changes and a few unrelated changes that are just easier to lump in here. The aim is to have the CABQ locking separate from other locking. The CABQ transmit path in the beacon process thus doesn't have to grab the general TX lock, reducing lock contention/latency and making it more likely that we'll make the beacon TX timing. The second half of this commit is the CABQ related setup changes needed for sane looking EDMA CABQ support. Right now the EDMA TX code naively assumes that only one frame (MPDU or A-MPDU) is being pushed into each FIFO slot. For the CABQ this isn't true - a whole list of frames is being pushed in - and thus CABQ handling breaks very quickly. The aim here is to setup the CABQ list and then push _that list_ to the hardware for transmission. I can then extend the EDMA TX code to stamp that list as being "one" FIFO entry (likely by tagging the last buffer in that list as "FIFO END") so the EDMA TX completion code correctly tracks things. Major: * Migrate the per-TXQ add/removal locking back to per-TXQ, rather than a single lock. * Leave the software queue side of things under the ATH_TX_LOCK lock, (continuing) to serialise things as they are. * Add a new function which is called whenever there's a beacon miss, to print out some debugging. This is primarily designed to help me figure out if the beacon miss events are due to a noisy environment, issues with the PHY/MAC, or other. * Move the CABQ setup/enable to occur _after_ all the VAPs have been looked at. 
This means that for multiple VAPs in bursted mode, the CABQ gets primed once all VAPs are checked, rather than being primed on the first VAP and then having frames appended after this. Minor: * Add a (disabled) twiddle to let me enable/disable cabq traffic. It's primarily there to let me easily debug what's going on with beacon and CABQ setup/traffic; there's some DMA engine hangs which I'm finally trying to trace down. * Clear bf_next when flushing frames; it should quieten some warnings that show up when a node goes away. Tested: * AR9280, STA/hostap, up to 4 vaps (staggered) * AR5416, STA/hostap, up to 4 vaps (staggered) TODO: * (Lots) more AR9380 and later testing, as I may have missed something here. * Leverage this to fix CABQ handling for AR9380 and later chips. * Force bursted beaconing on the chips that default to staggered beacons and ensure the CABQ stuff is all sane (eg, the MORE bits that aren't being correctly set when chaining descriptors.) --- sys/dev/ath/if_ath.c | 31 ++++++---- sys/dev/ath/if_ath_beacon.c | 108 +++++++++++++++++++++++++++++------ sys/dev/ath/if_ath_beacon.h | 1 + sys/dev/ath/if_ath_misc.h | 3 + sys/dev/ath/if_ath_sysctl.c | 7 +++ sys/dev/ath/if_ath_tdma.c | 8 ++- sys/dev/ath/if_ath_tx.c | 16 +++++- sys/dev/ath/if_ath_tx_edma.c | 19 +++--- sys/dev/ath/if_athvar.h | 22 ++++++- 9 files changed, 171 insertions(+), 44 deletions(-) diff --git a/sys/dev/ath/if_ath.c b/sys/dev/ath/if_ath.c index 2fe8de3d3f2..022e80d54c8 100644 --- a/sys/dev/ath/if_ath.c +++ b/sys/dev/ath/if_ath.c @@ -694,6 +694,9 @@ ath_attach(u_int16_t devid, struct ath_softc *sc) */ sc->sc_txq_mcastq_maxdepth = ath_txbuf; + /* Enable CABQ by default */ + sc->sc_cabq_enable = 1; + /* * Allow the TX and RX chainmasks to be overridden by * environment variables and/or device.hints. 
@@ -1899,7 +1902,7 @@ ath_bmiss_vap(struct ieee80211vap *vap) ATH_VAP(vap)->av_bmiss(vap); } -static int +int ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs) { uint32_t rsize; @@ -2364,14 +2367,17 @@ ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type) /* Restart TX completion and pending TX */ if (reset_type == ATH_RESET_NOLOSS) { - ATH_TX_LOCK(sc); for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { if (ATH_TXQ_SETUP(sc, i)) { + ATH_TXQ_LOCK(&sc->sc_txq[i]); ath_txq_restart_dma(sc, &sc->sc_txq[i]); + ATH_TXQ_UNLOCK(&sc->sc_txq[i]); + + ATH_TX_LOCK(sc); ath_txq_sched(sc, &sc->sc_txq[i]); + ATH_TX_UNLOCK(sc); } } - ATH_TX_UNLOCK(sc); } /* @@ -2922,6 +2928,9 @@ void ath_txqmove(struct ath_txq *dst, struct ath_txq *src) { + ATH_TXQ_LOCK_ASSERT(src); + ATH_TXQ_LOCK_ASSERT(dst); + TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list); dst->axq_link = src->axq_link; src->axq_link = NULL; @@ -3401,6 +3410,7 @@ ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum) txq->axq_softc = sc; TAILQ_INIT(&txq->axq_q); TAILQ_INIT(&txq->axq_tidq); + ATH_TXQ_LOCK_INIT(sc, txq); } /* @@ -3585,6 +3595,7 @@ ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum); sc->sc_txqsetup &= ~(1<axq_qnum); + ATH_TXQ_LOCK_DESTROY(txq); } /* @@ -3837,11 +3848,11 @@ ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched) nacked = 0; for (;;) { - ATH_TX_LOCK(sc); + ATH_TXQ_LOCK(txq); txq->axq_intrcnt = 0; /* reset periodic desc intr count */ bf = TAILQ_FIRST(&txq->axq_q); if (bf == NULL) { - ATH_TX_UNLOCK(sc); + ATH_TXQ_UNLOCK(txq); break; } ds = bf->bf_lastds; /* XXX must be setup correctly! 
*/ @@ -3869,7 +3880,7 @@ ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched) ATH_KTR(sc, ATH_KTR_TXCOMP, 3, "ath_tx_processq: txq=%u, bf=%p ds=%p, HAL_EINPROGRESS", txq->axq_qnum, bf, ds); - ATH_TX_UNLOCK(sc); + ATH_TXQ_UNLOCK(txq); break; } ATH_TXQ_REMOVE(txq, bf, bf_list); @@ -3906,7 +3917,7 @@ ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched) ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi, ts->ts_rssi); } - ATH_TX_UNLOCK(sc); + ATH_TXQ_UNLOCK(txq); /* * Update statistics and call completion @@ -4286,7 +4297,7 @@ ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq) * we do not need to block ath_tx_proc */ for (ix = 0;; ix++) { - ATH_TX_LOCK(sc); + ATH_TXQ_LOCK(txq); bf = TAILQ_FIRST(&txq->axq_q); if (bf == NULL) { txq->axq_link = NULL; @@ -4301,7 +4312,7 @@ ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq) * very fruity very quickly. */ txq->axq_fifo_depth = 0; - ATH_TX_UNLOCK(sc); + ATH_TXQ_UNLOCK(txq); break; } ATH_TXQ_REMOVE(txq, bf, bf_list); @@ -4337,7 +4348,7 @@ ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq) * Clear ATH_BUF_BUSY; the completion handler * will free the buffer. */ - ATH_TX_UNLOCK(sc); + ATH_TXQ_UNLOCK(txq); bf->bf_flags &= ~ATH_BUF_BUSY; if (bf->bf_comp) bf->bf_comp(sc, bf, 1); diff --git a/sys/dev/ath/if_ath_beacon.c b/sys/dev/ath/if_ath_beacon.c index 267a9fd0a7e..e847f9361fd 100644 --- a/sys/dev/ath/if_ath_beacon.c +++ b/sys/dev/ath/if_ath_beacon.c @@ -378,6 +378,39 @@ ath_beacon_update(struct ieee80211vap *vap, int item) setbit(bo->bo_flags, item); } +/* + * Handle a beacon miss. 
+ */ +static void +ath_beacon_miss(struct ath_softc *sc) +{ + HAL_SURVEY_SAMPLE hs; + HAL_BOOL ret; + uint32_t hangs; + + bzero(&hs, sizeof(hs)); + + ret = ath_hal_get_mib_cycle_counts(sc->sc_ah, &hs); + + if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) && hangs != 0) { + DPRINTF(sc, ATH_DEBUG_BEACON, + "%s: hang=0x%08x\n", + __func__, + hangs); + } + + DPRINTF(sc, ATH_DEBUG_BEACON, + "%s: valid=%d, txbusy=%u, rxbusy=%u, chanbusy=%u, " + "extchanbusy=%u, cyclecount=%u\n", + __func__, + ret, + hs.tx_busy, + hs.rx_busy, + hs.chan_busy, + hs.ext_chan_busy, + hs.cycle_count); +} + /* * Transmit a beacon frame at SWBA. Dynamic updates to the * frame contents are done as needed and the slot time is @@ -405,6 +438,7 @@ ath_beacon_proc(void *arg, int pending) if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) { sc->sc_bmisscount++; sc->sc_stats.ast_be_missed++; + ath_beacon_miss(sc); DPRINTF(sc, ATH_DEBUG_BEACON, "%s: missed %u consecutive beacons\n", __func__, sc->sc_bmisscount); @@ -478,6 +512,12 @@ ath_beacon_proc(void *arg, int pending) sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0; } + /* Program the CABQ with the contents of the CABQ txq and start it */ + ATH_TXQ_LOCK(sc->sc_cabq); + ath_beacon_cabq_start(sc); + ATH_TXQ_UNLOCK(sc->sc_cabq); + + /* Program the new beacon frame if we have one for this interval */ if (bfaddr != 0) { /* * Stop any current dma and put the new frame on the queue. @@ -500,6 +540,33 @@ ath_beacon_proc(void *arg, int pending) } } +/* + * Start CABQ transmission - this assumes that all frames are prepped + * and ready in the CABQ. + * + * XXX TODO: methodize this; for the EDMA case it should only push + * into the hardware if the FIFO isn't full _AND_ then it should + * tag the final buffer in the queue as ATH_BUF_FIFOEND so the FIFO + * depth is correctly accounted for. 
+ */ +void +ath_beacon_cabq_start(struct ath_softc *sc) +{ + struct ath_buf *bf; + struct ath_txq *cabq = sc->sc_cabq; + + ATH_TXQ_LOCK_ASSERT(cabq); + if (TAILQ_EMPTY(&cabq->axq_q)) + return; + bf = TAILQ_FIRST(&cabq->axq_q); + + /* Push the first entry into the hardware */ + ath_hal_puttxbuf(sc->sc_ah, cabq->axq_qnum, bf->bf_daddr); + + /* NB: gated by beacon so safe to start here */ + ath_hal_txstart(sc->sc_ah, cabq->axq_qnum); +} + struct ath_buf * ath_beacon_generate(struct ath_softc *sc, struct ieee80211vap *vap) { @@ -561,38 +628,43 @@ ath_beacon_generate(struct ath_softc *sc, struct ieee80211vap *vap) * insure cab frames are triggered by this beacon. */ if (avp->av_boff.bo_tim[4] & 1) { - struct ath_hal *ah = sc->sc_ah; /* NB: only at DTIM */ - ATH_TX_LOCK(sc); + ATH_TXQ_LOCK(&avp->av_mcastq); if (nmcastq) { struct ath_buf *bfm; /* * Move frames from the s/w mcast q to the h/w cab q. - * XXX TODO: walk the list, update MORE_DATA bit - * XXX TODO: or maybe, set the MORE data bit in the - * TX descriptor(s) here? * - * XXX TODO: we're still pushing a CABQ frame list to - * AR9380 hosts; but we don't (yet) populate - * the ATH_BUF_BUSY flag in the EDMA - * completion task (for CABQ, though!) + * XXX TODO: This should be methodized - the EDMA + * CABQ setup code may look different! + * + * XXX TODO: if we chain together multiple VAPs + * worth of CABQ traffic, should we keep the + * MORE data bit set on the last frame of each + * intermediary VAP (ie, only clear the MORE + * bit of the last frame on the last vap?) + * + * XXX TODO: once we append this, what happens + * to cabq->axq_link? It'll point at the avp + * mcastq link pointer, so things should be OK. + * Just double-check this is what actually happens. 
*/ bfm = TAILQ_FIRST(&avp->av_mcastq.axq_q); - if (cabq->axq_link != NULL) { + ATH_TXQ_LOCK(cabq); + if (cabq->axq_link != NULL) *cabq->axq_link = bfm->bf_daddr; - } else - ath_hal_puttxbuf(ah, cabq->axq_qnum, - bfm->bf_daddr); ath_txqmove(cabq, &avp->av_mcastq); - + ATH_TXQ_UNLOCK(cabq); + /* + * XXX not entirely accurate, in case a mcast + * queue frame arrived before we grabbed the TX + * lock. + */ sc->sc_stats.ast_cabq_xmit += nmcastq; } - /* NB: gated by beacon so safe to start here */ - if (! TAILQ_EMPTY(&(cabq->axq_q))) - ath_hal_txstart(ah, cabq->axq_qnum); - ATH_TX_UNLOCK(sc); + ATH_TXQ_UNLOCK(&avp->av_mcastq); } return bf; } diff --git a/sys/dev/ath/if_ath_beacon.h b/sys/dev/ath/if_ath_beacon.h index 1effa224dfa..f3f73d7166c 100644 --- a/sys/dev/ath/if_ath_beacon.h +++ b/sys/dev/ath/if_ath_beacon.h @@ -39,6 +39,7 @@ extern void ath_beacon_config(struct ath_softc *sc, struct ieee80211vap *vap); extern struct ath_buf * ath_beacon_generate(struct ath_softc *sc, struct ieee80211vap *vap); +extern void ath_beacon_cabq_start(struct ath_softc *sc); extern int ath_wme_update(struct ieee80211com *ic); extern void ath_beacon_update(struct ieee80211vap *vap, int item); extern void ath_beacon_start_adhoc(struct ath_softc *sc, diff --git a/sys/dev/ath/if_ath_misc.h b/sys/dev/ath/if_ath_misc.h index 167c8267d9b..11957ff93ac 100644 --- a/sys/dev/ath/if_ath_misc.h +++ b/sys/dev/ath/if_ath_misc.h @@ -72,6 +72,9 @@ extern void ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen, int nframes, int nbad); +extern int ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, + uint32_t *hangs); + extern void ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status); diff --git a/sys/dev/ath/if_ath_sysctl.c b/sys/dev/ath/if_ath_sysctl.c index f55cec88214..cd32e5949e0 100644 --- a/sys/dev/ath/if_ath_sysctl.c +++ b/sys/dev/ath/if_ath_sysctl.c @@ -754,6 +754,13 @@ ath_sysctlattach(struct 
ath_softc *sc) &sc->sc_txq_mcastq_maxdepth, 0, "Maximum buffer depth for multicast/broadcast frames"); +#if 0 + SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, + "cabq_enable", CTLFLAG_RW, + &sc->sc_cabq_enable, 0, + "Whether to transmit on the CABQ or not"); +#endif + #ifdef IEEE80211_SUPPORT_TDMA if (ath_hal_macversion(ah) > 0x78) { sc->sc_tdmadbaprep = 2; diff --git a/sys/dev/ath/if_ath_tdma.c b/sys/dev/ath/if_ath_tdma.c index 5cfe238c09e..1d9a95f0be2 100644 --- a/sys/dev/ath/if_ath_tdma.c +++ b/sys/dev/ath/if_ath_tdma.c @@ -612,13 +612,19 @@ ath_tdma_beacon_send(struct ath_softc *sc, struct ieee80211vap *vap) } bf = ath_beacon_generate(sc, vap); + /* XXX We don't do cabq traffic, but just for completeness .. */ + ATH_TXQ_LOCK(sc->sc_cabq); + ath_beacon_cabq_start(sc); + ATH_TXQ_UNLOCK(sc->sc_cabq); + if (bf != NULL) { /* * Stop any current dma and put the new frame on the queue. * This should never fail since we check above that no frames * are still pending on the queue. */ - if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) { + if ((! sc->sc_isedma) && + (! 
ath_hal_stoptxdma(ah, sc->sc_bhalq))) { DPRINTF(sc, ATH_DEBUG_ANY, "%s: beacon queue %u did not stop?\n", __func__, sc->sc_bhalq); diff --git a/sys/dev/ath/if_ath_tx.c b/sys/dev/ath/if_ath_tx.c index 092cecd9d72..09bacf38348 100644 --- a/sys/dev/ath/if_ath_tx.c +++ b/sys/dev/ath/if_ath_tx.c @@ -715,8 +715,10 @@ ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq, /* link descriptor */ *txq->axq_link = bf->bf_daddr; } + ATH_TXQ_LOCK(txq); ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); ath_hal_gettxdesclinkptr(sc->sc_ah, bf->bf_lastds, &txq->axq_link); + ATH_TXQ_UNLOCK(txq); } /* @@ -774,6 +776,7 @@ ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq, /* For now, so not to generate whitespace diffs */ if (1) { + ATH_TXQ_LOCK(txq); #ifdef IEEE80211_SUPPORT_TDMA int qbusy; @@ -899,6 +902,7 @@ ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq, txq->axq_aggr_depth++; ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link); ath_hal_txstart(ah, txq->axq_qnum); + ATH_TXQ_UNLOCK(txq); ATH_KTR(sc, ATH_KTR_TX, 1, "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum); } @@ -915,8 +919,7 @@ ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq) struct ath_hal *ah = sc->sc_ah; struct ath_buf *bf, *bf_last; - ATH_TX_LOCK_ASSERT(sc); - + ATH_TXQ_LOCK_ASSERT(txq); /* This is always going to be cleared, empty or not */ txq->axq_flags &= ~ATH_TXQ_PUTPENDING; @@ -1834,6 +1837,7 @@ ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, bf->bf_state.bfs_tx_queue = txq->axq_qnum; bf->bf_state.bfs_pri = pri; +#if 1 /* * When servicing one or more stations in power-save mode * (or) if there is some mcast data waiting on the mcast @@ -1842,7 +1846,7 @@ ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, * * TODO: we should lock the mcastq before we check the length. 
*/ - if (ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) { + if (sc->sc_cabq_enable && ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) { txq = &avp->av_mcastq; /* * Mark the frame as eventually belonging on the CAB @@ -1851,6 +1855,7 @@ ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, */ bf->bf_state.bfs_tx_queue = sc->sc_cabq->axq_qnum; } +#endif /* Do the generic frame setup */ /* XXX should just bzero the bf_state? */ @@ -3380,6 +3385,11 @@ ath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an, __func__, SEQNO(bf->bf_state.bfs_seqno)); #endif } + + /* Strip it out of an aggregate list if it was in one */ + bf->bf_next = NULL; + + /* Insert on the free queue to be freed by the caller */ TAILQ_INSERT_TAIL(bf_cq, bf, bf_list); } diff --git a/sys/dev/ath/if_ath_tx_edma.c b/sys/dev/ath/if_ath_tx_edma.c index 9bfe19d1be0..07b7d6e6d02 100644 --- a/sys/dev/ath/if_ath_tx_edma.c +++ b/sys/dev/ath/if_ath_tx_edma.c @@ -142,7 +142,7 @@ ath_edma_tx_fifo_fill(struct ath_softc *sc, struct ath_txq *txq) struct ath_buf *bf; int i = 0; - ATH_TX_LOCK_ASSERT(sc); + ATH_TXQ_LOCK_ASSERT(txq); DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: called\n", __func__); @@ -181,9 +181,8 @@ ath_edma_dma_restart(struct ath_softc *sc, struct ath_txq *txq) txq, txq->axq_qnum); - ATH_TX_LOCK_ASSERT(sc); + ATH_TXQ_LOCK_ASSERT(txq); ath_edma_tx_fifo_fill(sc, txq); - } /* @@ -204,7 +203,7 @@ ath_edma_xmit_handoff_hw(struct ath_softc *sc, struct ath_txq *txq, { struct ath_hal *ah = sc->sc_ah; - ATH_TX_LOCK_ASSERT(sc); + ATH_TXQ_LOCK_ASSERT(txq); KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0, ("%s: busy status 0x%x", __func__, bf->bf_flags)); @@ -249,7 +248,7 @@ ath_edma_xmit_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf) { - ATH_TX_LOCK_ASSERT(sc); + ATH_TXQ_LOCK_ASSERT(txq); KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0, ("%s: busy status 0x%x", __func__, bf->bf_flags)); @@ -303,8 +302,6 @@ ath_edma_xmit_handoff(struct ath_softc *sc, struct ath_txq 
*txq, struct ath_buf *bf) { - ATH_TX_LOCK_ASSERT(sc); - DPRINTF(sc, ATH_DEBUG_XMIT_DESC, "%s: called; bf=%p, txq=%p, qnum=%d\n", __func__, @@ -526,7 +523,7 @@ ath_edma_tx_processq(struct ath_softc *sc, int dosched) txq = &sc->sc_txq[ts.ts_queue_id]; - ATH_TX_LOCK(sc); + ATH_TXQ_LOCK(txq); bf = TAILQ_FIRST(&txq->axq_q); DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: qcuid=%d, bf=%p\n", @@ -554,7 +551,7 @@ ath_edma_tx_processq(struct ath_softc *sc, int dosched) txq->axq_aggr_depth--; txq->axq_fifo_depth --; /* XXX assert FIFO depth >= 0 */ - ATH_TX_UNLOCK(sc); + ATH_TXQ_UNLOCK(txq); /* * First we need to make sure ts_rate is valid. @@ -636,11 +633,11 @@ ath_edma_tx_processq(struct ath_softc *sc, int dosched) * to begin validating that things are somewhat * working. */ - ATH_TX_LOCK(sc); + ATH_TXQ_LOCK(txq); if (dosched && txq->axq_fifo_depth == 0) { ath_edma_tx_fifo_fill(sc, txq); } - ATH_TX_UNLOCK(sc); + ATH_TXQ_UNLOCK(txq); } sc->sc_wd_timer = 0; diff --git a/sys/dev/ath/if_athvar.h b/sys/dev/ath/if_athvar.h index bf5a4c8ed10..e47d9e3a445 100644 --- a/sys/dev/ath/if_athvar.h +++ b/sys/dev/ath/if_athvar.h @@ -329,9 +329,11 @@ struct ath_txq { u_int axq_intrcnt; /* interrupt count */ u_int32_t *axq_link; /* link ptr in last TX desc */ TAILQ_HEAD(axq_q_s, ath_buf) axq_q; /* transmit queue */ + struct mtx axq_lock; /* lock on q and link */ + /* * XXX the holdingbf field is protected by the TXBUF lock - * for now, NOT the TX lock. + * for now, NOT the TXQ lock. * * Architecturally, it would likely be better to move * the holdingbf field to a separate array in ath_softc @@ -342,9 +344,24 @@ struct ath_txq { char axq_name[12]; /* e.g. "ath0_txq4" */ /* Per-TID traffic queue for software -> hardware TX */ + /* + * This is protected by the general TX path lock, not (for now) + * by the TXQ lock. 
+ */ TAILQ_HEAD(axq_t_s,ath_tid) axq_tidq; }; +#define ATH_TXQ_LOCK_INIT(_sc, _tq) do { \ + snprintf((_tq)->axq_name, sizeof((_tq)->axq_name), "%s_txq%u", \ + device_get_nameunit((_sc)->sc_dev), (_tq)->axq_qnum); \ + mtx_init(&(_tq)->axq_lock, (_tq)->axq_name, NULL, MTX_DEF); \ + } while (0) +#define ATH_TXQ_LOCK_DESTROY(_tq) mtx_destroy(&(_tq)->axq_lock) +#define ATH_TXQ_LOCK(_tq) mtx_lock(&(_tq)->axq_lock) +#define ATH_TXQ_UNLOCK(_tq) mtx_unlock(&(_tq)->axq_lock) +#define ATH_TXQ_LOCK_ASSERT(_tq) mtx_assert(&(_tq)->axq_lock, MA_OWNED) + + #define ATH_NODE_LOCK(_an) mtx_lock(&(_an)->an_mtx) #define ATH_NODE_UNLOCK(_an) mtx_unlock(&(_an)->an_mtx) #define ATH_NODE_LOCK_ASSERT(_an) mtx_assert(&(_an)->an_mtx, MA_OWNED) @@ -584,6 +601,9 @@ struct ath_softc { sc_rx_stbc : 1, sc_tx_stbc : 1; + + int sc_cabq_enable; /* Enable cabq transmission */ + /* * Enterprise mode configuration for AR9380 and later chipsets. */ -- 2.45.2