2 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13 * redistribution must be conditioned upon including a substantially
14 * similar Disclaimer requirement for further binary redistribution.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGES.
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
34 * Driver for the Atheros Wireless LAN controller.
36 * This software is derived from work of Atsushi Onoe; his contribution
37 * is greatly appreciated.
43 * This is needed for register operations which are performed
44 * by the driver - eg, calls to ath_hal_gettsf32().
46 * It's also required for any AH_DEBUG checks in here, eg the
47 * module dependencies.
52 #include <sys/param.h>
53 #include <sys/systm.h>
54 #include <sys/sysctl.h>
56 #include <sys/malloc.h>
58 #include <sys/mutex.h>
59 #include <sys/kernel.h>
60 #include <sys/socket.h>
61 #include <sys/sockio.h>
62 #include <sys/errno.h>
63 #include <sys/callout.h>
65 #include <sys/endian.h>
66 #include <sys/kthread.h>
67 #include <sys/taskqueue.h>
69 #include <sys/module.h>
71 #include <sys/smp.h> /* for mp_ncpus */
73 #include <machine/bus.h>
76 #include <net/if_dl.h>
77 #include <net/if_media.h>
78 #include <net/if_types.h>
79 #include <net/if_arp.h>
80 #include <net/ethernet.h>
81 #include <net/if_llc.h>
83 #include <net80211/ieee80211_var.h>
84 #include <net80211/ieee80211_regdomain.h>
85 #ifdef IEEE80211_SUPPORT_SUPERG
86 #include <net80211/ieee80211_superg.h>
88 #ifdef IEEE80211_SUPPORT_TDMA
89 #include <net80211/ieee80211_tdma.h>
95 #include <netinet/in.h>
96 #include <netinet/if_ether.h>
99 #include <dev/ath/if_athvar.h>
100 #include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */
101 #include <dev/ath/ath_hal/ah_diagcodes.h>
103 #include <dev/ath/if_ath_debug.h>
104 #include <dev/ath/if_ath_misc.h>
105 #include <dev/ath/if_ath_tsf.h>
106 #include <dev/ath/if_ath_tx.h>
107 #include <dev/ath/if_ath_sysctl.h>
108 #include <dev/ath/if_ath_led.h>
109 #include <dev/ath/if_ath_keycache.h>
110 #include <dev/ath/if_ath_rx.h>
111 #include <dev/ath/if_ath_rx_edma.h>
112 #include <dev/ath/if_ath_tx_edma.h>
113 #include <dev/ath/if_ath_beacon.h>
114 #include <dev/ath/if_ath_spectral.h>
115 #include <dev/ath/if_athdfs.h>
118 #include <dev/ath/ath_tx99/ath_tx99.h>
122 #include <dev/ath/if_ath_alq.h>
126 * Only enable this if you're working on PS-POLL support.
131 * ATH_BCBUF determines the number of vap's that can transmit
132 * beacons and also (currently) the number of vap's that can
133 * have unique mac addresses/bssid. When staggering beacons
134 * 4 is probably a good max as otherwise the beacons become
135 * very closely spaced and there is limited time for cab q traffic
136 * to go out. You can burst beacons instead but that is not good
137 * for stations in power save and at some point you really want
138 * another radio (and channel).
140 * The limit on the number of mac addresses is tied to our use of
141 * the U/L bit and tracking addresses in a byte; it would be
142 * worthwhile to allow more for applications like proxy sta.
144 CTASSERT(ATH_BCBUF <= 8);
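/*
 * Illustrative sketch (not part of the original sources): with the
 * scheme used by assign_address() below, clone index i of the EEPROM
 * address aa:bb:cc:dd:ee:ff gets its first octet rewritten as
 * (aa | (i << 2) | 0x02), i.e. the U/L bit is set and the clone index
 * rides in a few low-order bits of that byte, which is why only a
 * small, fixed number of distinct BSSIDs can be supported.
 */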
146 static struct ieee80211vap *ath_vap_create(struct ieee80211com *,
147 const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
148 const uint8_t [IEEE80211_ADDR_LEN],
149 const uint8_t [IEEE80211_ADDR_LEN]);
150 static void ath_vap_delete(struct ieee80211vap *);
151 static void ath_init(void *);
152 static void ath_stop_locked(struct ifnet *);
153 static void ath_stop(struct ifnet *);
154 static int ath_reset_vap(struct ieee80211vap *, u_long);
155 static void ath_start_queue(struct ifnet *ifp);
156 static int ath_media_change(struct ifnet *);
157 static void ath_watchdog(void *);
158 static int ath_ioctl(struct ifnet *, u_long, caddr_t);
159 static void ath_fatal_proc(void *, int);
160 static void ath_bmiss_vap(struct ieee80211vap *);
161 static void ath_bmiss_proc(void *, int);
162 static void ath_key_update_begin(struct ieee80211vap *);
163 static void ath_key_update_end(struct ieee80211vap *);
164 static void ath_update_mcast(struct ifnet *);
165 static void ath_update_promisc(struct ifnet *);
166 static void ath_updateslot(struct ifnet *);
167 static void ath_bstuck_proc(void *, int);
168 static void ath_reset_proc(void *, int);
169 static int ath_desc_alloc(struct ath_softc *);
170 static void ath_desc_free(struct ath_softc *);
171 static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *,
172 const uint8_t [IEEE80211_ADDR_LEN]);
173 static void ath_node_cleanup(struct ieee80211_node *);
174 static void ath_node_free(struct ieee80211_node *);
175 static void ath_node_getsignal(const struct ieee80211_node *,
177 static void ath_txq_init(struct ath_softc *sc, struct ath_txq *, int);
178 static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype);
179 static int ath_tx_setup(struct ath_softc *, int, int);
180 static void ath_tx_cleanupq(struct ath_softc *, struct ath_txq *);
181 static void ath_tx_cleanup(struct ath_softc *);
182 static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq,
184 static void ath_tx_proc_q0(void *, int);
185 static void ath_tx_proc_q0123(void *, int);
186 static void ath_tx_proc(void *, int);
187 static void ath_txq_sched_tasklet(void *, int);
188 static int ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
189 static void ath_chan_change(struct ath_softc *, struct ieee80211_channel *);
190 static void ath_scan_start(struct ieee80211com *);
191 static void ath_scan_end(struct ieee80211com *);
192 static void ath_set_channel(struct ieee80211com *);
193 #ifdef ATH_ENABLE_11N
194 static void ath_update_chw(struct ieee80211com *);
195 #endif /* ATH_ENABLE_11N */
196 static void ath_calibrate(void *);
197 static int ath_newstate(struct ieee80211vap *, enum ieee80211_state, int);
198 static void ath_setup_stationkey(struct ieee80211_node *);
199 static void ath_newassoc(struct ieee80211_node *, int);
200 static int ath_setregdomain(struct ieee80211com *,
201 struct ieee80211_regdomain *, int,
202 struct ieee80211_channel []);
203 static void ath_getradiocaps(struct ieee80211com *, int, int *,
204 struct ieee80211_channel []);
205 static int ath_getchannels(struct ath_softc *);
207 static int ath_rate_setup(struct ath_softc *, u_int mode);
208 static void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);
210 static void ath_announce(struct ath_softc *);
212 static void ath_dfs_tasklet(void *, int);
213 static void ath_node_powersave(struct ieee80211_node *, int);
214 static int ath_node_set_tim(struct ieee80211_node *, int);
216 #ifdef IEEE80211_SUPPORT_TDMA
217 #include <dev/ath/if_ath_tdma.h>
220 SYSCTL_DECL(_hw_ath);
222 /* XXX validate sysctl values */
223 static int ath_longcalinterval = 30; /* long cals every 30 secs */
224 SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval,
225 0, "long chip calibration interval (secs)");
226 static int ath_shortcalinterval = 100; /* short cals every 100 ms */
227 SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval,
228 0, "short chip calibration interval (msecs)");
229 static int ath_resetcalinterval = 20*60; /* reset cal state 20 mins */
230 SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval,
231 0, "reset chip calibration results (secs)");
232 static int ath_anicalinterval = 100; /* ANI calibration - 100 msec */
233 SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval,
234 0, "ANI calibration (msecs)");
236 int ath_rxbuf = ATH_RXBUF; /* # rx buffers to allocate */
237 SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf,
238 0, "rx buffers allocated");
239 TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf);
240 int ath_txbuf = ATH_TXBUF; /* # tx buffers to allocate */
241 SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf,
242 0, "tx buffers allocated");
243 TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);
244 int ath_txbuf_mgmt = ATH_MGMT_TXBUF; /* # mgmt tx buffers to allocate */
245 SYSCTL_INT(_hw_ath, OID_AUTO, txbuf_mgmt, CTLFLAG_RW, &ath_txbuf_mgmt,
246 0, "tx (mgmt) buffers allocated");
247 TUNABLE_INT("hw.ath.txbuf_mgmt", &ath_txbuf_mgmt);
249 int ath_bstuck_threshold = 4; /* max missed beacons */
250 SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold,
251 0, "max missed beacon xmits before chip reset");
253 MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");
256 ath_legacy_attach_comp_func(struct ath_softc *sc)
260 * Special case certain configurations. Note the
261	 * CAB queue is handled specially by these, so don't
262	 * include it when checking the txq setup mask.
264 switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) {
266 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc);
269 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc);
272 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc);
277 #define HAL_MODE_HT20 (HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20)
278 #define HAL_MODE_HT40 \
279 (HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \
280 HAL_MODE_11NA_HT40PLUS | HAL_MODE_11NA_HT40MINUS)
282 ath_attach(u_int16_t devid, struct ath_softc *sc)
285 struct ieee80211com *ic;
286 struct ath_hal *ah = NULL;
290 uint8_t macaddr[IEEE80211_ADDR_LEN];
291 int rx_chainmask, tx_chainmask;
293 DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);
296 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
298 device_printf(sc->sc_dev, "can not if_alloc()\n");
305 /* set these up early for if_printf use */
306 if_initname(ifp, device_get_name(sc->sc_dev),
307 device_get_unit(sc->sc_dev));
310 ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh,
311 sc->sc_eepromdata, &status);
313 if_printf(ifp, "unable to attach hardware; HAL status %u\n",
319 sc->sc_invalid = 0; /* ready to go, enable interrupt handling */
321 sc->sc_debug = ath_debug;
325 * Setup the DMA/EDMA functions based on the current
328 * This is required before the descriptors are allocated.
330 if (ath_hal_hasedma(sc->sc_ah)) {
332 ath_recv_setup_edma(sc);
333 ath_xmit_setup_edma(sc);
335 ath_recv_setup_legacy(sc);
336 ath_xmit_setup_legacy(sc);
340 * Check if the MAC has multi-rate retry support.
341 * We do this by trying to setup a fake extended
342	 * descriptor. MACs that don't have support will
343	 * return false w/o doing anything. MACs that do
344 * support it will return true w/o doing anything.
346 sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);
349 * Check if the device has hardware counters for PHY
350 * errors. If so we need to enable the MIB interrupt
351 * so we can act on stat triggers.
353 if (ath_hal_hwphycounters(ah))
357 * Get the hardware key cache size.
359 sc->sc_keymax = ath_hal_keycachesize(ah);
360 if (sc->sc_keymax > ATH_KEYMAX) {
361 if_printf(ifp, "Warning, using only %u of %u key cache slots\n",
362 ATH_KEYMAX, sc->sc_keymax);
363 sc->sc_keymax = ATH_KEYMAX;
366 * Reset the key cache since some parts do not
367 * reset the contents on initial power up.
369 for (i = 0; i < sc->sc_keymax; i++)
370 ath_hal_keyreset(ah, i);
373 * Collect the default channel list.
375 error = ath_getchannels(sc);
380 * Setup rate tables for all potential media types.
382 ath_rate_setup(sc, IEEE80211_MODE_11A);
383 ath_rate_setup(sc, IEEE80211_MODE_11B);
384 ath_rate_setup(sc, IEEE80211_MODE_11G);
385 ath_rate_setup(sc, IEEE80211_MODE_TURBO_A);
386 ath_rate_setup(sc, IEEE80211_MODE_TURBO_G);
387 ath_rate_setup(sc, IEEE80211_MODE_STURBO_A);
388 ath_rate_setup(sc, IEEE80211_MODE_11NA);
389 ath_rate_setup(sc, IEEE80211_MODE_11NG);
390 ath_rate_setup(sc, IEEE80211_MODE_HALF);
391 ath_rate_setup(sc, IEEE80211_MODE_QUARTER);
393 /* NB: setup here so ath_rate_update is happy */
394 ath_setcurmode(sc, IEEE80211_MODE_11A);
397 * Allocate TX descriptors and populate the lists.
399 error = ath_desc_alloc(sc);
401 if_printf(ifp, "failed to allocate TX descriptors: %d\n",
405 error = ath_txdma_setup(sc);
407 if_printf(ifp, "failed to allocate TX descriptors: %d\n",
413 * Allocate RX descriptors and populate the lists.
415 error = ath_rxdma_setup(sc);
417 if_printf(ifp, "failed to allocate RX descriptors: %d\n",
422 callout_init_mtx(&sc->sc_cal_ch, &sc->sc_mtx, 0);
423 callout_init_mtx(&sc->sc_wd_ch, &sc->sc_mtx, 0);
425 ATH_TXBUF_LOCK_INIT(sc);
427 sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT,
428 taskqueue_thread_enqueue, &sc->sc_tq);
429 taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
430 "%s taskq", ifp->if_xname);
432 TASK_INIT(&sc->sc_rxtask, 0, sc->sc_rx.recv_tasklet, sc);
433 TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
434 TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc);
435 TASK_INIT(&sc->sc_resettask,0, ath_reset_proc, sc);
436 TASK_INIT(&sc->sc_txqtask, 0, ath_txq_sched_tasklet, sc);
437 TASK_INIT(&sc->sc_fataltask, 0, ath_fatal_proc, sc);
439 /* XXX make this a higher priority taskqueue? */
440 TASK_INIT(&sc->sc_txpkttask, 0, ath_start_task, sc);
443 * Allocate hardware transmit queues: one queue for
444 * beacon frames and one data queue for each QoS
445 * priority. Note that the hal handles resetting
446 * these queues at the needed time.
450 sc->sc_bhalq = ath_beaconq_setup(sc);
451 if (sc->sc_bhalq == (u_int) -1) {
452 if_printf(ifp, "unable to setup a beacon xmit queue!\n");
456 sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
457 if (sc->sc_cabq == NULL) {
458 if_printf(ifp, "unable to setup CAB xmit queue!\n");
462 /* NB: insure BK queue is the lowest priority h/w queue */
463 if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
464 if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
465 ieee80211_wme_acnames[WME_AC_BK]);
469 if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
470 !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
471 !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
473 * Not enough hardware tx queues to properly do WME;
474 * just punt and assign them all to the same h/w queue.
475 * We could do a better job of this if, for example,
476 * we allocate queues when we switch from station to
479 if (sc->sc_ac2q[WME_AC_VI] != NULL)
480 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
481 if (sc->sc_ac2q[WME_AC_BE] != NULL)
482 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
483 sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
484 sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
485 sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
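		/*
		 * Note: with every AC aliased to the BK queue like this,
		 * the WME capability check further down (which requires
		 * the BE and BK queues to differ) leaves IEEE80211_C_WME
		 * unset, so prioritisation is effectively disabled rather
		 * than half-working.
		 */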
489 * Attach the TX completion function.
491 * The non-EDMA chips may have some special case optimisations;
492 * this method gives everyone a chance to attach cleanly.
494 sc->sc_tx.xmit_attach_comp_func(sc);
497 * Setup rate control. Some rate control modules
498	 * call back to change the antenna state so expose
499 * the necessary entry points.
500 * XXX maybe belongs in struct ath_ratectrl?
502 sc->sc_setdefantenna = ath_setdefantenna;
503 sc->sc_rc = ath_rate_attach(sc);
504 if (sc->sc_rc == NULL) {
509 /* Attach DFS module */
510 if (! ath_dfs_attach(sc)) {
511 device_printf(sc->sc_dev,
512 "%s: unable to attach DFS\n", __func__);
517 /* Attach spectral module */
518 if (ath_spectral_attach(sc) < 0) {
519 device_printf(sc->sc_dev,
520 "%s: unable to attach spectral\n", __func__);
525 /* Start DFS processing tasklet */
526 TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc);
528 /* Configure LED state */
531 sc->sc_ledon = 0; /* low true */
532 sc->sc_ledidle = (2700*hz)/1000; /* 2.7sec */
533 callout_init(&sc->sc_ledtimer, CALLOUT_MPSAFE);
536 * Don't setup hardware-based blinking.
538 * Although some NICs may have this configured in the
539 * default reset register values, the user may wish
540 * to alter which pins have which function.
542 * The reference driver attaches the MAC network LED to GPIO1 and
543 * the MAC power LED to GPIO2. However, the DWA-552 cardbus
544 * NIC has these reversed.
546 sc->sc_hardled = (1 == 0);
547 sc->sc_led_net_pin = -1;
548 sc->sc_led_pwr_pin = -1;
550 * Auto-enable soft led processing for IBM cards and for
551 * 5211 minipci cards. Users can also manually enable/disable
552 * support with a sysctl.
554 sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID);
556 ath_hal_setledstate(ah, HAL_LED_INIT);
559 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
560 ifp->if_start = ath_start_queue;
561 ifp->if_ioctl = ath_ioctl;
562 ifp->if_init = ath_init;
563 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
564 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
565 IFQ_SET_READY(&ifp->if_snd);
568 /* XXX not right but it's not used anywhere important */
569 ic->ic_phytype = IEEE80211_T_OFDM;
570 ic->ic_opmode = IEEE80211_M_STA;
572 IEEE80211_C_STA /* station mode */
573 | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */
574 | IEEE80211_C_HOSTAP /* hostap mode */
575 | IEEE80211_C_MONITOR /* monitor mode */
576 | IEEE80211_C_AHDEMO /* adhoc demo mode */
577 | IEEE80211_C_WDS /* 4-address traffic works */
578 | IEEE80211_C_MBSS /* mesh point link mode */
579 | IEEE80211_C_SHPREAMBLE /* short preamble supported */
580 | IEEE80211_C_SHSLOT /* short slot time supported */
581 | IEEE80211_C_WPA /* capable of WPA1+WPA2 */
582 #ifndef ATH_ENABLE_11N
583 | IEEE80211_C_BGSCAN /* capable of bg scanning */
585 | IEEE80211_C_TXFRAG /* handle tx frags */
586 #ifdef ATH_ENABLE_DFS
587 | IEEE80211_C_DFS /* Enable radar detection */
591 * Query the hal to figure out h/w crypto support.
593 if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
594 ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP;
595 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
596 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB;
597 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
598 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
599 if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
600 ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP;
601 if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
602 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP;
604 * Check if h/w does the MIC and/or whether the
605 * separate key cache entries are required to
606 * handle both tx+rx MIC keys.
608 if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
609 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
611 * If the h/w supports storing tx+rx MIC keys
612 * in one cache slot automatically enable use.
614 if (ath_hal_hastkipsplit(ah) ||
615 !ath_hal_settkipsplit(ah, AH_FALSE))
618 * If the h/w can do TKIP MIC together with WME then
619 * we use it; otherwise we force the MIC to be done
620 * in software by the net80211 layer.
622 if (ath_hal_haswmetkipmic(ah))
623 sc->sc_wmetkipmic = 1;
625 sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR);
627 * Check for multicast key search support.
629 if (ath_hal_hasmcastkeysearch(sc->sc_ah) &&
630 !ath_hal_getmcastkeysearch(sc->sc_ah)) {
631 ath_hal_setmcastkeysearch(sc->sc_ah, 1);
633 sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah);
635 * Mark key cache slots associated with global keys
636 * as in use. If we knew TKIP was not to be used we
637 * could leave the +32, +64, and +32+64 slots free.
639 for (i = 0; i < IEEE80211_WEP_NKID; i++) {
640 setbit(sc->sc_keymap, i);
641 setbit(sc->sc_keymap, i+64);
642 if (sc->sc_splitmic) {
643 setbit(sc->sc_keymap, i+32);
644 setbit(sc->sc_keymap, i+32+64);
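/*
 * Layout note (a sketch, inferred from the reservations above): a
 * global key in slot i needs its TKIP MIC companion at slot i+64,
 * and parts that require separate TX/RX MIC entries also consume
 * slots i+32 and i+32+64; marking all of them here avoids handing
 * those slots out to per-station keys later.
 */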
648 * TPC support can be done either with a global cap or
649 * per-packet support. The latter is not available on
650 * all parts. We're a bit pedantic here as all parts
651 * support a global cap.
653 if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
654 ic->ic_caps |= IEEE80211_C_TXPMGT;
657 * Mark WME capability only if we have sufficient
658 * hardware queues to do proper priority scheduling.
660 if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
661 ic->ic_caps |= IEEE80211_C_WME;
663 * Check for misc other capabilities.
665 if (ath_hal_hasbursting(ah))
666 ic->ic_caps |= IEEE80211_C_BURST;
667 sc->sc_hasbmask = ath_hal_hasbssidmask(ah);
668 sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah);
669 sc->sc_hastsfadd = ath_hal_hastsfadjust(ah);
670 sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah);
671 sc->sc_rxtsf32 = ath_hal_has_long_rxdesc_tsf(ah);
672 if (ath_hal_hasfastframes(ah))
673 ic->ic_caps |= IEEE80211_C_FF;
674 wmodes = ath_hal_getwirelessmodes(ah);
675 if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO))
676 ic->ic_caps |= IEEE80211_C_TURBOP;
677 #ifdef IEEE80211_SUPPORT_TDMA
678 if (ath_hal_macversion(ah) > 0x78) {
679 ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */
680 ic->ic_tdma_update = ath_tdma_update;
685 * TODO: enforce that at least this many frames are available
686 * in the txbuf list before allowing data frames (raw or
687 * otherwise) to be transmitted.
689 sc->sc_txq_data_minfree = 10;
691 * Leave this as default to maintain legacy behaviour.
692 * Shortening the cabq/mcastq may end up causing some
693 * undesirable behaviour.
695 sc->sc_txq_mcastq_maxdepth = ath_txbuf;
697 /* Enable CABQ by default */
698 sc->sc_cabq_enable = 1;
701 * Allow the TX and RX chainmasks to be overridden by
702 * environment variables and/or device.hints.
704 * This must be done early - before the hardware is
705 * calibrated or before the 802.11n stream calculation
708 if (resource_int_value(device_get_name(sc->sc_dev),
709 device_get_unit(sc->sc_dev), "rx_chainmask",
710 &rx_chainmask) == 0) {
711 device_printf(sc->sc_dev, "Setting RX chainmask to 0x%x\n",
713 (void) ath_hal_setrxchainmask(sc->sc_ah, rx_chainmask);
715 if (resource_int_value(device_get_name(sc->sc_dev),
716 device_get_unit(sc->sc_dev), "tx_chainmask",
717 &tx_chainmask) == 0) {
718 device_printf(sc->sc_dev, "Setting TX chainmask to 0x%x\n",
720 (void) ath_hal_settxchainmask(sc->sc_ah, tx_chainmask);
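/*
 * Example (assumed, using the standard device.hints mechanism that
 * backs resource_int_value()): placing
 *     hint.ath.0.rx_chainmask="0x3"
 *     hint.ath.0.tx_chainmask="0x3"
 * in /boot/device.hints would force a two-chain configuration on
 * unit 0 before calibration and the 11n stream calculation run.
 */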
724 * Disable MRR with protected frames by default.
725 * Only 802.11n series NICs can handle this.
727 sc->sc_mrrprot = 0; /* XXX should be a capability */
730	 * Query the enterprise mode information from the HAL.
732 if (ath_hal_getcapability(ah, HAL_CAP_ENTERPRISE_MODE, 0,
733 &sc->sc_ent_cfg) == HAL_OK)
736 #ifdef ATH_ENABLE_11N
738 * Query HT capabilities
740 if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK &&
741 (wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) {
744 device_printf(sc->sc_dev, "[HT] enabling HT modes\n");
746 sc->sc_mrrprot = 1; /* XXX should be a capability */
748 ic->ic_htcaps = IEEE80211_HTC_HT /* HT operation */
749 | IEEE80211_HTC_AMPDU /* A-MPDU tx/rx */
750 | IEEE80211_HTC_AMSDU /* A-MSDU tx/rx */
751 | IEEE80211_HTCAP_MAXAMSDU_3839
752 /* max A-MSDU length */
753 | IEEE80211_HTCAP_SMPS_OFF; /* SM power save off */
757 * Enable short-GI for HT20 only if the hardware
758 * advertises support.
759 * Notably, anything earlier than the AR9287 doesn't.
761 if ((ath_hal_getcapability(ah,
762 HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) &&
763 (wmodes & HAL_MODE_HT20)) {
764 device_printf(sc->sc_dev,
765 "[HT] enabling short-GI in 20MHz mode\n");
766 ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20;
769 if (wmodes & HAL_MODE_HT40)
770 ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40
771 | IEEE80211_HTCAP_SHORTGI40;
774 * TX/RX streams need to be taken into account when
775 * negotiating which MCS rates it'll receive and
776 * what MCS rates are available for TX.
778 (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &txs);
779 (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &rxs);
781 ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask);
782 ath_hal_gettxchainmask(ah, &sc->sc_txchainmask);
784 ic->ic_txstream = txs;
785 ic->ic_rxstream = rxs;
788 * Setup TX and RX STBC based on what the HAL allows and
789 * the currently configured chainmask set.
790	 * I.e., don't enable STBC TX if only one chain is enabled.
791 * STBC RX is fine on a single RX chain; it just won't
792 * provide any real benefit.
794 if (ath_hal_getcapability(ah, HAL_CAP_RX_STBC, 0,
797 device_printf(sc->sc_dev,
798 "[HT] 1 stream STBC receive enabled\n");
799 ic->ic_htcaps |= IEEE80211_HTCAP_RXSTBC_1STREAM;
801 if (txs > 1 && ath_hal_getcapability(ah, HAL_CAP_TX_STBC, 0,
804 device_printf(sc->sc_dev,
805 "[HT] 1 stream STBC transmit enabled\n");
806 ic->ic_htcaps |= IEEE80211_HTCAP_TXSTBC;
809 (void) ath_hal_getcapability(ah, HAL_CAP_RTS_AGGR_LIMIT, 1,
810 &sc->sc_rts_aggr_limit);
811 if (sc->sc_rts_aggr_limit != (64 * 1024))
812 device_printf(sc->sc_dev,
813 "[HT] RTS aggregates limited to %d KiB\n",
814 sc->sc_rts_aggr_limit / 1024);
816 device_printf(sc->sc_dev,
817 "[HT] %d RX streams; %d TX streams\n", rxs, txs);
822 * Initial aggregation settings.
824 sc->sc_hwq_limit = ATH_AGGR_MIN_QDEPTH;
825 sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW;
826 sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH;
827 sc->sc_aggr_limit = ATH_AGGR_MAXSIZE;
828 sc->sc_delim_min_pad = 0;
831 * Check if the hardware requires PCI register serialisation.
832 * Some of the Owl based MACs require this.
835 ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR,
836 0, NULL) == HAL_OK) {
837 sc->sc_ah->ah_config.ah_serialise_reg_war = 1;
838 device_printf(sc->sc_dev,
839 "Enabling register serialisation\n");
843 * Initialise the deferred completed RX buffer list.
845 TAILQ_INIT(&sc->sc_rx_rxlist);
848 * Indicate we need the 802.11 header padded to a
849 * 32-bit boundary for 4-address and QoS frames.
851 ic->ic_flags |= IEEE80211_F_DATAPAD;
854 * Query the hal about antenna support.
856 sc->sc_defant = ath_hal_getdefantenna(ah);
859 * Not all chips have the VEOL support we want to
860 * use with IBSS beacons; check here for it.
862 sc->sc_hasveol = ath_hal_hasveol(ah);
864 /* get mac address from hardware */
865 ath_hal_getmac(ah, macaddr);
867 ath_hal_getbssidmask(ah, sc->sc_hwbssidmask);
869 /* NB: used to size node table key mapping array */
870 ic->ic_max_keyix = sc->sc_keymax;
871 /* call MI attach routine. */
872 ieee80211_ifattach(ic, macaddr);
873 ic->ic_setregdomain = ath_setregdomain;
874 ic->ic_getradiocaps = ath_getradiocaps;
875 sc->sc_opmode = HAL_M_STA;
877 /* override default methods */
878 ic->ic_newassoc = ath_newassoc;
879 ic->ic_updateslot = ath_updateslot;
880 ic->ic_wme.wme_update = ath_wme_update;
881 ic->ic_vap_create = ath_vap_create;
882 ic->ic_vap_delete = ath_vap_delete;
883 ic->ic_raw_xmit = ath_raw_xmit;
884 ic->ic_update_mcast = ath_update_mcast;
885 ic->ic_update_promisc = ath_update_promisc;
886 ic->ic_node_alloc = ath_node_alloc;
887 sc->sc_node_free = ic->ic_node_free;
888 ic->ic_node_free = ath_node_free;
889 sc->sc_node_cleanup = ic->ic_node_cleanup;
890 ic->ic_node_cleanup = ath_node_cleanup;
891 ic->ic_node_getsignal = ath_node_getsignal;
892 ic->ic_scan_start = ath_scan_start;
893 ic->ic_scan_end = ath_scan_end;
894 ic->ic_set_channel = ath_set_channel;
895 #ifdef ATH_ENABLE_11N
896 /* 802.11n specific - but just override anyway */
897 sc->sc_addba_request = ic->ic_addba_request;
898 sc->sc_addba_response = ic->ic_addba_response;
899 sc->sc_addba_stop = ic->ic_addba_stop;
900 sc->sc_bar_response = ic->ic_bar_response;
901 sc->sc_addba_response_timeout = ic->ic_addba_response_timeout;
903 ic->ic_addba_request = ath_addba_request;
904 ic->ic_addba_response = ath_addba_response;
905 ic->ic_addba_response_timeout = ath_addba_response_timeout;
906 ic->ic_addba_stop = ath_addba_stop;
907 ic->ic_bar_response = ath_bar_response;
909 ic->ic_update_chw = ath_update_chw;
910 #endif /* ATH_ENABLE_11N */
912 #ifdef ATH_ENABLE_RADIOTAP_VENDOR_EXT
914 * There's one vendor bitmap entry in the RX radiotap
915 * header; make sure that's taken into account.
917 ieee80211_radiotap_attachv(ic,
918 &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), 0,
919 ATH_TX_RADIOTAP_PRESENT,
920 &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), 1,
921 ATH_RX_RADIOTAP_PRESENT);
924 * No vendor bitmap/extensions are present.
926 ieee80211_radiotap_attach(ic,
927 &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
928 ATH_TX_RADIOTAP_PRESENT,
929 &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
930 ATH_RX_RADIOTAP_PRESENT);
931 #endif /* ATH_ENABLE_RADIOTAP_VENDOR_EXT */
934 * Setup the ALQ logging if required
937 if_ath_alq_init(&sc->sc_alq, device_get_nameunit(sc->sc_dev));
938 if_ath_alq_setcfg(&sc->sc_alq,
939 sc->sc_ah->ah_macVersion,
940 sc->sc_ah->ah_macRev,
941 sc->sc_ah->ah_phyRev,
942 sc->sc_ah->ah_magic);
946 * Setup dynamic sysctl's now that country code and
947 * regdomain are available from the hal.
949 ath_sysctlattach(sc);
950 ath_sysctl_stats_attach(sc);
951 ath_sysctl_hal_attach(sc);
954 ieee80211_announce(ic);
960 ath_txdma_teardown(sc);
961 ath_rxdma_teardown(sc);
967 * To work around scoping issues with CURVNET_SET/CURVNET_RESTORE..
969 if (ifp != NULL && ifp->if_vnet) {
970 CURVNET_SET(ifp->if_vnet);
973 } else if (ifp != NULL)
980 ath_detach(struct ath_softc *sc)
982 struct ifnet *ifp = sc->sc_ifp;
984 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
985 __func__, ifp->if_flags);
988 * NB: the order of these is important:
989 * o stop the chip so no more interrupts will fire
990 * o call the 802.11 layer before detaching the hal to
991 * insure callbacks into the driver to delete global
992 * key cache entries can be handled
993 * o free the taskqueue which drains any pending tasks
994 * o reclaim the tx queue data structures after calling
995 * the 802.11 layer as we'll get called back to reclaim
996 * node state and potentially want to use them
997 * o to cleanup the tx queues the hal is called, so detach
999 * Other than that, it's straightforward...
1002 ieee80211_ifdetach(ifp->if_l2com);
1003 taskqueue_free(sc->sc_tq);
1004 #ifdef ATH_TX99_DIAG
1005 if (sc->sc_tx99 != NULL)
1006 sc->sc_tx99->detach(sc->sc_tx99);
1008 ath_rate_detach(sc->sc_rc);
1009 #ifdef ATH_DEBUG_ALQ
1010 if_ath_alq_tidyup(&sc->sc_alq);
1012 ath_spectral_detach(sc);
1015 ath_txdma_teardown(sc);
1016 ath_rxdma_teardown(sc);
1018 ath_hal_detach(sc->sc_ah); /* NB: sets chip in full sleep */
1020 CURVNET_SET(ifp->if_vnet);
1028 * MAC address handling for multiple BSS on the same radio.
1029 * The first vap uses the MAC address from the EEPROM. For
1030 * subsequent vap's we set the U/L bit (bit 1) in the MAC
1031 * address and use the next six bits as an index.
1034 assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
1038 if (clone && sc->sc_hasbmask) {
1039 /* NB: we only do this if h/w supports multiple bssid */
1040 for (i = 0; i < 8; i++)
1041 if ((sc->sc_bssidmask & (1<<i)) == 0)
1044 mac[0] |= (i << 2)|0x2;
1047 sc->sc_bssidmask |= 1<<i;
1048 sc->sc_hwbssidmask[0] &= ~mac[0];
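/*
 * Sketch of the bookkeeping shared with reclaim_address() below (an
 * interpretation, not from the original comments): sc_bssidmask tracks
 * which of the eight clone indices are in use, while the hardware
 * BSSID mask has bits cleared wherever locally assigned addresses may
 * differ, so the MAC will accept frames addressed to any active BSSID.
 */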
1054 reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
1056 int i = mac[0] >> 2;
1059 if (i != 0 || --sc->sc_nbssid0 == 0) {
1060 sc->sc_bssidmask &= ~(1<<i);
1061 /* recalculate bssid mask from remaining addresses */
1063 for (i = 1; i < 8; i++)
1064 if (sc->sc_bssidmask & (1<<i))
1065 mask &= ~((i<<2)|0x2);
1066 sc->sc_hwbssidmask[0] |= mask;
1071 * Assign a beacon xmit slot. We try to space out
1072 * assignments so when beacons are staggered the
1073 * traffic coming out of the cab q has maximal time
1074 * to go out before the next beacon is scheduled.
1077 assign_bslot(struct ath_softc *sc)
1082 for (slot = 0; slot < ATH_BCBUF; slot++)
1083 if (sc->sc_bslot[slot] == NULL) {
1084 if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL &&
1085 sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL)
1088 /* NB: keep looking for a double slot */
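		/*
		 * Worked example (hypothetical occupancy): with ATH_BCBUF
		 * of 4 and only slot 0 in use, slot 1 is passed over (its
		 * neighbour slot 0 is busy) and slot 2 is chosen, so the
		 * two staggered beacons end up half an interval apart.
		 */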
1093 static struct ieee80211vap *
1094 ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
1095 enum ieee80211_opmode opmode, int flags,
1096 const uint8_t bssid[IEEE80211_ADDR_LEN],
1097 const uint8_t mac0[IEEE80211_ADDR_LEN])
1099 struct ath_softc *sc = ic->ic_ifp->if_softc;
1100 struct ath_vap *avp;
1101 struct ieee80211vap *vap;
1102 uint8_t mac[IEEE80211_ADDR_LEN];
1103 int needbeacon, error;
1104 enum ieee80211_opmode ic_opmode;
1106 avp = (struct ath_vap *) malloc(sizeof(struct ath_vap),
1107 M_80211_VAP, M_WAITOK | M_ZERO);
1109 IEEE80211_ADDR_COPY(mac, mac0);
1112 ic_opmode = opmode; /* default to opmode of new vap */
1114 case IEEE80211_M_STA:
1115 if (sc->sc_nstavaps != 0) { /* XXX only 1 for now */
1116 device_printf(sc->sc_dev, "only 1 sta vap supported\n");
1121 * With multiple vaps we must fall back
1122 * to s/w beacon miss handling.
1124 flags |= IEEE80211_CLONE_NOBEACONS;
1126 if (flags & IEEE80211_CLONE_NOBEACONS) {
1128	 * Station mode w/o beacons is implemented w/ AP mode.
1130 ic_opmode = IEEE80211_M_HOSTAP;
1133 case IEEE80211_M_IBSS:
1134 if (sc->sc_nvaps != 0) { /* XXX only 1 for now */
1135 device_printf(sc->sc_dev,
1136 "only 1 ibss vap supported\n");
1141 case IEEE80211_M_AHDEMO:
1142 #ifdef IEEE80211_SUPPORT_TDMA
1143 if (flags & IEEE80211_CLONE_TDMA) {
1144 if (sc->sc_nvaps != 0) {
1145 device_printf(sc->sc_dev,
1146 "only 1 tdma vap supported\n");
1150 flags |= IEEE80211_CLONE_NOBEACONS;
1154 case IEEE80211_M_MONITOR:
1155 if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) {
1157 * Adopt existing mode. Adding a monitor or ahdemo
1158 * vap to an existing configuration is of dubious
1159 * value but should be ok.
1161 /* XXX not right for monitor mode */
1162 ic_opmode = ic->ic_opmode;
1165 case IEEE80211_M_HOSTAP:
1166 case IEEE80211_M_MBSS:
1169 case IEEE80211_M_WDS:
1170 if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) {
1171 device_printf(sc->sc_dev,
1172 "wds not supported in sta mode\n");
1176 * Silently remove any request for a unique
1177 * bssid; WDS vap's always share the local
1180 flags &= ~IEEE80211_CLONE_BSSID;
1181 if (sc->sc_nvaps == 0)
1182 ic_opmode = IEEE80211_M_HOSTAP;
1184 ic_opmode = ic->ic_opmode;
1187 device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
1191 * Check that a beacon buffer is available; the code below assumes it.
1193	if (needbeacon && TAILQ_EMPTY(&sc->sc_bbuf)) {
1194 device_printf(sc->sc_dev, "no beacon buffer available\n");
1199 if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
1200 assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
1201 ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
1205 /* XXX can't hold mutex across if_alloc */
1207 error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags,
1211 device_printf(sc->sc_dev, "%s: error %d creating vap\n",
1216 /* h/w crypto support */
1217 vap->iv_key_alloc = ath_key_alloc;
1218 vap->iv_key_delete = ath_key_delete;
1219 vap->iv_key_set = ath_key_set;
1220 vap->iv_key_update_begin = ath_key_update_begin;
1221 vap->iv_key_update_end = ath_key_update_end;
1223 /* override various methods */
1224 avp->av_recv_mgmt = vap->iv_recv_mgmt;
1225 vap->iv_recv_mgmt = ath_recv_mgmt;
1226 vap->iv_reset = ath_reset_vap;
1227 vap->iv_update_beacon = ath_beacon_update;
1228 avp->av_newstate = vap->iv_newstate;
1229 vap->iv_newstate = ath_newstate;
1230 avp->av_bmiss = vap->iv_bmiss;
1231 vap->iv_bmiss = ath_bmiss_vap;
1233 avp->av_node_ps = vap->iv_node_ps;
1234 vap->iv_node_ps = ath_node_powersave;
1236 avp->av_set_tim = vap->iv_set_tim;
1237 vap->iv_set_tim = ath_node_set_tim;
1239 /* Set default parameters */
1242	 * Anything earlier than some AR9300 series MACs doesn't
1243 * support a smaller MPDU density.
1245 vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8;
1247 * All NICs can handle the maximum size, however
1248 * AR5416 based MACs can only TX aggregates w/ RTS
1249 * protection when the total aggregate size is <= 8k.
1250 * However, for now that's enforced by the TX path.
1252 vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
1257 * Allocate beacon state and setup the q for buffered
1258 * multicast frames. We know a beacon buffer is
1259 * available because we checked above.
1261 avp->av_bcbuf = TAILQ_FIRST(&sc->sc_bbuf);
1262 TAILQ_REMOVE(&sc->sc_bbuf, avp->av_bcbuf, bf_list);
1263 if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) {
1265 * Assign the vap to a beacon xmit slot. As above
1266 * this cannot fail to find a free one.
1268 avp->av_bslot = assign_bslot(sc);
1269 KASSERT(sc->sc_bslot[avp->av_bslot] == NULL,
1270 ("beacon slot %u not empty", avp->av_bslot));
1271 sc->sc_bslot[avp->av_bslot] = vap;
1274 if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) {
1276	 * Multiple vaps are to transmit beacons and we
1277 * have h/w support for TSF adjusting; enable
1278 * use of staggered beacons.
1280 sc->sc_stagbeacons = 1;
1282 ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ);
1285 ic->ic_opmode = ic_opmode;
1286 if (opmode != IEEE80211_M_WDS) {
1288 if (opmode == IEEE80211_M_STA)
1290 if (opmode == IEEE80211_M_MBSS)
1293 switch (ic_opmode) {
1294 case IEEE80211_M_IBSS:
1295 sc->sc_opmode = HAL_M_IBSS;
1297 case IEEE80211_M_STA:
1298 sc->sc_opmode = HAL_M_STA;
1300 case IEEE80211_M_AHDEMO:
1301 #ifdef IEEE80211_SUPPORT_TDMA
1302 if (vap->iv_caps & IEEE80211_C_TDMA) {
1304 /* NB: disable tsf adjust */
1305 sc->sc_stagbeacons = 0;
1308 * NB: adhoc demo mode is a pseudo mode; to the hal it's
1313 case IEEE80211_M_HOSTAP:
1314 case IEEE80211_M_MBSS:
1315 sc->sc_opmode = HAL_M_HOSTAP;
1317 case IEEE80211_M_MONITOR:
1318 sc->sc_opmode = HAL_M_MONITOR;
1321 /* XXX should not happen */
1324 if (sc->sc_hastsfadd) {
1326 * Configure whether or not TSF adjust should be done.
1328 ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons);
1330 if (flags & IEEE80211_CLONE_NOBEACONS) {
1332 * Enable s/w beacon miss handling.
1338 /* complete setup */
1339 ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status);
1342 reclaim_address(sc, mac);
1343 ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
1345 free(avp, M_80211_VAP);
1351 ath_vap_delete(struct ieee80211vap *vap)
1353 struct ieee80211com *ic = vap->iv_ic;
1354 struct ifnet *ifp = ic->ic_ifp;
1355 struct ath_softc *sc = ifp->if_softc;
1356 struct ath_hal *ah = sc->sc_ah;
1357 struct ath_vap *avp = ATH_VAP(vap);
1359 DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
1360 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1362 * Quiesce the hardware while we remove the vap. In
1363 * particular we need to reclaim all references to
1364 * the vap state by any frames pending on the tx queues.
1366 ath_hal_intrset(ah, 0); /* disable interrupts */
1367 ath_draintxq(sc, ATH_RESET_DEFAULT); /* stop hw xmit side */
1368 /* XXX Do all frames from all vaps/nodes need draining here? */
1369 ath_stoprecv(sc, 1); /* stop recv side */
1372 ieee80211_vap_detach(vap);
1375 * XXX Danger Will Robinson! Danger!
1377 * Because ieee80211_vap_detach() can queue a frame (the station
1378	 * disassociate message?) after we've drained the TXQ and
1379 * flushed the software TXQ, we will end up with a frame queued
1380 * to a node whose vap is about to be freed.
1382 * To work around this, flush the hardware/software again.
1383 * This may be racy - the ath task may be running and the packet
1384 * may be being scheduled between sw->hw txq. Tsk.
1386 * TODO: figure out why a new node gets allocated somewhere around
1387 * here (after the ath_tx_swq() call; and after an ath_stop_locked()
1391 ath_draintxq(sc, ATH_RESET_DEFAULT);
1395 * Reclaim beacon state. Note this must be done before
1396 * the vap instance is reclaimed as we may have a reference
1397 * to it in the buffer for the beacon frame.
1399 if (avp->av_bcbuf != NULL) {
1400 if (avp->av_bslot != -1) {
1401 sc->sc_bslot[avp->av_bslot] = NULL;
1404 ath_beacon_return(sc, avp->av_bcbuf);
1405 avp->av_bcbuf = NULL;
1406 if (sc->sc_nbcnvaps == 0) {
1407 sc->sc_stagbeacons = 0;
1408 if (sc->sc_hastsfadd)
1409 ath_hal_settsfadjust(sc->sc_ah, 0);
1412 * Reclaim any pending mcast frames for the vap.
1414 ath_tx_draintxq(sc, &avp->av_mcastq);
1417 * Update bookkeeping.
1419 if (vap->iv_opmode == IEEE80211_M_STA) {
1421 if (sc->sc_nstavaps == 0 && sc->sc_swbmiss)
1423 } else if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
1424 vap->iv_opmode == IEEE80211_M_MBSS) {
1425 reclaim_address(sc, vap->iv_myaddr);
1426 ath_hal_setbssidmask(ah, sc->sc_hwbssidmask);
1427 if (vap->iv_opmode == IEEE80211_M_MBSS)
1430 if (vap->iv_opmode != IEEE80211_M_WDS)
1432 #ifdef IEEE80211_SUPPORT_TDMA
1433 /* TDMA operation ceases when the last vap is destroyed */
1434 if (sc->sc_tdma && sc->sc_nvaps == 0) {
1439 free(avp, M_80211_VAP);
1441 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1443 * Restart rx+tx machines if still running (RUNNING will
1444 * be reset if we just destroyed the last vap).
1446 if (ath_startrecv(sc) != 0)
1447 if_printf(ifp, "%s: unable to restart recv logic\n",
1449 if (sc->sc_beacons) { /* restart beacons */
1450 #ifdef IEEE80211_SUPPORT_TDMA
1452 ath_tdma_config(sc, NULL);
1455 ath_beacon_config(sc, NULL);
1457 ath_hal_intrset(ah, sc->sc_imask);
1463 ath_suspend(struct ath_softc *sc)
1465 struct ifnet *ifp = sc->sc_ifp;
1466 struct ieee80211com *ic = ifp->if_l2com;
1468 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1469 __func__, ifp->if_flags);
1471 sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0;
1473 ieee80211_suspend_all(ic);
1475 * NB: don't worry about putting the chip in low power
1476 * mode; pci will power off our socket on suspend and
1477 * CardBus detaches the device.
1481 * XXX ensure none of the taskqueues are running
1482 * XXX ensure sc_invalid is 1
1483 * XXX ensure the calibration callout is disabled
1486 /* Disable the PCIe PHY, complete with workarounds */
1487 ath_hal_enablepcie(sc->sc_ah, 1, 1);
1491 * Reset the key cache since some parts do not reset the
1492 * contents on resume. First we clear all entries, then
1493 * re-load keys that the 802.11 layer assumes are setup
1497 ath_reset_keycache(struct ath_softc *sc)
1499 struct ifnet *ifp = sc->sc_ifp;
1500 struct ieee80211com *ic = ifp->if_l2com;
1501 struct ath_hal *ah = sc->sc_ah;
1504 for (i = 0; i < sc->sc_keymax; i++)
1505 ath_hal_keyreset(ah, i);
1506 ieee80211_crypto_reload_keys(ic);
1510 * Fetch the current chainmask configuration based on the current
1511 * operating channel and options.
1514 ath_update_chainmasks(struct ath_softc *sc, struct ieee80211_channel *chan)
1518 * Set TX chainmask to the currently configured chainmask;
1519 * the TX chainmask depends upon the current operating mode.
1521 sc->sc_cur_rxchainmask = sc->sc_rxchainmask;
1522 if (IEEE80211_IS_CHAN_HT(chan)) {
1523 sc->sc_cur_txchainmask = sc->sc_txchainmask;
1525 sc->sc_cur_txchainmask = 1;
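/*
 * For example (hypothetical part): a 2x2 NIC with sc_txchainmask of
 * 0x3 keeps both chains for HT channels but transmits on chain 0 only
 * when the current channel is a legacy (non-HT) one.
 */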
1530 ath_resume(struct ath_softc *sc)
1532 struct ifnet *ifp = sc->sc_ifp;
1533 struct ieee80211com *ic = ifp->if_l2com;
1534 struct ath_hal *ah = sc->sc_ah;
1537 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1538 __func__, ifp->if_flags);
1540 /* Re-enable PCIe, re-enable the PCIe bus */
1541 ath_hal_enablepcie(ah, 0, 0);
1544 * Must reset the chip before we reload the
1545 * keycache as we were powered down on suspend.
1547 ath_update_chainmasks(sc,
1548 sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan);
1549 ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
1550 sc->sc_cur_rxchainmask);
1551 ath_hal_reset(ah, sc->sc_opmode,
1552 sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan,
1554 ath_reset_keycache(sc);
1556 /* Let DFS at it in case it's a DFS channel */
1557 ath_dfs_radar_enable(sc, ic->ic_curchan);
1559	/* Let spectral at it in case spectral is enabled */
1560 ath_spectral_enable(sc, ic->ic_curchan);
1562 /* Restore the LED configuration */
1564 ath_hal_setledstate(ah, HAL_LED_INIT);
1566 if (sc->sc_resume_up)
1567 ieee80211_resume_all(ic);
1573 ath_shutdown(struct ath_softc *sc)
1575 struct ifnet *ifp = sc->sc_ifp;
1577 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1578 __func__, ifp->if_flags);
1581 /* NB: no point powering down chip as we're about to reboot */
1585 * Interrupt handler. Most of the actual processing is deferred.
1590 struct ath_softc *sc = arg;
1591 struct ifnet *ifp = sc->sc_ifp;
1592 struct ath_hal *ah = sc->sc_ah;
1597 * If we're inside a reset path, just print a warning and
1598 * clear the ISR. The reset routine will finish it for us.
1601 if (sc->sc_inreset_cnt) {
1603 ath_hal_getisr(ah, &status); /* clear ISR */
1604 ath_hal_intrset(ah, 0); /* disable further intr's */
1605 DPRINTF(sc, ATH_DEBUG_ANY,
1606 "%s: in reset, ignoring: status=0x%x\n",
1612 if (sc->sc_invalid) {
1614 * The hardware is not ready/present, don't touch anything.
1615 * Note this can happen early on if the IRQ is shared.
1617 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
1621 if (!ath_hal_intrpend(ah)) { /* shared irq, not for us */
1626 if ((ifp->if_flags & IFF_UP) == 0 ||
1627 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1630 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
1631 __func__, ifp->if_flags);
1632 ath_hal_getisr(ah, &status); /* clear ISR */
1633 ath_hal_intrset(ah, 0); /* disable further intr's */
1639 * Figure out the reason(s) for the interrupt. Note
1640 * that the hal returns a pseudo-ISR that may include
1641 * bits we haven't explicitly enabled so we mask the
1642 * value to insure we only process bits we requested.
1644 ath_hal_getisr(ah, &status); /* NB: clears ISR too */
1645 DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
1646 ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1, "ath_intr: mask=0x%.8x", status);
1647 #ifdef ATH_DEBUG_ALQ
1648 if_ath_alq_post_intr(&sc->sc_alq, status, ah->ah_intrstate,
1650 #endif /* ATH_DEBUG_ALQ */
1651 #ifdef ATH_KTR_INTR_DEBUG
1652 ATH_KTR(sc, ATH_KTR_INTERRUPTS, 5,
1653 "ath_intr: ISR=0x%.8x, ISR_S0=0x%.8x, ISR_S1=0x%.8x, ISR_S2=0x%.8x, ISR_S5=0x%.8x",
1654 ah->ah_intrstate[0],
1655 ah->ah_intrstate[1],
1656 ah->ah_intrstate[2],
1657 ah->ah_intrstate[3],
1658 ah->ah_intrstate[6]);
1661 /* Squirrel away SYNC interrupt debugging */
1662 if (ah->ah_syncstate != 0) {
1664 for (i = 0; i < 32; i++)
1665			if (ah->ah_syncstate & (1 << i))
1666 sc->sc_intr_stats.sync_intr[i]++;
1669 status &= sc->sc_imask; /* discard unasked for bits */
1671 /* Short-circuit un-handled interrupts */
1672 if (status == 0x0) {
1678 * Take a note that we're inside the interrupt handler, so
1679 * the reset routines know to wait.
1685	 * Handle the interrupt. We won't run concurrently with the reset
1686 * or channel change routines as they'll wait for sc_intr_cnt
1687 * to be 0 before continuing.
1689 if (status & HAL_INT_FATAL) {
1690 sc->sc_stats.ast_hardware++;
1691 ath_hal_intrset(ah, 0); /* disable intr's until reset */
1692 taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask);
1694 if (status & HAL_INT_SWBA) {
1696 * Software beacon alert--time to send a beacon.
1697 * Handle beacon transmission directly; deferring
1698 * this is too slow to meet timing constraints
1701 #ifdef IEEE80211_SUPPORT_TDMA
1703 if (sc->sc_tdmaswba == 0) {
1704 struct ieee80211com *ic = ifp->if_l2com;
1705 struct ieee80211vap *vap =
1706 TAILQ_FIRST(&ic->ic_vaps);
1707 ath_tdma_beacon_send(sc, vap);
1709 vap->iv_tdma->tdma_bintval;
1715 ath_beacon_proc(sc, 0);
1716 #ifdef IEEE80211_SUPPORT_SUPERG
1718 * Schedule the rx taskq in case there's no
1719 * traffic so any frames held on the staging
1720 * queue are aged and potentially flushed.
1722 sc->sc_rx.recv_sched(sc, 1);
1726 if (status & HAL_INT_RXEOL) {
1728 ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXEOL");
1731 * NB: the hardware should re-read the link when
1732 * RXE bit is written, but it doesn't work at
1733 * least on older hardware revs.
1735 sc->sc_stats.ast_rxeol++;
1737 * Disable RXEOL/RXORN - prevent an interrupt
1738 * storm until the PCU logic can be reset.
1739 * In case the interface is reset some other
1740 * way before "sc_kickpcu" is called, don't
1741 * modify sc_imask - that way if it is reset
1742 * by a call to ath_reset() somehow, the
1743 * interrupt mask will be correctly reprogrammed.
1745 imask = sc->sc_imask;
1746 imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN);
1747 ath_hal_intrset(ah, imask);
1749 * Only blank sc_rxlink if we've not yet kicked
1752 * This isn't entirely correct - the correct solution
1753 * would be to have a PCU lock and engage that for
1754 * the duration of the PCU fiddling; which would include
1755 * running the RX process. Otherwise we could end up
1756 * messing up the RX descriptor chain and making the
1757 * RX desc list much shorter.
1759 if (! sc->sc_kickpcu)
1760 sc->sc_rxlink = NULL;
1764		 * Enqueue an RX proc to handle whatever
1765 * is in the RX queue.
1766 * This will then kick the PCU.
1768 sc->sc_rx.recv_sched(sc, 1);
1770 if (status & HAL_INT_TXURN) {
1771 sc->sc_stats.ast_txurn++;
1772 /* bump tx trigger level */
1773 ath_hal_updatetxtriglevel(ah, AH_TRUE);
1776 * Handle both the legacy and RX EDMA interrupt bits.
1777 * Note that HAL_INT_RXLP is also HAL_INT_RXDESC.
1779 if (status & (HAL_INT_RX | HAL_INT_RXHP | HAL_INT_RXLP)) {
1780 sc->sc_stats.ast_rx_intr++;
1781 sc->sc_rx.recv_sched(sc, 1);
1783 if (status & HAL_INT_TX) {
1784 sc->sc_stats.ast_tx_intr++;
1786 * Grab all the currently set bits in the HAL txq bitmap
1787 * and blank them. This is the only place we should be
1790 if (! sc->sc_isedma) {
1793 ath_hal_gettxintrtxqs(sc->sc_ah, &txqs);
1794 ATH_KTR(sc, ATH_KTR_INTERRUPTS, 3,
1795 "ath_intr: TX; txqs=0x%08x, txq_active was 0x%08x, now 0x%08x",
1798 sc->sc_txq_active | txqs);
1799 sc->sc_txq_active |= txqs;
1802 taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
1804 if (status & HAL_INT_BMISS) {
1805 sc->sc_stats.ast_bmiss++;
1806 taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask);
1808 if (status & HAL_INT_GTT)
1809 sc->sc_stats.ast_tx_timeout++;
1810 if (status & HAL_INT_CST)
1811 sc->sc_stats.ast_tx_cst++;
1812 if (status & HAL_INT_MIB) {
1813 sc->sc_stats.ast_mib++;
1816 * Disable interrupts until we service the MIB
1817 * interrupt; otherwise it will continue to fire.
1819 ath_hal_intrset(ah, 0);
1821 * Let the hal handle the event. We assume it will
1822 * clear whatever condition caused the interrupt.
1824 ath_hal_mibevent(ah, &sc->sc_halstats);
1826 * Don't reset the interrupt if we've just
1827 * kicked the PCU, or we may get a nested
1828 * RXEOL before the rxproc has had a chance
1831 if (sc->sc_kickpcu == 0)
1832 ath_hal_intrset(ah, sc->sc_imask);
1835 if (status & HAL_INT_RXORN) {
1836 /* NB: hal marks HAL_INT_FATAL when RXORN is fatal */
1837 ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXORN");
1838 sc->sc_stats.ast_rxorn++;
1847 ath_fatal_proc(void *arg, int pending)
1849 struct ath_softc *sc = arg;
1850 struct ifnet *ifp = sc->sc_ifp;
1855 if_printf(ifp, "hardware error; resetting\n");
1857 * Fatal errors are unrecoverable. Typically these
1858 * are caused by DMA errors. Collect h/w state from
1859 * the hal so we can diagnose what's going on.
1861 if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) {
1862 KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len));
1864 if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n",
1865 state[0], state[1] , state[2], state[3],
1866 state[4], state[5]);
1868 ath_reset(ifp, ATH_RESET_NOLOSS);
1872 ath_bmiss_vap(struct ieee80211vap *vap)
1875	 * Work around phantom bmiss interrupts by sanity-checking
1876 * the time of our last rx'd frame. If it is within the
1877 * beacon miss interval then ignore the interrupt. If it's
1878 * truly a bmiss we'll get another interrupt soon and that'll
1879 * be dispatched up for processing. Note this applies only
1880 * for h/w beacon miss events.
1882 if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) {
1883 struct ifnet *ifp = vap->iv_ic->ic_ifp;
1884 struct ath_softc *sc = ifp->if_softc;
1885 u_int64_t lastrx = sc->sc_lastrx;
1886 u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah);
1887 /* XXX should take a locked ref to iv_bss */
1888 u_int bmisstimeout =
1889 vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024;
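		/*
		 * Worked example (hypothetical numbers): with a 100 TU
		 * beacon interval and an iv_bmissthreshold of 10 this is
		 * 10 * 100 * 1024 = 1,024,000 usec, i.e. a frame heard
		 * within roughly the last second marks the bmiss as a
		 * phantom and the event is dropped below.
		 */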
1891 DPRINTF(sc, ATH_DEBUG_BEACON,
1892 "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n",
1893 __func__, (unsigned long long) tsf,
1894 (unsigned long long)(tsf - lastrx),
1895 (unsigned long long) lastrx, bmisstimeout);
1897 if (tsf - lastrx <= bmisstimeout) {
1898 sc->sc_stats.ast_bmiss_phantom++;
1902 ATH_VAP(vap)->av_bmiss(vap);
1906 ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs)
1911 if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS, &mask, sizeof(mask), &sp, &rsize))
1913 KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize));
1914 *hangs = *(uint32_t *)sp;
1919 ath_bmiss_proc(void *arg, int pending)
1921 struct ath_softc *sc = arg;
1922 struct ifnet *ifp = sc->sc_ifp;
1925 DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);
1928	 * Do a reset upon any beacon miss event.
1930 * It may be a non-recognised RX clear hang which needs a reset
1933 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) {
1934 ath_reset(ifp, ATH_RESET_NOLOSS);
1935 if_printf(ifp, "bb hang detected (0x%x), resetting\n", hangs);
1937 ath_reset(ifp, ATH_RESET_NOLOSS);
1938 ieee80211_beacon_miss(ifp->if_l2com);
1943 * Handle TKIP MIC setup to deal with hardware that doesn't do MIC
1944 * calcs together with WME. If necessary disable the crypto
1945 * hardware and mark the 802.11 state so keys will be setup
1946 * with the MIC work done in software.
1949 ath_settkipmic(struct ath_softc *sc)
1951 struct ifnet *ifp = sc->sc_ifp;
1952 struct ieee80211com *ic = ifp->if_l2com;
1954 if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) {
1955 if (ic->ic_flags & IEEE80211_F_WME) {
1956 ath_hal_settkipmic(sc->sc_ah, AH_FALSE);
1957 ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC;
1959 ath_hal_settkipmic(sc->sc_ah, AH_TRUE);
1960 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
1968 struct ath_softc *sc = (struct ath_softc *) arg;
1969 struct ifnet *ifp = sc->sc_ifp;
1970 struct ieee80211com *ic = ifp->if_l2com;
1971 struct ath_hal *ah = sc->sc_ah;
1974 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
1975 __func__, ifp->if_flags);
1979 * Stop anything previously setup. This is safe
1980 * whether this is the first time through or not.
1982 ath_stop_locked(ifp);
1985 * The basic interface to setting the hardware in a good
1986 * state is ``reset''. On return the hardware is known to
1987 * be powered up and with interrupts disabled. This must
1988 * be followed by initialization of the appropriate bits
1989 * and then setup of the interrupt mask.
1992 ath_update_chainmasks(sc, ic->ic_curchan);
1993 ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
1994 sc->sc_cur_rxchainmask);
1995 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, &status)) {
1996 if_printf(ifp, "unable to reset hardware; hal status %u\n",
2001 ath_chan_change(sc, ic->ic_curchan);
2003 /* Let DFS at it in case it's a DFS channel */
2004 ath_dfs_radar_enable(sc, ic->ic_curchan);
2006	/* Let spectral at it in case spectral is enabled */
2007 ath_spectral_enable(sc, ic->ic_curchan);
2010 * Likewise this is set during reset so update
2011 * state cached in the driver.
2013 sc->sc_diversity = ath_hal_getdiversity(ah);
2014 sc->sc_lastlongcal = 0;
2015 sc->sc_resetcal = 1;
2016 sc->sc_lastcalreset = 0;
2018 sc->sc_lastshortcal = 0;
2019 sc->sc_doresetcal = AH_FALSE;
2021 * Beacon timers were cleared here; give ath_newstate()
2022 * a hint that the beacon timers should be poked when
2023 * things transition to the RUN state.
2028 * Setup the hardware after reset: the key cache
2029 * is filled as needed and the receive engine is
2030 * set going. Frame transmit is handled entirely
2031 * in the frame output path; there's nothing to do
2032 * here except setup the interrupt mask.
2034 if (ath_startrecv(sc) != 0) {
2035 if_printf(ifp, "unable to start recv logic\n");
2041 * Enable interrupts.
2043 sc->sc_imask = HAL_INT_RX | HAL_INT_TX
2044 | HAL_INT_RXEOL | HAL_INT_RXORN
2046 | HAL_INT_FATAL | HAL_INT_GLOBAL;
2049 * Enable RX EDMA bits. Note these overlap with
2050 * HAL_INT_RX and HAL_INT_RXDESC respectively.
2053 sc->sc_imask |= (HAL_INT_RXHP | HAL_INT_RXLP);
2056 * Enable MIB interrupts when there are hardware phy counters.
2057 * Note we only do this (at the moment) for station mode.
2059 if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA)
2060 sc->sc_imask |= HAL_INT_MIB;
2062 /* Enable global TX timeout and carrier sense timeout if available */
2063 if (ath_hal_gtxto_supported(ah))
2064 sc->sc_imask |= HAL_INT_GTT;
2066 DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n",
2067 __func__, sc->sc_imask);
2069 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2070 callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc);
2071 ath_hal_intrset(ah, sc->sc_imask);
2075 #ifdef ATH_TX99_DIAG
2076 if (sc->sc_tx99 != NULL)
2077 sc->sc_tx99->start(sc->sc_tx99);
2080 ieee80211_start_all(ic); /* start all vap's */
2084 ath_stop_locked(struct ifnet *ifp)
2086 struct ath_softc *sc = ifp->if_softc;
2087 struct ath_hal *ah = sc->sc_ah;
2089 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
2090 __func__, sc->sc_invalid, ifp->if_flags);
2092 ATH_LOCK_ASSERT(sc);
2093 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2095 * Shutdown the hardware and driver:
2096 * reset 802.11 state machine
2098 * disable interrupts
2099 * turn off the radio
2100 * clear transmit machinery
2101 * clear receive machinery
2102 * drain and release tx queues
2103 * reclaim beacon resources
2104 * power down hardware
2106 * Note that some of this work is not possible if the
2107 * hardware is gone (invalid).
2109 #ifdef ATH_TX99_DIAG
2110 if (sc->sc_tx99 != NULL)
2111 sc->sc_tx99->stop(sc->sc_tx99);
2113 callout_stop(&sc->sc_wd_ch);
2114 sc->sc_wd_timer = 0;
2115 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2116 if (!sc->sc_invalid) {
2117 if (sc->sc_softled) {
2118 callout_stop(&sc->sc_ledtimer);
2119 ath_hal_gpioset(ah, sc->sc_ledpin,
2121 sc->sc_blinking = 0;
2123 ath_hal_intrset(ah, 0);
2125 ath_draintxq(sc, ATH_RESET_DEFAULT);
2126 if (!sc->sc_invalid) {
2127 ath_stoprecv(sc, 1);
2128 ath_hal_phydisable(ah);
2130 sc->sc_rxlink = NULL;
2131 ath_beacon_free(sc); /* XXX not needed */
2135 #define MAX_TXRX_ITERATIONS 1000
2137 ath_txrx_stop_locked(struct ath_softc *sc)
2139 int i = MAX_TXRX_ITERATIONS;
2141 ATH_UNLOCK_ASSERT(sc);
2142 ATH_PCU_LOCK_ASSERT(sc);
2145 * Sleep until all the pending operations have completed.
2147 * The caller must ensure that the reset counter has been incremented
2148 * or the pending operations may continue being queued.
2150 while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt ||
2151 sc->sc_txstart_cnt || sc->sc_intr_cnt) {
2154 msleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop", 1);
2159 device_printf(sc->sc_dev,
2160 "%s: didn't finish after %d iterations\n",
2161 __func__, MAX_TXRX_ITERATIONS);
2163 #undef MAX_TXRX_ITERATIONS
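/*
 * Illustrative sketch (not new driver code): the quiesce pattern that
 * ath_reset() and ath_chan_set() below follow before touching the
 * hardware looks roughly like this:
 *
 *	taskqueue_block(sc->sc_tq);		(no new deferred work)
 *	ATH_PCU_LOCK(sc);
 *	ath_hal_intrset(sc->sc_ah, 0);		(no new interrupts)
 *	ath_txrx_stop_locked(sc);		(wait for pending TX/RX)
 *	if (ath_reset_grablock(sc, 1) == 0)
 *		(warn about a concurrent reset, but carry on)
 *	ATH_PCU_UNLOCK(sc);
 *
 * Waiting alone isn't sufficient; it is the sc_inreset_cnt bump done
 * by ath_reset_grablock() that stops new TX/RX work from being queued.
 */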
2167 ath_txrx_stop(struct ath_softc *sc)
2169 ATH_UNLOCK_ASSERT(sc);
2170 ATH_PCU_UNLOCK_ASSERT(sc);
2173 ath_txrx_stop_locked(sc);
2179 ath_txrx_start(struct ath_softc *sc)
2182 taskqueue_unblock(sc->sc_tq);
2186 * Grab the reset lock, and wait around until no one else
2187 * is trying to do anything with it.
2189 * This is totally horrible but we can't hold this lock for
2190 * long enough to do TX/RX or we end up with net80211/ip stack
2191 * LORs and eventual deadlock.
2193 * "dowait" signals whether to spin, waiting for the reset
2194 * lock count to reach 0. This should (for now) only be used
2195 * during the reset path, as the rest of the code may not
2196 * be locking-reentrant enough to behave correctly.
2198 * Another, cleaner way should be found to serialise all of
2201 #define MAX_RESET_ITERATIONS 10
2203 ath_reset_grablock(struct ath_softc *sc, int dowait)
2206 int i = MAX_RESET_ITERATIONS;
2208 ATH_PCU_LOCK_ASSERT(sc);
2210 if (sc->sc_inreset_cnt == 0) {
2219 pause("ath_reset_grablock", 1);
2225 * We always increment the refcounter, regardless
2226 * of whether we succeeded in getting it in an exclusive
2229 sc->sc_inreset_cnt++;
2232 device_printf(sc->sc_dev,
2233 "%s: didn't finish after %d iterations\n",
2234 __func__, MAX_RESET_ITERATIONS);
2237 device_printf(sc->sc_dev,
2238 "%s: warning, recursive reset path!\n",
2243 #undef MAX_RESET_ITERATIONS
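/*
 * Sketch of the matching "release" (there is no helper yet; see the
 * XXX note below).  Callers that grabbed the reset lock undo it by
 * hand, as ath_reset() does once the chip is back up:
 *
 *	ATH_PCU_LOCK(sc);
 *	sc->sc_inreset_cnt--;
 *	ath_hal_intrset(ah, sc->sc_imask);	(re-enable interrupts)
 *	ATH_PCU_UNLOCK(sc);
 *
 * Note the refcount is incremented even when the exclusive grab times
 * out, so every caller must decrement exactly once.
 */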
2246 * XXX TODO: write ath_reset_releaselock
2250 ath_stop(struct ifnet *ifp)
2252 struct ath_softc *sc = ifp->if_softc;
2255 ath_stop_locked(ifp);
2260 * Reset the hardware w/o losing operational state. This is
2261 * basically a more efficient way of doing ath_stop, ath_init,
2262 * followed by state transitions to the current 802.11
2263 * operational state. Used to recover from various errors and
2264 * to reset or reload hardware state.
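/*
 * Minimal usage sketch (both forms appear elsewhere in this file):
 *
 *	ath_reset(ifp, ATH_RESET_NOLOSS);	(error recovery, e.g.
 *						 beacon miss / stuck beacon)
 *	ath_reset(ifp, ATH_RESET_FULL);		(full reconfiguration,
 *						 e.g. ath_reset_vap())
 */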
2267 ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type)
2269 struct ath_softc *sc = ifp->if_softc;
2270 struct ieee80211com *ic = ifp->if_l2com;
2271 struct ath_hal *ah = sc->sc_ah;
2275 DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
2277 /* Ensure ATH_LOCK isn't held; ath_rx_proc can't be locked */
2278 ATH_PCU_UNLOCK_ASSERT(sc);
2279 ATH_UNLOCK_ASSERT(sc);
2281 /* Try to stop any further TX/RX from occurring */
2282 taskqueue_block(sc->sc_tq);
2285 ath_hal_intrset(ah, 0); /* disable interrupts */
2286 ath_txrx_stop_locked(sc); /* Ensure TX/RX is stopped */
2287 if (ath_reset_grablock(sc, 1) == 0) {
2288 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
2294 * Should now wait for pending TX/RX to complete
2295 * and block future ones from occurring. This needs to be
2296 * done before the TX queue is drained.
2298 ath_draintxq(sc, reset_type); /* stop xmit side */
2301 * Regardless of whether we're doing a no-loss flush or
2302 * not, stop the PCU and handle what's in the RX queue.
2303 * That way frames which shouldn't be dropped aren't.
2305 ath_stoprecv(sc, (reset_type != ATH_RESET_NOLOSS));
2308 ath_settkipmic(sc); /* configure TKIP MIC handling */
2309 /* NB: indicate channel change so we do a full reset */
2310 ath_update_chainmasks(sc, ic->ic_curchan);
2311 ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
2312 sc->sc_cur_rxchainmask);
2313 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status))
2314 if_printf(ifp, "%s: unable to reset hardware; hal status %u\n",
2316 sc->sc_diversity = ath_hal_getdiversity(ah);
2318 /* Let DFS at it in case it's a DFS channel */
2319 ath_dfs_radar_enable(sc, ic->ic_curchan);
2321 /* Let spectral at it in case spectral is enabled */
2322 ath_spectral_enable(sc, ic->ic_curchan);
2324 if (ath_startrecv(sc) != 0) /* restart recv */
2325 if_printf(ifp, "%s: unable to start recv logic\n", __func__);
2327 * We may be doing a reset in response to an ioctl
2328 * that changes the channel so update any state that
2329 * might change as a result.
2331 ath_chan_change(sc, ic->ic_curchan);
2332 if (sc->sc_beacons) { /* restart beacons */
2333 #ifdef IEEE80211_SUPPORT_TDMA
2335 ath_tdma_config(sc, NULL);
2338 ath_beacon_config(sc, NULL);
2342 * Release the reset lock and re-enable interrupts here.
2343 * If an interrupt was being processed in ath_intr(),
2344 * it would disable interrupts at this point. So we have
2345 * to atomically enable interrupts and decrement the
2346 * reset counter - this way ath_intr() doesn't end up
2347 * disabling interrupts without a corresponding enable
2348 * in the reset or channel change path.
2351 sc->sc_inreset_cnt--;
2352 /* XXX only do this if sc_inreset_cnt == 0? */
2353 ath_hal_intrset(ah, sc->sc_imask);
2357 * TX and RX can be started here. If it were started with
2358 * sc_inreset_cnt > 0, the TX and RX path would abort.
2359 * Thus if this is a nested call through the reset or
2360 * channel change code, TX completion will occur but
2361 * RX completion and ath_start / ath_tx_start will not
2365 /* Restart TX/RX as needed */
2368 /* Restart TX completion and pending TX */
2369 if (reset_type == ATH_RESET_NOLOSS) {
2370 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
2371 if (ATH_TXQ_SETUP(sc, i)) {
2372 ATH_TXQ_LOCK(&sc->sc_txq[i]);
2373 ath_txq_restart_dma(sc, &sc->sc_txq[i]);
2374 ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
2377 ath_txq_sched(sc, &sc->sc_txq[i]);
2384 * This may have been set during an ath_start() call which
2385 * set this once it detected a concurrent TX was going on.
2388 IF_LOCK(&ifp->if_snd);
2389 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2390 IF_UNLOCK(&ifp->if_snd);
2392 /* Handle any frames in the TX queue */
2394 * XXX should this be done by the caller, rather than
2397 ath_tx_kick(sc); /* restart xmit */
2402 ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
2404 struct ieee80211com *ic = vap->iv_ic;
2405 struct ifnet *ifp = ic->ic_ifp;
2406 struct ath_softc *sc = ifp->if_softc;
2407 struct ath_hal *ah = sc->sc_ah;
2410 case IEEE80211_IOC_TXPOWER:
2412 * If per-packet TPC is enabled, then we have nothing
2413 * to do; otherwise we need to force the global limit.
2414 * All this can happen directly; no need to reset.
2416 if (!ath_hal_gettpc(ah))
2417 ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
2420 /* XXX? Full or NOLOSS? */
2421 return ath_reset(ifp, ATH_RESET_FULL);
2425 _ath_getbuf_locked(struct ath_softc *sc, ath_buf_type_t btype)
2429 ATH_TXBUF_LOCK_ASSERT(sc);
2431 if (btype == ATH_BUFTYPE_MGMT)
2432 bf = TAILQ_FIRST(&sc->sc_txbuf_mgmt);
2434 bf = TAILQ_FIRST(&sc->sc_txbuf);
2437 sc->sc_stats.ast_tx_getnobuf++;
2439 if (bf->bf_flags & ATH_BUF_BUSY) {
2440 sc->sc_stats.ast_tx_getbusybuf++;
2445 if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0) {
2446 if (btype == ATH_BUFTYPE_MGMT)
2447 TAILQ_REMOVE(&sc->sc_txbuf_mgmt, bf, bf_list);
2449 TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list);
2453 * This shouldn't happen; however, just to be
2454 * safe, print a warning and fudge the txbuf
2457 if (sc->sc_txbuf_cnt < 0) {
2458 device_printf(sc->sc_dev,
2459 "%s: sc_txbuf_cnt < 0?\n",
2461 sc->sc_txbuf_cnt = 0;
2468 /* XXX should check which list, mgmt or otherwise */
2469 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__,
2470 TAILQ_FIRST(&sc->sc_txbuf) == NULL ?
2471 "out of xmit buffers" : "xmit buffer busy");
2475 /* XXX TODO: should do this at buffer list initialisation */
2476 /* XXX (then, ensure the buffer has the right flag set) */
2478 if (btype == ATH_BUFTYPE_MGMT)
2479 bf->bf_flags |= ATH_BUF_MGMT;
2481 bf->bf_flags &= (~ATH_BUF_MGMT);
2483 /* Valid bf here; clear some basic fields */
2484 bf->bf_next = NULL; /* XXX just to be sure */
2485 bf->bf_last = NULL; /* XXX again, just to be sure */
2486 bf->bf_comp = NULL; /* XXX again, just to be sure */
2487 bzero(&bf->bf_state, sizeof(bf->bf_state));
2490 * Track the descriptor ID only if doing EDMA
2492 if (sc->sc_isedma) {
2493 bf->bf_descid = sc->sc_txbuf_descid;
2494 sc->sc_txbuf_descid++;
2501 * When retrying a software frame, buffers marked ATH_BUF_BUSY
2502 * can't be thrown back on the queue as they could still be
2503 * in use by the hardware.
2505 * This duplicates the buffer, or returns NULL.
2507 * The descriptor is also copied but the link pointers and
2508 * the DMA segments aren't copied; this frame should thus
2509 * be passed through the descriptor setup/chain routines again
2510 * so the link is correct.
2512 * The caller must free the buffer using ath_freebuf().
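/*
 * Hedged usage sketch for the software-retry path described above
 * (local variable names are illustrative only):
 *
 *	nbf = ath_buf_clone(sc, bf);
 *	if (nbf == NULL)
 *		... handle the allocation failure (drop or requeue) ...
 *	... re-run the descriptor setup/chain routines on nbf ...
 *	ath_freebuf(sc, bf);	(original buffer; its mbuf/node refs
 *				 have been handed over to the clone)
 */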
2515 ath_buf_clone(struct ath_softc *sc, struct ath_buf *bf)
2517 struct ath_buf *tbf;
2519 tbf = ath_getbuf(sc,
2520 (bf->bf_flags & ATH_BUF_MGMT) ?
2521 ATH_BUFTYPE_MGMT : ATH_BUFTYPE_NORMAL);
2523 return NULL; /* XXX failure? Why? */
2526 tbf->bf_next = NULL;
2527 tbf->bf_nseg = bf->bf_nseg;
2528 tbf->bf_flags = bf->bf_flags & ATH_BUF_FLAGS_CLONE;
2529 tbf->bf_status = bf->bf_status;
2530 tbf->bf_m = bf->bf_m;
2531 tbf->bf_node = bf->bf_node;
2532 /* will be setup by the chain/setup function */
2533 tbf->bf_lastds = NULL;
2534 /* for now, last == self */
2536 tbf->bf_comp = bf->bf_comp;
2538 /* NOTE: DMA segments will be setup by the setup/chain functions */
2540 /* The caller has to re-init the descriptor + links */
2543 * Free the DMA mapping here, before we NULL the mbuf.
2544 * We must only call bus_dmamap_unload() once per mbuf chain
2545 * or behaviour is undefined.
2547 if (bf->bf_m != NULL) {
2549 * XXX is this POSTWRITE call required?
2551 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
2552 BUS_DMASYNC_POSTWRITE);
2553 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
2560 memcpy(&tbf->bf_state, &bf->bf_state, sizeof(bf->bf_state));
2566 ath_getbuf(struct ath_softc *sc, ath_buf_type_t btype)
2571 bf = _ath_getbuf_locked(sc, btype);
2573 * If a mgmt buffer was requested but we're out of those,
2574 * try requesting a normal one.
2576 if (bf == NULL && btype == ATH_BUFTYPE_MGMT)
2577 bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
2578 ATH_TXBUF_UNLOCK(sc);
2580 struct ifnet *ifp = sc->sc_ifp;
2582 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__);
2583 sc->sc_stats.ast_tx_qstop++;
2584 IF_LOCK(&ifp->if_snd);
2585 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2586 IF_UNLOCK(&ifp->if_snd);
2592 ath_start_queue(struct ifnet *ifp)
2594 struct ath_softc *sc = ifp->if_softc;
2596 ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start_queue: start");
2598 ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start_queue: finished");
2602 ath_start_task(void *arg, int npending)
2604 struct ath_softc *sc = (struct ath_softc *) arg;
2605 struct ifnet *ifp = sc->sc_ifp;
2607 ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start_task: start");
2609 /* XXX is it ok to hold the ATH_LOCK here? */
2611 if (sc->sc_inreset_cnt > 0) {
2612 device_printf(sc->sc_dev,
2613 "%s: sc_inreset_cnt > 0; bailing\n", __func__);
2615 IF_LOCK(&ifp->if_snd);
2616 sc->sc_stats.ast_tx_qstop++;
2617 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2618 IF_UNLOCK(&ifp->if_snd);
2619 ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start_task: OACTIVE, finish");
2622 sc->sc_txstart_cnt++;
2626 ath_start(sc->sc_ifp);
2630 sc->sc_txstart_cnt--;
2632 ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start_task: finished");
2636 ath_start(struct ifnet *ifp)
2638 struct ath_softc *sc = ifp->if_softc;
2639 struct ieee80211_node *ni;
2641 struct mbuf *m, *next;
2645 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
2648 ATH_TX_LOCK_ASSERT(sc);
2650 ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start: called");
2654 if (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree) {
2655 /* XXX increment counter? */
2656 ATH_TXBUF_UNLOCK(sc);
2657 IF_LOCK(&ifp->if_snd);
2658 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2659 IF_UNLOCK(&ifp->if_snd);
2662 ATH_TXBUF_UNLOCK(sc);
2665 * Grab a TX buffer and associated resources.
2667 bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL);
2671 IFQ_DEQUEUE(&ifp->if_snd, m);
2674 ath_returnbuf_head(sc, bf);
2675 ATH_TXBUF_UNLOCK(sc);
2678 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
2681 * Check for fragmentation. If this frame
2682 * has been broken up, verify we have enough
2683 * buffers to send all the fragments so all
2687 if ((m->m_flags & M_FRAG) &&
2688 !ath_txfrag_setup(sc, &frags, m, ni)) {
2689 DPRINTF(sc, ATH_DEBUG_XMIT,
2690 "%s: out of txfrag buffers\n", __func__);
2691 sc->sc_stats.ast_tx_nofrag++;
2699 * Pass the frame to the h/w for transmission.
2700 * Fragmented frames have each frag chained together
2701 * with m_nextpkt. We know there are sufficient ath_buf's
2702 * to send all the frags because of work done by
2703 * ath_txfrag_setup. We leave m_nextpkt set while
2704 * calling ath_tx_start so it can use it to extend the
2705 * tx duration to cover the subsequent frag and
2706 * so it can reclaim all the mbufs in case of an error;
2707 * ath_tx_start clears m_nextpkt once it commits to
2708 * handing the frame to the hardware.
2710 next = m->m_nextpkt;
2711 if (ath_tx_start(sc, ni, bf, m)) {
2718 ath_returnbuf_head(sc, bf);
2719 ath_txfrag_cleanup(sc, &frags, ni);
2720 ATH_TXBUF_UNLOCK(sc);
2722 * XXX todo, free the node outside of
2723 * the TX lock context!
2726 ieee80211_free_node(ni);
2731 * Check here if the node is in power save state.
2733 ath_tx_update_tim(sc, ni, 1);
2737 * Beware of state changing between frags.
2738 * XXX check sta power-save state?
2740 if (ni->ni_vap->iv_state != IEEE80211_S_RUN) {
2741 DPRINTF(sc, ATH_DEBUG_XMIT,
2742 "%s: flush fragmented packet, state %s\n",
2744 ieee80211_state_name[ni->ni_vap->iv_state]);
2750 bf = TAILQ_FIRST(&frags);
2751 KASSERT(bf != NULL, ("no buf for txfrag"));
2752 TAILQ_REMOVE(&frags, bf, bf_list);
2756 sc->sc_wd_timer = 5;
2758 ATH_KTR(sc, ATH_KTR_TX, 1, "ath_start: finished; npkts=%d", npkts);
2761 ath_media_change(struct ifnet *ifp)
2763 int error = ieee80211_media_change(ifp);
2764 /* NB: only the fixed rate can change and that doesn't need a reset */
2765 return (error == ENETRESET ? 0 : error);
2769 * Block/unblock tx+rx processing while a key change is done.
2770 * We assume the caller serializes key management operations
2771 * so we only need to worry about synchronization with other
2772 * uses that originate in the driver.
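/*
 * Hedged reference sketch: these routines are installed as the vap's
 * iv_key_update_begin/iv_key_update_end methods, so net80211 brackets
 * driver key programming roughly as
 *
 *	vap->iv_key_update_begin(vap);	(-> ath_key_update_begin())
 *	... add/delete the key in the keycache ...
 *	vap->iv_key_update_end(vap);	(-> ath_key_update_end())
 *
 * TX task processing and if_snd dequeue are blocked for the duration;
 * management frames are still allowed through (see the NB below).
 */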
2775 ath_key_update_begin(struct ieee80211vap *vap)
2777 struct ifnet *ifp = vap->iv_ic->ic_ifp;
2778 struct ath_softc *sc = ifp->if_softc;
2780 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
2781 taskqueue_block(sc->sc_tq);
2782 IF_LOCK(&ifp->if_snd); /* NB: doesn't block mgmt frames */
2786 ath_key_update_end(struct ieee80211vap *vap)
2788 struct ifnet *ifp = vap->iv_ic->ic_ifp;
2789 struct ath_softc *sc = ifp->if_softc;
2791 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
2792 IF_UNLOCK(&ifp->if_snd);
2793 taskqueue_unblock(sc->sc_tq);
2797 ath_update_promisc(struct ifnet *ifp)
2799 struct ath_softc *sc = ifp->if_softc;
2802 /* configure rx filter */
2803 rfilt = ath_calcrxfilter(sc);
2804 ath_hal_setrxfilter(sc->sc_ah, rfilt);
2806 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt);
2810 ath_update_mcast(struct ifnet *ifp)
2812 struct ath_softc *sc = ifp->if_softc;
2815 /* calculate and install multicast filter */
2816 if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
2817 struct ifmultiaddr *ifma;
2819 * Merge multicast addresses to form the hardware filter.
2821 mfilt[0] = mfilt[1] = 0;
2822 if_maddr_rlock(ifp); /* XXX need some fiddling to remove? */
2823 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2828 /* calculate XOR of eight 6-bit values */
2829 dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
2830 val = LE_READ_4(dl + 0);
2831 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2832 val = LE_READ_4(dl + 3);
2833 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2835 mfilt[pos / 32] |= (1 << (pos % 32));
2837 if_maddr_runlock(ifp);
2839 mfilt[0] = mfilt[1] = ~0;
2840 ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]);
2841 DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n",
2842 __func__, mfilt[0], mfilt[1]);
2846 ath_mode_init(struct ath_softc *sc)
2848 struct ifnet *ifp = sc->sc_ifp;
2849 struct ath_hal *ah = sc->sc_ah;
2852 /* configure rx filter */
2853 rfilt = ath_calcrxfilter(sc);
2854 ath_hal_setrxfilter(ah, rfilt);
2856 /* configure operational mode */
2857 ath_hal_setopmode(ah);
2859 DPRINTF(sc, ATH_DEBUG_STATE | ATH_DEBUG_MODE,
2860 "%s: ah=%p, ifp=%p, if_addr=%p\n",
2864 (ifp == NULL) ? NULL : ifp->if_addr);
2866 /* handle any link-level address change */
2867 ath_hal_setmac(ah, IF_LLADDR(ifp));
2869 /* calculate and install multicast filter */
2870 ath_update_mcast(ifp);
2874 * Set the slot time based on the current setting.
2877 ath_setslottime(struct ath_softc *sc)
2879 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
2880 struct ath_hal *ah = sc->sc_ah;
2883 if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan))
2885 else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan))
2887 else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
2888 /* honor short/long slot time only in 11g */
2889 /* XXX shouldn't honor on pure g or turbo g channel */
2890 if (ic->ic_flags & IEEE80211_F_SHSLOT)
2891 usec = HAL_SLOT_TIME_9;
2893 usec = HAL_SLOT_TIME_20;
2895 usec = HAL_SLOT_TIME_9;
2897 DPRINTF(sc, ATH_DEBUG_RESET,
2898 "%s: chan %u MHz flags 0x%x %s slot, %u usec\n",
2899 __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
2900 ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec);
2902 ath_hal_setslottime(ah, usec);
2903 sc->sc_updateslot = OK;
2907 * Callback from the 802.11 layer to update the
2908 * slot time based on the current setting.
2911 ath_updateslot(struct ifnet *ifp)
2913 struct ath_softc *sc = ifp->if_softc;
2914 struct ieee80211com *ic = ifp->if_l2com;
2917 * When not coordinating the BSS, change the hardware
2918 * immediately. For other operation we defer the change
2919 * until beacon updates have propagated to the stations.
2921 if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
2922 ic->ic_opmode == IEEE80211_M_MBSS)
2923 sc->sc_updateslot = UPDATE;
2925 ath_setslottime(sc);
2929 * Append the contents of src to dst; both queues
2930 * are assumed to be locked.
2933 ath_txqmove(struct ath_txq *dst, struct ath_txq *src)
2936 ATH_TXQ_LOCK_ASSERT(src);
2937 ATH_TXQ_LOCK_ASSERT(dst);
2939 TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list);
2940 dst->axq_link = src->axq_link;
2941 src->axq_link = NULL;
2942 dst->axq_depth += src->axq_depth;
2943 dst->axq_aggr_depth += src->axq_aggr_depth;
2945 src->axq_aggr_depth = 0;
2949 * Reset the hardware, with no loss.
2951 * This can't be used for a general case reset.
2954 ath_reset_proc(void *arg, int pending)
2956 struct ath_softc *sc = arg;
2957 struct ifnet *ifp = sc->sc_ifp;
2960 if_printf(ifp, "%s: resetting\n", __func__);
2962 ath_reset(ifp, ATH_RESET_NOLOSS);
2966 * Reset the hardware after detecting beacons have stopped.
2969 ath_bstuck_proc(void *arg, int pending)
2971 struct ath_softc *sc = arg;
2972 struct ifnet *ifp = sc->sc_ifp;
2975 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0)
2976 if_printf(ifp, "bb hang detected (0x%x)\n", hangs);
2978 if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n",
2980 sc->sc_stats.ast_bstuck++;
2982 * This assumes that there's no simultaneous channel mode change
2985 ath_reset(ifp, ATH_RESET_NOLOSS);
2989 ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2991 bus_addr_t *paddr = (bus_addr_t*) arg;
2992 KASSERT(error == 0, ("error %u on bus_dma callback", error));
2993 *paddr = segs->ds_addr;
2997 * Allocate the descriptors and appropriate DMA tag/setup.
2999 * For some situations (eg EDMA TX completion), there isn't a requirement
3000 * for the ath_buf entries to be allocated.
3003 ath_descdma_alloc_desc(struct ath_softc *sc,
3004 struct ath_descdma *dd, ath_bufhead *head,
3005 const char *name, int ds_size, int ndesc)
3007 #define DS2PHYS(_dd, _ds) \
3008 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
3009 #define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \
3010 ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0)
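/*
 * Worked example of the check above: with a 0x20 byte descriptor, a
 * physical address ending in 0xff0 gives (0xff0 > 0x1000 - 0x20) ->
 * (0xff0 > 0xfe0) -> 1, i.e. the descriptor would straddle a 4KB page
 * and must be skipped; one ending in 0xfe0 just fits and returns 0.
 */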
3011 struct ifnet *ifp = sc->sc_ifp;
3014 dd->dd_descsize = ds_size;
3016 DPRINTF(sc, ATH_DEBUG_RESET,
3017 "%s: %s DMA: %u desc, %d bytes per descriptor\n",
3018 __func__, name, ndesc, dd->dd_descsize);
3021 dd->dd_desc_len = dd->dd_descsize * ndesc;
3024 * Merlin work-around:
3025 * Descriptors that cross the 4KB boundary can't be used.
3026 * Assume one skipped descriptor per 4KB page.
3028 if (! ath_hal_split4ktrans(sc->sc_ah)) {
3029 int numpages = dd->dd_desc_len / 4096;
3030 dd->dd_desc_len += ds_size * numpages;
3034 * Setup DMA descriptor area.
3036 * BUS_DMA_ALLOCNOW is not used; we never use bounce
3037 * buffers for the descriptors themselves.
3039 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
3040 PAGE_SIZE, 0, /* alignment, bounds */
3041 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
3042 BUS_SPACE_MAXADDR, /* highaddr */
3043 NULL, NULL, /* filter, filterarg */
3044 dd->dd_desc_len, /* maxsize */
3046 dd->dd_desc_len, /* maxsegsize */
3048 NULL, /* lockfunc */
3052 if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
3056 /* allocate descriptors */
3057 error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
3058 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
3061 if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
3062 "error %u\n", ndesc, dd->dd_name, error);
3066 error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
3067 dd->dd_desc, dd->dd_desc_len,
3068 ath_load_cb, &dd->dd_desc_paddr,
3071 if_printf(ifp, "unable to map %s descriptors, error %u\n",
3072 dd->dd_name, error);
3076 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
3077 __func__, dd->dd_name, (uint8_t *) dd->dd_desc,
3078 (u_long) dd->dd_desc_len, (caddr_t) dd->dd_desc_paddr,
3079 /*XXX*/ (u_long) dd->dd_desc_len);
3084 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
3086 bus_dma_tag_destroy(dd->dd_dmat);
3087 memset(dd, 0, sizeof(*dd));
3090 #undef ATH_DESC_4KB_BOUND_CHECK
3094 ath_descdma_setup(struct ath_softc *sc,
3095 struct ath_descdma *dd, ath_bufhead *head,
3096 const char *name, int ds_size, int nbuf, int ndesc)
3098 #define DS2PHYS(_dd, _ds) \
3099 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
3100 #define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \
3101 ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0)
3102 struct ifnet *ifp = sc->sc_ifp;
3105 int i, bsize, error;
3107 /* Allocate descriptors */
3108 error = ath_descdma_alloc_desc(sc, dd, head, name, ds_size,
3111 /* Assume any errors during allocation were dealt with */
3116 ds = (uint8_t *) dd->dd_desc;
3118 /* allocate rx buffers */
3119 bsize = sizeof(struct ath_buf) * nbuf;
3120 bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO);
3122 if_printf(ifp, "malloc of %s buffers failed, size %u\n",
3123 dd->dd_name, bsize);
3129 for (i = 0; i < nbuf; i++, bf++, ds += (ndesc * dd->dd_descsize)) {
3130 bf->bf_desc = (struct ath_desc *) ds;
3131 bf->bf_daddr = DS2PHYS(dd, ds);
3132 if (! ath_hal_split4ktrans(sc->sc_ah)) {
3134 * Merlin WAR: Skip descriptor addresses which
3135 * cause 4KB boundary crossing along any point
3136 * in the descriptor.
3138 if (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr,
3140 /* Start at the next page */
3141 ds += 0x1000 - (bf->bf_daddr & 0xFFF);
3142 bf->bf_desc = (struct ath_desc *) ds;
3143 bf->bf_daddr = DS2PHYS(dd, ds);
3146 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
3149 if_printf(ifp, "unable to create dmamap for %s "
3150 "buffer %u, error %u\n", dd->dd_name, i, error);
3151 ath_descdma_cleanup(sc, dd, head);
3154 bf->bf_lastds = bf->bf_desc; /* Just an initial value */
3155 TAILQ_INSERT_TAIL(head, bf, bf_list);
3159 * XXX TODO: ensure that ds doesn't overflow the descriptor
3160 * allocation otherwise weird stuff will occur and crash your
3164 /* XXX this should likely just call ath_descdma_cleanup() */
3166 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
3167 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
3168 bus_dma_tag_destroy(dd->dd_dmat);
3169 memset(dd, 0, sizeof(*dd));
3172 #undef ATH_DESC_4KB_BOUND_CHECK
3176 * Allocate ath_buf entries but no descriptor contents.
3178 * This is for RX EDMA where the descriptors are the header part of
3182 ath_descdma_setup_rx_edma(struct ath_softc *sc,
3183 struct ath_descdma *dd, ath_bufhead *head,
3184 const char *name, int nbuf, int rx_status_len)
3186 struct ifnet *ifp = sc->sc_ifp;
3188 int i, bsize, error;
3190 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers\n",
3191 __func__, name, nbuf);
3195 * This is (mostly) purely for show. We're not allocating any actual
3196 * descriptors here as EDMA RX has the descriptor be part
3199 * However, dd_desc_len is used by ath_descdma_free() to determine
3200 * whether we have already freed this DMA mapping.
3202 dd->dd_desc_len = rx_status_len * nbuf;
3203 dd->dd_descsize = rx_status_len;
3205 /* allocate rx buffers */
3206 bsize = sizeof(struct ath_buf) * nbuf;
3207 bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO);
3209 if_printf(ifp, "malloc of %s buffers failed, size %u\n",
3210 dd->dd_name, bsize);
3217 for (i = 0; i < nbuf; i++, bf++) {
3220 bf->bf_lastds = NULL; /* Just an initial value */
3222 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
3225 if_printf(ifp, "unable to create dmamap for %s "
3226 "buffer %u, error %u\n", dd->dd_name, i, error);
3227 ath_descdma_cleanup(sc, dd, head);
3230 TAILQ_INSERT_TAIL(head, bf, bf_list);
3234 memset(dd, 0, sizeof(*dd));
3239 ath_descdma_cleanup(struct ath_softc *sc,
3240 struct ath_descdma *dd, ath_bufhead *head)
3243 struct ieee80211_node *ni;
3246 if (dd->dd_dmamap != 0) {
3247 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
3248 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
3249 bus_dma_tag_destroy(dd->dd_dmat);
3253 TAILQ_FOREACH(bf, head, bf_list) {
3256 * XXX warn if there are buffers here.
3257 * XXX it should have been freed by the
3261 if (do_warning == 0) {
3263 device_printf(sc->sc_dev,
3264 "%s: %s: mbuf should've been"
3265 " unmapped/freed!\n",
3269 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
3270 BUS_DMASYNC_POSTREAD);
3271 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3275 if (bf->bf_dmamap != NULL) {
3276 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
3277 bf->bf_dmamap = NULL;
3283 * Reclaim node reference.
3285 ieee80211_free_node(ni);
3293 if (dd->dd_bufptr != NULL)
3294 free(dd->dd_bufptr, M_ATHDEV);
3295 memset(dd, 0, sizeof(*dd));
3299 ath_desc_alloc(struct ath_softc *sc)
3303 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
3304 "tx", sc->sc_tx_desclen, ath_txbuf, ATH_MAX_SCATTER);
3308 sc->sc_txbuf_cnt = ath_txbuf;
3310 error = ath_descdma_setup(sc, &sc->sc_txdma_mgmt, &sc->sc_txbuf_mgmt,
3311 "tx_mgmt", sc->sc_tx_desclen, ath_txbuf_mgmt,
3314 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3319 * XXX mark txbuf_mgmt frames with ATH_BUF_MGMT, so the
3320 * flag doesn't have to be set in ath_getbuf_locked().
3323 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
3324 "beacon", sc->sc_tx_desclen, ATH_BCBUF, 1);
3326 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3327 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt,
3328 &sc->sc_txbuf_mgmt);
3335 ath_desc_free(struct ath_softc *sc)
3338 if (sc->sc_bdma.dd_desc_len != 0)
3339 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
3340 if (sc->sc_txdma.dd_desc_len != 0)
3341 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3342 if (sc->sc_txdma_mgmt.dd_desc_len != 0)
3343 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt,
3344 &sc->sc_txbuf_mgmt);
3347 static struct ieee80211_node *
3348 ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3350 struct ieee80211com *ic = vap->iv_ic;
3351 struct ath_softc *sc = ic->ic_ifp->if_softc;
3352 const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space;
3353 struct ath_node *an;
3355 an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
3360 ath_rate_node_init(sc, an);
3362 /* Setup the mutex - there's no associd yet so set the name to NULL */
3363 snprintf(an->an_name, sizeof(an->an_name), "%s: node %p",
3364 device_get_nameunit(sc->sc_dev), an);
3365 mtx_init(&an->an_mtx, an->an_name, NULL, MTX_DEF);
3367 /* XXX setup ath_tid */
3368 ath_tx_tid_init(sc, an);
3370 DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an);
3371 return &an->an_node;
3375 ath_node_cleanup(struct ieee80211_node *ni)
3377 struct ieee80211com *ic = ni->ni_ic;
3378 struct ath_softc *sc = ic->ic_ifp->if_softc;
3380 /* Cleanup ath_tid, free unused bufs, unlink bufs in TXQ */
3381 ath_tx_node_flush(sc, ATH_NODE(ni));
3382 ath_rate_node_cleanup(sc, ATH_NODE(ni));
3383 sc->sc_node_cleanup(ni);
3387 ath_node_free(struct ieee80211_node *ni)
3389 struct ieee80211com *ic = ni->ni_ic;
3390 struct ath_softc *sc = ic->ic_ifp->if_softc;
3392 DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni);
3393 mtx_destroy(&ATH_NODE(ni)->an_mtx);
3394 sc->sc_node_free(ni);
3398 ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
3400 struct ieee80211com *ic = ni->ni_ic;
3401 struct ath_softc *sc = ic->ic_ifp->if_softc;
3402 struct ath_hal *ah = sc->sc_ah;
3404 *rssi = ic->ic_node_getrssi(ni);
3405 if (ni->ni_chan != IEEE80211_CHAN_ANYC)
3406 *noise = ath_hal_getchannoise(ah, ni->ni_chan);
3408 *noise = -95; /* nominally correct */
3412 * Set the default antenna.
3415 ath_setdefantenna(struct ath_softc *sc, u_int antenna)
3417 struct ath_hal *ah = sc->sc_ah;
3419 /* XXX block beacon interrupts */
3420 ath_hal_setdefantenna(ah, antenna);
3421 if (sc->sc_defant != antenna)
3422 sc->sc_stats.ast_ant_defswitch++;
3423 sc->sc_defant = antenna;
3424 sc->sc_rxotherant = 0;
3428 ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum)
3430 txq->axq_qnum = qnum;
3433 txq->axq_aggr_depth = 0;
3434 txq->axq_intrcnt = 0;
3435 txq->axq_link = NULL;
3436 txq->axq_softc = sc;
3437 TAILQ_INIT(&txq->axq_q);
3438 TAILQ_INIT(&txq->axq_tidq);
3439 TAILQ_INIT(&txq->fifo.axq_q);
3440 ATH_TXQ_LOCK_INIT(sc, txq);
3444 * Setup a h/w transmit queue.
3446 static struct ath_txq *
3447 ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
3449 #define N(a) (sizeof(a)/sizeof(a[0]))
3450 struct ath_hal *ah = sc->sc_ah;
3454 memset(&qi, 0, sizeof(qi));
3455 qi.tqi_subtype = subtype;
3456 qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
3457 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
3458 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
3460 * Enable interrupts only for EOL and DESC conditions.
3461 * We mark tx descriptors to receive a DESC interrupt
3462 * when a tx queue gets deep; otherwise we wait for the
3463 * EOL to reap descriptors. Note that this is done to
3464 * reduce interrupt load and this only defers reaping
3465 * descriptors, never transmitting frames. Aside from
3466 * reducing interrupts this also permits more concurrency.
3467 * The only potential downside is if the tx queue backs
3468 * up, in which case the top half of the kernel may back up
3469 * due to a lack of tx descriptors.
3471 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE;
3472 qnum = ath_hal_setuptxqueue(ah, qtype, &qi);
3475 * NB: don't print a message, this happens
3476 * normally on parts with too few tx queues
3480 if (qnum >= N(sc->sc_txq)) {
3481 device_printf(sc->sc_dev,
3482 "hal qnum %u out of range, max %zu!\n",
3483 qnum, N(sc->sc_txq));
3484 ath_hal_releasetxqueue(ah, qnum);
3487 if (!ATH_TXQ_SETUP(sc, qnum)) {
3488 ath_txq_init(sc, &sc->sc_txq[qnum], qnum);
3489 sc->sc_txqsetup |= 1<<qnum;
3491 return &sc->sc_txq[qnum];
3496 * Setup a hardware data transmit queue for the specified
3497 * access category (AC). The hal may not support all requested
3498 * queues in which case it will return a reference to a
3499 * previously setup queue. We record the mapping from ac's
3500 * to h/w queues for use by ath_tx_start and also track
3501 * the set of h/w queues being used to optimize work in the
3502 * transmit interrupt handler and related routines.
3505 ath_tx_setup(struct ath_softc *sc, int ac, int haltype)
3507 #define N(a) (sizeof(a)/sizeof(a[0]))
3508 struct ath_txq *txq;
3510 if (ac >= N(sc->sc_ac2q)) {
3511 device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
3512 ac, N(sc->sc_ac2q));
3515 txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype);
3518 sc->sc_ac2q[ac] = txq;
3526 * Update WME parameters for a transmit queue.
3529 ath_txq_update(struct ath_softc *sc, int ac)
3531 #define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1)
3532 #define ATH_TXOP_TO_US(v) (v<<5)
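/*
 * Worked example for the two helper macros above:
 * ATH_EXPONENT_TO_VALUE(4) = (1<<4)-1 = 15 slots (e.g. a logcwmin of 4),
 * and ATH_TXOP_TO_US(94) = 94<<5 = 3008us, since the WME txopLimit is
 * carried in 32 microsecond units.
 */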
3533 struct ifnet *ifp = sc->sc_ifp;
3534 struct ieee80211com *ic = ifp->if_l2com;
3535 struct ath_txq *txq = sc->sc_ac2q[ac];
3536 struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
3537 struct ath_hal *ah = sc->sc_ah;
3540 ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi);
3541 #ifdef IEEE80211_SUPPORT_TDMA
3544 * AIFS is zero so there's no pre-transmit wait. The
3545 * burst time defines the slot duration and is configured
3546 * through net80211. The QCU is setup to not do post-xmit
3547 * back off, lockout all lower-priority QCU's, and fire
3548 * off the DMA beacon alert timer which is setup based
3549 * on the slot configuration.
3551 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
3552 | HAL_TXQ_TXERRINT_ENABLE
3553 | HAL_TXQ_TXURNINT_ENABLE
3554 | HAL_TXQ_TXEOLINT_ENABLE
3556 | HAL_TXQ_BACKOFF_DISABLE
3557 | HAL_TXQ_ARB_LOCKOUT_GLOBAL
3561 qi.tqi_readyTime = sc->sc_tdmaslotlen;
3562 qi.tqi_burstTime = qi.tqi_readyTime;
3566 * XXX shouldn't this just use the default flags
3567 * used in the previous queue setup?
3569 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
3570 | HAL_TXQ_TXERRINT_ENABLE
3571 | HAL_TXQ_TXDESCINT_ENABLE
3572 | HAL_TXQ_TXURNINT_ENABLE
3573 | HAL_TXQ_TXEOLINT_ENABLE
3575 qi.tqi_aifs = wmep->wmep_aifsn;
3576 qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
3577 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
3578 qi.tqi_readyTime = 0;
3579 qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit);
3580 #ifdef IEEE80211_SUPPORT_TDMA
3584 DPRINTF(sc, ATH_DEBUG_RESET,
3585 "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n",
3586 __func__, txq->axq_qnum, qi.tqi_qflags,
3587 qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime);
3589 if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) {
3590 if_printf(ifp, "unable to update hardware queue "
3591 "parameters for %s traffic!\n",
3592 ieee80211_wme_acnames[ac]);
3595 ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */
3598 #undef ATH_TXOP_TO_US
3599 #undef ATH_EXPONENT_TO_VALUE
3603 * Callback from the 802.11 layer to update WME parameters.
3606 ath_wme_update(struct ieee80211com *ic)
3608 struct ath_softc *sc = ic->ic_ifp->if_softc;
3610 return !ath_txq_update(sc, WME_AC_BE) ||
3611 !ath_txq_update(sc, WME_AC_BK) ||
3612 !ath_txq_update(sc, WME_AC_VI) ||
3613 !ath_txq_update(sc, WME_AC_VO) ? EIO : 0;
3617 * Reclaim resources for a setup queue.
3620 ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
3623 ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum);
3624 sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
3625 ATH_TXQ_LOCK_DESTROY(txq);
3629 * Reclaim all tx queue resources.
3632 ath_tx_cleanup(struct ath_softc *sc)
3636 ATH_TXBUF_LOCK_DESTROY(sc);
3637 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
3638 if (ATH_TXQ_SETUP(sc, i))
3639 ath_tx_cleanupq(sc, &sc->sc_txq[i]);
3643 * Return h/w rate index for an IEEE rate (w/o basic rate bit)
3644 * using the current rates in sc_rixmap.
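/*
 * Hedged usage sketch: the rate argument is the IEEE rate in 500kb/s
 * units with the basic-rate bit stripped, e.g.
 *
 *	rix = ath_tx_findrix(sc, 108);	(54 Mb/s)
 *	rix = ath_tx_findrix(sc, 2);	(1 Mb/s)
 *
 * Rates not present in sc_rixmap map to 0xff and fall back to the
 * lowest rate index (0).
 */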
3647 ath_tx_findrix(const struct ath_softc *sc, uint8_t rate)
3649 int rix = sc->sc_rixmap[rate];
3650 /* NB: return lowest rix for invalid rate */
3651 return (rix == 0xff ? 0 : rix);
3655 ath_tx_update_stats(struct ath_softc *sc, struct ath_tx_status *ts,
3658 struct ieee80211_node *ni = bf->bf_node;
3659 struct ifnet *ifp = sc->sc_ifp;
3660 struct ieee80211com *ic = ifp->if_l2com;
3663 if (ts->ts_status == 0) {
3664 u_int8_t txant = ts->ts_antenna;
3665 sc->sc_stats.ast_ant_tx[txant]++;
3666 sc->sc_ant_tx[txant]++;
3667 if (ts->ts_finaltsi != 0)
3668 sc->sc_stats.ast_tx_altrate++;
3669 pri = M_WME_GETAC(bf->bf_m);
3670 if (pri >= WME_AC_VO)
3671 ic->ic_wme.wme_hipri_traffic++;
3672 if ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)
3673 ni->ni_inact = ni->ni_inact_reload;
3675 if (ts->ts_status & HAL_TXERR_XRETRY)
3676 sc->sc_stats.ast_tx_xretries++;
3677 if (ts->ts_status & HAL_TXERR_FIFO)
3678 sc->sc_stats.ast_tx_fifoerr++;
3679 if (ts->ts_status & HAL_TXERR_FILT)
3680 sc->sc_stats.ast_tx_filtered++;
3681 if (ts->ts_status & HAL_TXERR_XTXOP)
3682 sc->sc_stats.ast_tx_xtxop++;
3683 if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED)
3684 sc->sc_stats.ast_tx_timerexpired++;
3686 if (bf->bf_m->m_flags & M_FF)
3687 sc->sc_stats.ast_ff_txerr++;
3689 /* XXX when is this valid? */
3690 if (ts->ts_flags & HAL_TX_DESC_CFG_ERR)
3691 sc->sc_stats.ast_tx_desccfgerr++;
3693 * This can be valid for successful frame transmission!
3694 * If there's a TX FIFO underrun during aggregate transmission,
3695 * the MAC will pad the rest of the aggregate with delimiters.
3696 * If a BA is returned, the frame is marked as "OK" and it's up
3697 * to the TX completion code to notice which frames weren't
3698 * successfully transmitted.
3700 if (ts->ts_flags & HAL_TX_DATA_UNDERRUN)
3701 sc->sc_stats.ast_tx_data_underrun++;
3702 if (ts->ts_flags & HAL_TX_DELIM_UNDERRUN)
3703 sc->sc_stats.ast_tx_delim_underrun++;
3705 sr = ts->ts_shortretry;
3706 lr = ts->ts_longretry;
3707 sc->sc_stats.ast_tx_shortretry += sr;
3708 sc->sc_stats.ast_tx_longretry += lr;
3713 * The default completion. If fail is 1, this means
3714 * "please don't retry the frame, and just return -1 status
3715 * to the net80211 stack".
3718 ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
3720 struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
3726 st = ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) ?
3727 ts->ts_status : HAL_TXERR_XRETRY;
3730 if (bf->bf_state.bfs_dobaw)
3731 device_printf(sc->sc_dev,
3732 "%s: bf %p: seqno %d: dobaw should've been cleared!\n",
3735 SEQNO(bf->bf_state.bfs_seqno));
3737 if (bf->bf_next != NULL)
3738 device_printf(sc->sc_dev,
3739 "%s: bf %p: seqno %d: bf_next not NULL!\n",
3742 SEQNO(bf->bf_state.bfs_seqno));
3745 * Check if the node software queue is empty; if so
3746 * then clear the TIM.
3748 * This needs to be done before the buffer is freed as
3749 * otherwise the node reference will have been released
3750 * and the node may not actually exist any longer.
3752 * XXX I don't like this belonging here, but it's cleaner
3753 * to do it here right now than in all the other places
3754 * where ath_tx_default_comp() is called.
3756 * XXX TODO: during drain, ensure that the callback is
3757 * being called so we get a chance to update the TIM.
3760 ath_tx_update_tim(sc, bf->bf_node, 0);
3763 * Do any tx complete callback. Note this must
3764 * be done before releasing the node reference.
3765 * This will free the mbuf, release the net80211
3766 * node and recycle the ath_buf.
3768 ath_tx_freebuf(sc, bf, st);
3772 * Update rate control with the given completion status.
3775 ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
3776 struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen,
3777 int nframes, int nbad)
3779 struct ath_node *an;
3781 /* Only for unicast frames */
3786 ATH_NODE_UNLOCK_ASSERT(an);
3788 if ((ts->ts_status & HAL_TXERR_FILT) == 0) {
3790 ath_rate_tx_complete(sc, an, rc, ts, frmlen, nframes, nbad);
3791 ATH_NODE_UNLOCK(an);
3796 * Process the completion of the given buffer.
3798 * This calls the rate control update and then the buffer completion.
3799 * This will either free the buffer or requeue it. In any case, the
3800 * bf pointer should be treated as invalid after this function is called.
3803 ath_tx_process_buf_completion(struct ath_softc *sc, struct ath_txq *txq,
3804 struct ath_tx_status *ts, struct ath_buf *bf)
3806 struct ieee80211_node *ni = bf->bf_node;
3807 struct ath_node *an = NULL;
3809 ATH_TX_UNLOCK_ASSERT(sc);
3811 /* If unicast frame, update general statistics */
3814 /* update statistics */
3815 ath_tx_update_stats(sc, ts, bf);
3819 * Call the completion handler.
3820 * The completion handler is responsible for
3821 * calling the rate control code.
3823 * Frames with no completion handler get the
3824 * rate control code called here.
3826 if (bf->bf_comp == NULL) {
3827 if ((ts->ts_status & HAL_TXERR_FILT) == 0 &&
3828 (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) {
3830 * XXX assume this isn't an aggregate
3833 ath_tx_update_ratectrl(sc, ni,
3834 bf->bf_state.bfs_rc, ts,
3835 bf->bf_state.bfs_pktlen, 1,
3836 (ts->ts_status == 0 ? 0 : 1));
3838 ath_tx_default_comp(sc, bf, 0);
3840 bf->bf_comp(sc, bf, 0);
3846 * Process completed xmit descriptors from the specified queue.
3847 * Kick the packet scheduler if needed. This can occur from this
3851 ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched)
3853 struct ath_hal *ah = sc->sc_ah;
3855 struct ath_desc *ds;
3856 struct ath_tx_status *ts;
3857 struct ieee80211_node *ni;
3858 #ifdef IEEE80211_SUPPORT_SUPERG
3859 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
3860 #endif /* IEEE80211_SUPPORT_SUPERG */
3864 DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n",
3865 __func__, txq->axq_qnum,
3866 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
3869 ATH_KTR(sc, ATH_KTR_TXCOMP, 4,
3870 "ath_tx_processq: txq=%u head %p link %p depth %p",
3872 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
3879 txq->axq_intrcnt = 0; /* reset periodic desc intr count */
3880 bf = TAILQ_FIRST(&txq->axq_q);
3882 ATH_TXQ_UNLOCK(txq);
3885 ds = bf->bf_lastds; /* XXX must be setup correctly! */
3886 ts = &bf->bf_status.ds_txstat;
3888 status = ath_hal_txprocdesc(ah, ds, ts);
3890 if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
3891 ath_printtxbuf(sc, bf, txq->axq_qnum, 0,
3893 else if ((sc->sc_debug & ATH_DEBUG_RESET) && (dosched == 0))
3894 ath_printtxbuf(sc, bf, txq->axq_qnum, 0,
3897 #ifdef ATH_DEBUG_ALQ
3898 if (if_ath_alq_checkdebug(&sc->sc_alq,
3899 ATH_ALQ_EDMA_TXSTATUS)) {
3900 if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS,
3901 sc->sc_tx_statuslen,
3906 if (status == HAL_EINPROGRESS) {
3907 ATH_KTR(sc, ATH_KTR_TXCOMP, 3,
3908 "ath_tx_processq: txq=%u, bf=%p ds=%p, HAL_EINPROGRESS",
3909 txq->axq_qnum, bf, ds);
3910 ATH_TXQ_UNLOCK(txq);
3913 ATH_TXQ_REMOVE(txq, bf, bf_list);
3914 if (txq->axq_depth > 0) {
3916 * More frames follow. Mark the buffer busy
3917 * so it's not re-used while the hardware may
3918 * still re-read the link field in the descriptor.
3920 * Use the last buffer in an aggregate as that
3921 * is where the hardware may be - intermediate
3922 * descriptors won't be "busy".
3924 bf->bf_last->bf_flags |= ATH_BUF_BUSY;
3926 txq->axq_link = NULL;
3927 if (bf->bf_state.bfs_aggr)
3928 txq->axq_aggr_depth--;
3932 ATH_KTR(sc, ATH_KTR_TXCOMP, 5,
3933 "ath_tx_processq: txq=%u, bf=%p, ds=%p, ni=%p, ts_status=0x%08x",
3934 txq->axq_qnum, bf, ds, ni, ts->ts_status);
3936 * If a unicast frame was ack'd, update the RSSI,
3937 * including the last rx time used to
3938 * work around phantom bmiss interrupts.
3940 if (ni != NULL && ts->ts_status == 0 &&
3941 ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) {
3943 sc->sc_stats.ast_tx_rssi = ts->ts_rssi;
3944 ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
3947 ATH_TXQ_UNLOCK(txq);
3950 * Update statistics and call completion
3952 ath_tx_process_buf_completion(sc, txq, ts, bf);
3954 /* XXX at this point, bf and ni may be totally invalid */
3956 #ifdef IEEE80211_SUPPORT_SUPERG
3958 * Flush fast-frame staging queue when traffic slows.
3960 if (txq->axq_depth <= 1)
3961 ieee80211_ff_flush(ic, txq->axq_ac);
3964 /* Kick the software TXQ scheduler */
3967 ath_txq_sched(sc, txq);
3971 ATH_KTR(sc, ATH_KTR_TXCOMP, 1,
3972 "ath_tx_processq: txq=%u: done",
3978 #define TXQACTIVE(t, q) ( (t) & (1 << (q)))
3981 * Deferred processing of transmit interrupt; special-cased
3982 * for a single hardware transmit queue (e.g. 5210 and 5211).
3985 ath_tx_proc_q0(void *arg, int npending)
3987 struct ath_softc *sc = arg;
3988 struct ifnet *ifp = sc->sc_ifp;
3992 sc->sc_txproc_cnt++;
3993 txqs = sc->sc_txq_active;
3994 sc->sc_txq_active &= ~txqs;
3997 ATH_KTR(sc, ATH_KTR_TXCOMP, 1,
3998 "ath_tx_proc_q0: txqs=0x%08x", txqs);
4000 if (TXQACTIVE(txqs, 0) && ath_tx_processq(sc, &sc->sc_txq[0], 1))
4001 /* XXX why is lastrx updated in tx code? */
4002 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
4003 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum))
4004 ath_tx_processq(sc, sc->sc_cabq, 1);
4005 IF_LOCK(&ifp->if_snd);
4006 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4007 IF_UNLOCK(&ifp->if_snd);
4008 sc->sc_wd_timer = 0;
4011 ath_led_event(sc, sc->sc_txrix);
4014 sc->sc_txproc_cnt--;
4021 * Deferred processing of transmit interrupt; special-cased
4022 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support).
4025 ath_tx_proc_q0123(void *arg, int npending)
4027 struct ath_softc *sc = arg;
4028 struct ifnet *ifp = sc->sc_ifp;
4033 sc->sc_txproc_cnt++;
4034 txqs = sc->sc_txq_active;
4035 sc->sc_txq_active &= ~txqs;
4038 ATH_KTR(sc, ATH_KTR_TXCOMP, 1,
4039 "ath_tx_proc_q0123: txqs=0x%08x", txqs);
4042 * Process each active queue.
4045 if (TXQACTIVE(txqs, 0))
4046 nacked += ath_tx_processq(sc, &sc->sc_txq[0], 1);
4047 if (TXQACTIVE(txqs, 1))
4048 nacked += ath_tx_processq(sc, &sc->sc_txq[1], 1);
4049 if (TXQACTIVE(txqs, 2))
4050 nacked += ath_tx_processq(sc, &sc->sc_txq[2], 1);
4051 if (TXQACTIVE(txqs, 3))
4052 nacked += ath_tx_processq(sc, &sc->sc_txq[3], 1);
4053 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum))
4054 ath_tx_processq(sc, sc->sc_cabq, 1);
4056 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
4058 IF_LOCK(&ifp->if_snd);
4059 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4060 IF_UNLOCK(&ifp->if_snd);
4061 sc->sc_wd_timer = 0;
4064 ath_led_event(sc, sc->sc_txrix);
4067 sc->sc_txproc_cnt--;
4074 * Deferred processing of transmit interrupt.
4077 ath_tx_proc(void *arg, int npending)
4079 struct ath_softc *sc = arg;
4080 struct ifnet *ifp = sc->sc_ifp;
4085 sc->sc_txproc_cnt++;
4086 txqs = sc->sc_txq_active;
4087 sc->sc_txq_active &= ~txqs;
4090 ATH_KTR(sc, ATH_KTR_TXCOMP, 1, "ath_tx_proc: txqs=0x%08x", txqs);
4093 * Process each active queue.
4096 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
4097 if (ATH_TXQ_SETUP(sc, i) && TXQACTIVE(txqs, i))
4098 nacked += ath_tx_processq(sc, &sc->sc_txq[i], 1);
4100 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
4102 /* XXX check this inside of IF_LOCK? */
4103 IF_LOCK(&ifp->if_snd);
4104 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4105 IF_UNLOCK(&ifp->if_snd);
4106 sc->sc_wd_timer = 0;
4109 ath_led_event(sc, sc->sc_txrix);
4112 sc->sc_txproc_cnt--;
4120 * Deferred processing of TXQ rescheduling.
4123 ath_txq_sched_tasklet(void *arg, int npending)
4125 struct ath_softc *sc = arg;
4128 /* XXX is skipping ok? */
4131 if (sc->sc_inreset_cnt > 0) {
4132 device_printf(sc->sc_dev,
4133 "%s: sc_inreset_cnt > 0; skipping\n", __func__);
4138 sc->sc_txproc_cnt++;
4142 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
4143 if (ATH_TXQ_SETUP(sc, i)) {
4144 ath_txq_sched(sc, &sc->sc_txq[i]);
4150 sc->sc_txproc_cnt--;
4155 ath_returnbuf_tail(struct ath_softc *sc, struct ath_buf *bf)
4158 ATH_TXBUF_LOCK_ASSERT(sc);
4160 if (bf->bf_flags & ATH_BUF_MGMT)
4161 TAILQ_INSERT_TAIL(&sc->sc_txbuf_mgmt, bf, bf_list);
4163 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
4165 if (sc->sc_txbuf_cnt > ath_txbuf) {
4166 device_printf(sc->sc_dev,
4167 "%s: sc_txbuf_cnt > %d?\n",
4170 sc->sc_txbuf_cnt = ath_txbuf;
4176 ath_returnbuf_head(struct ath_softc *sc, struct ath_buf *bf)
4179 ATH_TXBUF_LOCK_ASSERT(sc);
4181 if (bf->bf_flags & ATH_BUF_MGMT)
4182 TAILQ_INSERT_HEAD(&sc->sc_txbuf_mgmt, bf, bf_list);
4184 TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
4186 if (sc->sc_txbuf_cnt > ATH_TXBUF) {
4187 device_printf(sc->sc_dev,
4188 "%s: sc_txbuf_cnt > %d?\n",
4191 sc->sc_txbuf_cnt = ATH_TXBUF;
4197 * Free the holding buffer if it exists
4200 ath_txq_freeholdingbuf(struct ath_softc *sc, struct ath_txq *txq)
4202 ATH_TXBUF_LOCK_ASSERT(sc);
4204 if (txq->axq_holdingbf == NULL)
4207 txq->axq_holdingbf->bf_flags &= ~ATH_BUF_BUSY;
4208 ath_returnbuf_tail(sc, txq->axq_holdingbf);
4209 txq->axq_holdingbf = NULL;
4213 * Add this buffer to the holding queue, freeing the previous
4217 ath_txq_addholdingbuf(struct ath_softc *sc, struct ath_buf *bf)
4219 struct ath_txq *txq;
4221 ATH_TXBUF_LOCK_ASSERT(sc);
4223 /* XXX assert ATH_BUF_BUSY is set */
4225 /* XXX assert the tx queue is under the max number */
4226 if (bf->bf_state.bfs_tx_queue > HAL_NUM_TX_QUEUES) {
4227 device_printf(sc->sc_dev, "%s: bf=%p: invalid tx queue (%d)\n",
4230 bf->bf_state.bfs_tx_queue);
4231 bf->bf_flags &= ~ATH_BUF_BUSY;
4232 ath_returnbuf_tail(sc, bf);
4235 txq = &sc->sc_txq[bf->bf_state.bfs_tx_queue];
4236 ath_txq_freeholdingbuf(sc, txq);
4237 txq->axq_holdingbf = bf;
4241 * Return a buffer to the pool and update the 'busy' flag on the
4242 * previous 'tail' entry.
4244 * This _must_ only be called when the buffer is involved in a completed
4245 * TX. The logic is that if it was part of an active TX, the previous
4246 * buffer on the list is now not involved in a halted TX DMA queue, waiting
4247 * for restart (eg for TDMA.)
4249 * The caller must free the mbuf and recycle the node reference.
4252 ath_freebuf(struct ath_softc *sc, struct ath_buf *bf)
4254 KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__));
4255 KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__));
4258 * If this buffer is busy, push it onto the holding queue
4260 if (bf->bf_flags & ATH_BUF_BUSY) {
4262 ath_txq_addholdingbuf(sc, bf);
4263 ATH_TXBUF_UNLOCK(sc);
4268 * Not a busy buffer, so free normally
4271 ath_returnbuf_tail(sc, bf);
4272 ATH_TXBUF_UNLOCK(sc);
4276 * This is currently used by ath_tx_draintxq() and
4277 * ath_tx_tid_free_pkts().
4279 * It recycles a single ath_buf.
4282 ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status)
4284 struct ieee80211_node *ni = bf->bf_node;
4285 struct mbuf *m0 = bf->bf_m;
4288 * Make sure that we only sync/unload if there's an mbuf.
4289 * If not (eg we cloned a buffer), the unload will have already
4292 if (bf->bf_m != NULL) {
4293 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
4294 BUS_DMASYNC_POSTWRITE);
4295 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
4301 /* Free the buffer, it's not needed any longer */
4302 ath_freebuf(sc, bf);
4306 * Do any callback and reclaim the node reference.
4308 if (m0->m_flags & M_TXCB)
4309 ieee80211_process_callback(ni, m0, status);
4310 ieee80211_free_node(ni);
4313 /* Finally, we don't need this mbuf any longer */
4317 static struct ath_buf *
4318 ath_tx_draintxq_get_one(struct ath_softc *sc, struct ath_txq *txq)
4322 ATH_TXQ_LOCK_ASSERT(txq);
4325 * Drain the FIFO queue first, then if it's
4326 * empty, move to the normal frame queue.
4328 bf = TAILQ_FIRST(&txq->fifo.axq_q);
4331 * Is it the last buffer in this set?
4332 * Decrement the FIFO counter.
4334 if (bf->bf_flags & ATH_BUF_FIFOEND) {
4335 if (txq->axq_fifo_depth == 0) {
4336 device_printf(sc->sc_dev,
4337 "%s: Q%d: fifo_depth=0, fifo.axq_depth=%d?\n",
4340 txq->fifo.axq_depth);
4342 txq->axq_fifo_depth--;
4344 ATH_TXQ_REMOVE(&txq->fifo, bf, bf_list);
4351 if (txq->axq_fifo_depth != 0 || txq->fifo.axq_depth != 0) {
4352 device_printf(sc->sc_dev,
4353 "%s: Q%d: fifo_depth=%d, fifo.axq_depth=%d\n",
4356 txq->axq_fifo_depth,
4357 txq->fifo.axq_depth);
4361 * Now drain the pending queue.
4363 bf = TAILQ_FIRST(&txq->axq_q);
4365 txq->axq_link = NULL;
4368 ATH_TXQ_REMOVE(txq, bf, bf_list);
4373 ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
4376 struct ath_hal *ah = sc->sc_ah;
4382 * NB: this assumes output has been stopped and
4383 * we do not need to block ath_tx_proc
4385 for (ix = 0;; ix++) {
4387 bf = ath_tx_draintxq_get_one(sc, txq);
4389 ATH_TXQ_UNLOCK(txq);
4392 if (bf->bf_state.bfs_aggr)
4393 txq->axq_aggr_depth--;
4395 if (sc->sc_debug & ATH_DEBUG_RESET) {
4396 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
4400 * EDMA operation has a TX completion FIFO
4401 * separate from the TX descriptor, so this
4402 * method of checking the "completion" status
4405 if (! sc->sc_isedma) {
4406 status = (ath_hal_txprocdesc(ah,
4408 &bf->bf_status.ds_txstat) == HAL_OK);
4410 ath_printtxbuf(sc, bf, txq->axq_qnum, ix, status);
4411 ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *),
4412 bf->bf_m->m_len, 0, -1);
4414 #endif /* ATH_DEBUG */
4416 * Since we're now doing magic in the completion
4417 * functions, we -must- call it for aggregation
4418 * destinations or BAW tracking will get upset.
4421 * Clear ATH_BUF_BUSY; the completion handler
4422 * will free the buffer.
4424 ATH_TXQ_UNLOCK(txq);
4425 bf->bf_flags &= ~ATH_BUF_BUSY;
4427 bf->bf_comp(sc, bf, 1);
4429 ath_tx_default_comp(sc, bf, 1);
4433 * Free the holding buffer if it exists
4436 ath_txq_freeholdingbuf(sc, txq);
4437 ATH_TXBUF_UNLOCK(sc);
4440 * Drain software queued frames which are on
4443 ath_tx_txq_drain(sc, txq);
4447 ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
4449 struct ath_hal *ah = sc->sc_ah;
4451 DPRINTF(sc, ATH_DEBUG_RESET,
4452 "%s: tx queue [%u] %p, flags 0x%08x, link %p\n",
4455 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum),
4458 (void) ath_hal_stoptxdma(ah, txq->axq_qnum);
4462 ath_stoptxdma(struct ath_softc *sc)
4464 struct ath_hal *ah = sc->sc_ah;
4467 /* XXX return value */
4471 if (!sc->sc_invalid) {
4472 /* don't touch the hardware if marked invalid */
4473 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
4474 __func__, sc->sc_bhalq,
4475 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq),
4477 (void) ath_hal_stoptxdma(ah, sc->sc_bhalq);
4478 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
4479 if (ATH_TXQ_SETUP(sc, i))
4480 ath_tx_stopdma(sc, &sc->sc_txq[i]);
4487 * Drain the transmit queues and reclaim resources.
4490 ath_legacy_tx_drain(struct ath_softc *sc, ATH_RESET_TYPE reset_type)
4493 struct ath_hal *ah = sc->sc_ah;
4495 struct ifnet *ifp = sc->sc_ifp;
4498 (void) ath_stoptxdma(sc);
4500 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
4502 * XXX TODO: should we just handle the completed TX frames
4503 * here, whether or not the reset is a full one?
4505 if (ATH_TXQ_SETUP(sc, i)) {
4506 if (reset_type == ATH_RESET_NOLOSS)
4507 ath_tx_processq(sc, &sc->sc_txq[i], 0);
4509 ath_tx_draintxq(sc, &sc->sc_txq[i]);
4513 if (sc->sc_debug & ATH_DEBUG_RESET) {
4514 struct ath_buf *bf = TAILQ_FIRST(&sc->sc_bbuf);
4515 if (bf != NULL && bf->bf_m != NULL) {
4516 ath_printtxbuf(sc, bf, sc->sc_bhalq, 0,
4517 ath_hal_txprocdesc(ah, bf->bf_lastds,
4518 &bf->bf_status.ds_txstat) == HAL_OK);
4519 ieee80211_dump_pkt(ifp->if_l2com,
4520 mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len,
4524 #endif /* ATH_DEBUG */
4525 IF_LOCK(&ifp->if_snd);
4526 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4527 IF_UNLOCK(&ifp->if_snd);
4528 sc->sc_wd_timer = 0;
4532 * Update internal state after a channel change.
4535 ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan)
4537 enum ieee80211_phymode mode;
4540 * Change channels and update the h/w rate map
4541 * if we're switching; e.g. 11a to 11b/g.
4543 mode = ieee80211_chan2mode(chan);
4544 if (mode != sc->sc_curmode)
4545 ath_setcurmode(sc, mode);
4546 sc->sc_curchan = chan;
4550 * Set/change channels. If the channel is really being changed,
4551 * it's done by resetting the chip. To accomplish this we must
4552 * first clean up any pending DMA, then restart things a la ath_init.
4556 ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
4558 struct ifnet *ifp = sc->sc_ifp;
4559 struct ieee80211com *ic = ifp->if_l2com;
4560 struct ath_hal *ah = sc->sc_ah;
4563 /* Treat this as an interface reset */
4564 ATH_PCU_UNLOCK_ASSERT(sc);
4565 ATH_UNLOCK_ASSERT(sc);
4567 /* (Try to) stop TX/RX from occurring */
4568 taskqueue_block(sc->sc_tq);
4571 ath_hal_intrset(ah, 0); /* Stop new RX/TX completion */
4572 ath_txrx_stop_locked(sc); /* Stop pending RX/TX completion */
4573 if (ath_reset_grablock(sc, 1) == 0) {
4574 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
4579 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n",
4580 __func__, ieee80211_chan2ieee(ic, chan),
4581 chan->ic_freq, chan->ic_flags);
4582 if (chan != sc->sc_curchan) {
4585 * To switch channels clear any pending DMA operations;
4586 * wait long enough for the RX fifo to drain, reset the
4587 * hardware at the new frequency, and then re-enable
4588 * the relevant bits of the h/w.
4591 ath_hal_intrset(ah, 0); /* disable interrupts */
4593 ath_stoprecv(sc, 1); /* turn off frame recv */
4595 * First, handle completed TX/RX frames.
4598 ath_draintxq(sc, ATH_RESET_NOLOSS);
4600 * Next, flush the non-scheduled frames.
4602 ath_draintxq(sc, ATH_RESET_FULL); /* clear pending tx frames */
4604 ath_update_chainmasks(sc, chan);
4605 ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
4606 sc->sc_cur_rxchainmask);
4607 if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, &status)) {
4608 if_printf(ifp, "%s: unable to reset "
4609 "channel %u (%u MHz, flags 0x%x), hal status %u\n",
4610 __func__, ieee80211_chan2ieee(ic, chan),
4611 chan->ic_freq, chan->ic_flags, status);
4615 sc->sc_diversity = ath_hal_getdiversity(ah);
4617 /* Let DFS at it in case it's a DFS channel */
4618 ath_dfs_radar_enable(sc, chan);
4620 /* Let spectral at it in case spectral is enabled */
4621 ath_spectral_enable(sc, chan);
4624 * Re-enable rx framework.
4626 if (ath_startrecv(sc) != 0) {
4627 if_printf(ifp, "%s: unable to restart recv logic\n",
4634 * Change channels and update the h/w rate map
4635 * if we're switching; e.g. 11a to 11b/g.
4637 ath_chan_change(sc, chan);
4640 * Reset clears the beacon timers; reset them
4643 if (sc->sc_beacons) { /* restart beacons */
4644 #ifdef IEEE80211_SUPPORT_TDMA
4646 ath_tdma_config(sc, NULL);
4649 ath_beacon_config(sc, NULL);
4653 * Re-enable interrupts.
4656 ath_hal_intrset(ah, sc->sc_imask);
4662 sc->sc_inreset_cnt--;
4663 /* XXX only do this if sc_inreset_cnt == 0? */
4664 ath_hal_intrset(ah, sc->sc_imask);
4667 IF_LOCK(&ifp->if_snd);
4668 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4669 IF_UNLOCK(&ifp->if_snd);
4671 /* XXX ath_start? */
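/*
 * NB: illustrative sketch only (kept under #if 0, not compiled).
 * This is the serialisation pattern used above: quiesce deferred
 * work and interrupts, wait out in-flight TX/RX completion, take a
 * reset reference so concurrent resets are detected, and on the way
 * out drop the reference and restore the interrupt mask.
 */
#if 0
	/* Entry */
	taskqueue_block(sc->sc_tq);		/* defer new taskqueue work */
	ATH_PCU_LOCK(sc);
	ath_hal_intrset(ah, 0);			/* stop new TX/RX completion */
	ath_txrx_stop_locked(sc);		/* wait for pending completion */
	if (ath_reset_grablock(sc, 1) == 0)
		device_printf(sc->sc_dev, "%s: concurrent reset!\n",
		    __func__);
	ATH_PCU_UNLOCK(sc);

	/* ... perform the reset / channel change ... */

	/* Exit */
	ATH_PCU_LOCK(sc);
	sc->sc_inreset_cnt--;
	ath_hal_intrset(ah, sc->sc_imask);	/* re-enable interrupts */
	ATH_PCU_UNLOCK(sc);
#endif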
4677 * Periodically recalibrate the PHY to account
4678 * for temperature/environment changes.
4681 ath_calibrate(void *arg)
4683 struct ath_softc *sc = arg;
4684 struct ath_hal *ah = sc->sc_ah;
4685 struct ifnet *ifp = sc->sc_ifp;
4686 struct ieee80211com *ic = ifp->if_l2com;
4687 HAL_BOOL longCal, isCalDone = AH_TRUE;
4688 HAL_BOOL aniCal, shortCal = AH_FALSE;
4691 if (ic->ic_flags & IEEE80211_F_SCAN) /* defer, off channel */
4693 longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz);
4694 aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000);
4695 if (sc->sc_doresetcal)
4696 shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000);
4698 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: shortCal=%d; longCal=%d; aniCal=%d\n", __func__, shortCal, longCal, aniCal);
4700 sc->sc_stats.ast_ani_cal++;
4701 sc->sc_lastani = ticks;
4702 ath_hal_ani_poll(ah, sc->sc_curchan);
4706 sc->sc_stats.ast_per_cal++;
4707 sc->sc_lastlongcal = ticks;
4708 if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) {
4710 * Rfgain is out of bounds, reset the chip
4711 * to load new gain values.
4713 DPRINTF(sc, ATH_DEBUG_CALIBRATE,
4714 "%s: rfgain change\n", __func__);
4715 sc->sc_stats.ast_per_rfgain++;
4716 sc->sc_resetcal = 0;
4717 sc->sc_doresetcal = AH_TRUE;
4718 taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask);
4719 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
4723 * If this long cal is after an idle period, then
4724 * reset the data collection state so we start fresh.
4726 if (sc->sc_resetcal) {
4727 (void) ath_hal_calreset(ah, sc->sc_curchan);
4728 sc->sc_lastcalreset = ticks;
4729 sc->sc_lastshortcal = ticks;
4730 sc->sc_resetcal = 0;
4731 sc->sc_doresetcal = AH_TRUE;
4735 /* Only call if we're doing a short/long cal, not for ANI calibration */
4736 if (shortCal || longCal) {
4737 isCalDone = AH_FALSE;
4738 if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) {
4741 * Calibrate noise floor data again in case of change.
4743 ath_hal_process_noisefloor(ah);
4746 DPRINTF(sc, ATH_DEBUG_ANY,
4747 "%s: calibration of channel %u failed\n",
4748 __func__, sc->sc_curchan->ic_freq);
4749 sc->sc_stats.ast_per_calfail++;
4752 sc->sc_lastshortcal = ticks;
4757 * Use a shorter interval to potentially collect multiple
4758 * data samples required to complete calibration. Once
4759 * we're told the work is done we drop back to a longer
4760 * interval between requests. We're more aggressive doing
4761 * work when operating as an AP to improve operation right after startup.
4764 sc->sc_lastshortcal = ticks;
4765 nextcal = ath_shortcalinterval*hz/1000;
4766 if (sc->sc_opmode != HAL_M_HOSTAP)
4768 sc->sc_doresetcal = AH_TRUE;
4770 /* nextcal should be the shortest time for next event */
4771 nextcal = ath_longcalinterval*hz;
4772 if (sc->sc_lastcalreset == 0)
4773 sc->sc_lastcalreset = sc->sc_lastlongcal;
4774 else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz)
4775 sc->sc_resetcal = 1; /* setup reset next trip */
4776 sc->sc_doresetcal = AH_FALSE;
4778 /* ANI calibration may occur more often than short/long/resetcal */
4779 if (ath_anicalinterval > 0)
4780 nextcal = MIN(nextcal, ath_anicalinterval*hz/1000);
4783 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n",
4784 __func__, nextcal, isCalDone ? "" : "!");
4785 callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc);
4787 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n",
4789 /* NB: don't rearm timer */
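/*
 * NB: illustrative sketch only (kept under #if 0, not compiled).
 * This is how the next interval is derived above: the long cal
 * interval is in seconds, while the short and ANI cal intervals are
 * in milliseconds, hence the different conversions to ticks.
 */
#if 0
	u_int nextcal;

	if (!isCalDone)
		nextcal = ath_shortcalinterval * hz / 1000;	/* still converging */
	else
		nextcal = ath_longcalinterval * hz;		/* steady state */
	if (ath_anicalinterval > 0)				/* ANI may poll sooner */
		nextcal = MIN(nextcal, ath_anicalinterval * hz / 1000);
	callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc);
#endif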
4794 ath_scan_start(struct ieee80211com *ic)
4796 struct ifnet *ifp = ic->ic_ifp;
4797 struct ath_softc *sc = ifp->if_softc;
4798 struct ath_hal *ah = sc->sc_ah;
4801 /* XXX calibration timer? */
4804 sc->sc_scanning = 1;
4805 sc->sc_syncbeacon = 0;
4806 rfilt = ath_calcrxfilter(sc);
4810 ath_hal_setrxfilter(ah, rfilt);
4811 ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0);
4814 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n",
4815 __func__, rfilt, ether_sprintf(ifp->if_broadcastaddr));
4819 ath_scan_end(struct ieee80211com *ic)
4821 struct ifnet *ifp = ic->ic_ifp;
4822 struct ath_softc *sc = ifp->if_softc;
4823 struct ath_hal *ah = sc->sc_ah;
4827 sc->sc_scanning = 0;
4828 rfilt = ath_calcrxfilter(sc);
4832 ath_hal_setrxfilter(ah, rfilt);
4833 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
4835 ath_hal_process_noisefloor(ah);
4838 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
4839 __func__, rfilt, ether_sprintf(sc->sc_curbssid),
4843 #ifdef ATH_ENABLE_11N
4845 * For now, just do a channel change.
4847 * Later, we'll go through the hard slog of suspending tx/rx, changing rate
4848 * control state and resetting the hardware without dropping frames out of the queue.
4851 * The unfortunate trouble here is making absolutely sure that the
4852 * channel width change has propagated enough so the hardware
4853 * absolutely isn't handed bogus frames for its current operating
4854 * mode. (Eg, 40MHz frames in 20MHz mode.) Since TX and RX can and
4855 * do occur in parallel, we need to make certain we've blocked
4856 * any further ongoing TX (and RX, that can cause raw TX)
4857 * before we do this.
4860 ath_update_chw(struct ieee80211com *ic)
4862 struct ifnet *ifp = ic->ic_ifp;
4863 struct ath_softc *sc = ifp->if_softc;
4865 DPRINTF(sc, ATH_DEBUG_STATE, "%s: called\n", __func__);
4866 ath_set_channel(ic);
4868 #endif /* ATH_ENABLE_11N */
4871 ath_set_channel(struct ieee80211com *ic)
4873 struct ifnet *ifp = ic->ic_ifp;
4874 struct ath_softc *sc = ifp->if_softc;
4876 (void) ath_chan_set(sc, ic->ic_curchan);
4878 * If we are returning to our bss channel then mark state
4879 * so the next recv'd beacon's tsf will be used to sync the
4880 * beacon timers. Note that since we only hear beacons in
4881 * sta/ibss mode this has no effect in other operating modes.
4884 if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan)
4885 sc->sc_syncbeacon = 1;
4890 * Walk the vap list and check if there are any vaps in RUN state.
4893 ath_isanyrunningvaps(struct ieee80211vap *this)
4895 struct ieee80211com *ic = this->iv_ic;
4896 struct ieee80211vap *vap;
4898 IEEE80211_LOCK_ASSERT(ic);
4900 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
4901 if (vap != this && vap->iv_state >= IEEE80211_S_RUN)
4908 ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4910 struct ieee80211com *ic = vap->iv_ic;
4911 struct ath_softc *sc = ic->ic_ifp->if_softc;
4912 struct ath_vap *avp = ATH_VAP(vap);
4913 struct ath_hal *ah = sc->sc_ah;
4914 struct ieee80211_node *ni = NULL;
4915 int i, error, stamode;
4917 int csa_run_transition = 0;
4919 static const HAL_LED_STATE leds[] = {
4920 HAL_LED_INIT, /* IEEE80211_S_INIT */
4921 HAL_LED_SCAN, /* IEEE80211_S_SCAN */
4922 HAL_LED_AUTH, /* IEEE80211_S_AUTH */
4923 HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */
4924 HAL_LED_RUN, /* IEEE80211_S_CAC */
4925 HAL_LED_RUN, /* IEEE80211_S_RUN */
4926 HAL_LED_RUN, /* IEEE80211_S_CSA */
4927 HAL_LED_RUN, /* IEEE80211_S_SLEEP */
4930 DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__,
4931 ieee80211_state_name[vap->iv_state],
4932 ieee80211_state_name[nstate]);
4935 * net80211 _should_ have the comlock asserted at this point.
4936 * There are some comments around the calls to vap->iv_newstate
4937 * which indicate that it (newstate) may end up dropping the
4938 * lock. This and the subsequent lock assert check after newstate
4939 * are an attempt to catch these and figure out how/why.
4941 IEEE80211_LOCK_ASSERT(ic);
4943 if (vap->iv_state == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN)
4944 csa_run_transition = 1;
4946 callout_drain(&sc->sc_cal_ch);
4947 ath_hal_setledstate(ah, leds[nstate]); /* set LED */
4949 if (nstate == IEEE80211_S_SCAN) {
4951 * Scanning: turn off beacon miss and don't beacon.
4952 * Mark beacon state so when we reach RUN state we'll
4953 * [re]setup beacons. Unblock the task q thread so
4954 * deferred interrupt processing is done.
4957 sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS));
4958 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
4960 taskqueue_unblock(sc->sc_tq);
4963 ni = ieee80211_ref_node(vap->iv_bss);
4964 rfilt = ath_calcrxfilter(sc);
4965 stamode = (vap->iv_opmode == IEEE80211_M_STA ||
4966 vap->iv_opmode == IEEE80211_M_AHDEMO ||
4967 vap->iv_opmode == IEEE80211_M_IBSS);
4968 if (stamode && nstate == IEEE80211_S_RUN) {
4969 sc->sc_curaid = ni->ni_associd;
4970 IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid);
4971 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
4973 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
4974 __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid);
4975 ath_hal_setrxfilter(ah, rfilt);
4977 /* XXX is this to restore keycache on resume? */
4978 if (vap->iv_opmode != IEEE80211_M_STA &&
4979 (vap->iv_flags & IEEE80211_F_PRIVACY)) {
4980 for (i = 0; i < IEEE80211_WEP_NKID; i++)
4981 if (ath_hal_keyisvalid(ah, i))
4982 ath_hal_keysetmac(ah, i, ni->ni_bssid);
4986 * Invoke the parent method to do net80211 work.
4988 error = avp->av_newstate(vap, nstate, arg);
4993 * See above: ensure av_newstate() doesn't drop the lock
4996 IEEE80211_LOCK_ASSERT(ic);
4998 if (nstate == IEEE80211_S_RUN) {
4999 /* NB: collect bss node again, it may have changed */
5000 ieee80211_free_node(ni);
5001 ni = ieee80211_ref_node(vap->iv_bss);
5003 DPRINTF(sc, ATH_DEBUG_STATE,
5004 "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
5005 "capinfo 0x%04x chan %d\n", __func__,
5006 vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid),
5007 ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan));
5009 switch (vap->iv_opmode) {
5010 #ifdef IEEE80211_SUPPORT_TDMA
5011 case IEEE80211_M_AHDEMO:
5012 if ((vap->iv_caps & IEEE80211_C_TDMA) == 0)
5016 case IEEE80211_M_HOSTAP:
5017 case IEEE80211_M_IBSS:
5018 case IEEE80211_M_MBSS:
5020 * Allocate and setup the beacon frame.
5022 * Stop any previous beacon DMA. This may be
5023 * necessary, for example, when an ibss merge
5024 * causes reconfiguration; there will be a state
5025 * transition from RUN->RUN that means we may
5026 * be called with beacon transmission active.
5028 ath_hal_stoptxdma(ah, sc->sc_bhalq);
5030 error = ath_beacon_alloc(sc, ni);
5034 * If joining an adhoc network defer beacon timer
5035 * configuration to the next beacon frame so we
5036 * have a current TSF to use. Otherwise we're
5037 * starting an ibss/bss so there's no need to delay;
5038 * if this is the first vap moving to RUN state, then
5039 * beacon state needs to be [re]configured.
5041 if (vap->iv_opmode == IEEE80211_M_IBSS &&
5042 ni->ni_tstamp.tsf != 0) {
5043 sc->sc_syncbeacon = 1;
5044 } else if (!sc->sc_beacons) {
5045 #ifdef IEEE80211_SUPPORT_TDMA
5046 if (vap->iv_caps & IEEE80211_C_TDMA)
5047 ath_tdma_config(sc, vap);
5050 ath_beacon_config(sc, vap);
5054 case IEEE80211_M_STA:
5056 * Defer beacon timer configuration to the next
5057 * beacon frame so we have a current TSF to use
5058 * (any TSF collected when scanning is likely old).
5059 * However if it's due to a CSA -> RUN transition,
5060 * force a beacon update so we pick up a lack of
5061 * beacons from an AP in CAC and thus force a
5064 * And, there's also corner cases here where
5065 * after a scan, the AP may have disappeared.
5066 * In that case, we may not receive an actual
5067 * beacon to update the beacon timer and thus we
5068 * won't get notified of the missing beacons.
5070 sc->sc_syncbeacon = 1;
5072 if (csa_run_transition)
5074 ath_beacon_config(sc, vap);
5079 * Reconfigure beacons during reset; as otherwise
5080 * we won't get the beacon timers reprogrammed
5081 * after a reset and thus we won't pick up a
5082 * beacon miss interrupt.
5084 * Hopefully we'll see a beacon before the BMISS
5085 * timer fires (too often), leading to a STA
5090 case IEEE80211_M_MONITOR:
5092 * Monitor mode vaps have only INIT->RUN and RUN->RUN
5093 * transitions so we must re-enable interrupts here to
5094 * handle the case of a single monitor mode vap.
5096 ath_hal_intrset(ah, sc->sc_imask);
5098 case IEEE80211_M_WDS:
5104 * Let the hal process statistics collected during a
5105 * scan so it can provide calibrated noise floor data.
5107 ath_hal_process_noisefloor(ah);
5109 * Reset rssi stats; maybe not the best place...
5111 sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
5112 sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
5113 sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
5115 * Finally, start any timers and the task q thread
5116 * (in case we didn't go through SCAN state).
5118 if (ath_longcalinterval != 0) {
5119 /* start periodic recalibration timer */
5120 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
5122 DPRINTF(sc, ATH_DEBUG_CALIBRATE,
5123 "%s: calibration disabled\n", __func__);
5125 taskqueue_unblock(sc->sc_tq);
5126 } else if (nstate == IEEE80211_S_INIT) {
5128 * If there are no vaps left in RUN state then
5129 * shutdown host/driver operation:
5130 * o disable interrupts
5131 * o disable the task queue thread
5132 * o mark beacon processing as stopped
5134 if (!ath_isanyrunningvaps(vap)) {
5135 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
5136 /* disable interrupts */
5137 ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL);
5138 taskqueue_block(sc->sc_tq);
5141 #ifdef IEEE80211_SUPPORT_TDMA
5142 ath_hal_setcca(ah, AH_TRUE);
5146 ieee80211_free_node(ni);
5151 * Allocate a key cache slot to the station so we can
5152 * setup a mapping from key index to node. The key cache
5153 * slot is needed for managing antenna state and for
5154 * compression when stations do not use crypto. We do
5155 * it unilaterally here; if crypto is employed this slot
5156 * will be reassigned.
5159 ath_setup_stationkey(struct ieee80211_node *ni)
5161 struct ieee80211vap *vap = ni->ni_vap;
5162 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
5163 ieee80211_keyix keyix, rxkeyix;
5165 /* XXX should take a locked ref to vap->iv_bss */
5166 if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) {
5168 * Key cache is full; we'll fall back to doing
5169 * the more expensive lookup in software. Note
5170 * this also means no h/w compression.
5172 /* XXX msg+statistic */
5175 ni->ni_ucastkey.wk_keyix = keyix;
5176 ni->ni_ucastkey.wk_rxkeyix = rxkeyix;
5177 /* NB: must mark device key to get called back on delete */
5178 ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY;
5179 IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr);
5180 /* NB: this will create a pass-thru key entry */
5181 ath_keyset(sc, vap, &ni->ni_ucastkey, vap->iv_bss);
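/*
 * NB: illustrative sketch only (kept under #if 0, not compiled).
 * The point of the key index -> node mapping is that the RX path can
 * go straight from the key cache slot reported by the HAL to the
 * station's node.  A hypothetical RX-side lookup, assuming "ic", "m"
 * and "rs" are the ieee80211com, received mbuf and HAL RX status in
 * scope, might be:
 */
#if 0
	struct ieee80211_node *ni;

	ni = ieee80211_find_rxnode_withkey(ic,
	    mtod(m, const struct ieee80211_frame_min *),
	    rs->rs_keyix == HAL_RXKEYIX_INVALID ?
		IEEE80211_KEYIX_NONE : rs->rs_keyix);
#endif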
5186 * Setup driver-specific state for a newly associated node.
5187 * Note that we're also called on a re-associate; the isnew
5188 * param tells us if this is the first time or not.
5191 ath_newassoc(struct ieee80211_node *ni, int isnew)
5193 struct ath_node *an = ATH_NODE(ni);
5194 struct ieee80211vap *vap = ni->ni_vap;
5195 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
5196 const struct ieee80211_txparam *tp = ni->ni_txparms;
5198 an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate);
5199 an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate);
5201 ath_rate_newassoc(sc, an, isnew);
5203 (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey &&
5204 ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE)
5205 ath_setup_stationkey(ni);
5209 ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg,
5210 int nchans, struct ieee80211_channel chans[])
5212 struct ath_softc *sc = ic->ic_ifp->if_softc;
5213 struct ath_hal *ah = sc->sc_ah;
5216 DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
5217 "%s: rd %u cc %u location %c%s\n",
5218 __func__, reg->regdomain, reg->country, reg->location,
5219 reg->ecm ? " ecm" : "");
5221 status = ath_hal_set_channels(ah, chans, nchans,
5222 reg->country, reg->regdomain);
5223 if (status != HAL_OK) {
5224 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n",
5226 return EINVAL; /* XXX */
5233 ath_getradiocaps(struct ieee80211com *ic,
5234 int maxchans, int *nchans, struct ieee80211_channel chans[])
5236 struct ath_softc *sc = ic->ic_ifp->if_softc;
5237 struct ath_hal *ah = sc->sc_ah;
5239 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n",
5240 __func__, SKU_DEBUG, CTRY_DEFAULT);
5242 /* XXX check return */
5243 (void) ath_hal_getchannels(ah, chans, maxchans, nchans,
5244 HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE);
5249 ath_getchannels(struct ath_softc *sc)
5251 struct ifnet *ifp = sc->sc_ifp;
5252 struct ieee80211com *ic = ifp->if_l2com;
5253 struct ath_hal *ah = sc->sc_ah;
5257 * Collect channel set based on EEPROM contents.
5259 status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX,
5260 &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE);
5261 if (status != HAL_OK) {
5262 if_printf(ifp, "%s: unable to collect channel list from hal, "
5263 "status %d\n", __func__, status);
5266 (void) ath_hal_getregdomain(ah, &sc->sc_eerd);
5267 ath_hal_getcountrycode(ah, &sc->sc_eecc); /* NB: cannot fail */
5268 /* XXX map Atheros sku's to net80211 SKU's */
5269 /* XXX net80211 types too small */
5270 ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd;
5271 ic->ic_regdomain.country = (uint16_t) sc->sc_eecc;
5272 ic->ic_regdomain.isocc[0] = ' '; /* XXX don't know */
5273 ic->ic_regdomain.isocc[1] = ' ';
5275 ic->ic_regdomain.ecm = 1;
5276 ic->ic_regdomain.location = 'I';
5278 DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
5279 "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n",
5280 __func__, sc->sc_eerd, sc->sc_eecc,
5281 ic->ic_regdomain.regdomain, ic->ic_regdomain.country,
5282 ic->ic_regdomain.location, ic->ic_regdomain.ecm ? " ecm" : "");
5287 ath_rate_setup(struct ath_softc *sc, u_int mode)
5289 struct ath_hal *ah = sc->sc_ah;
5290 const HAL_RATE_TABLE *rt;
5293 case IEEE80211_MODE_11A:
5294 rt = ath_hal_getratetable(ah, HAL_MODE_11A);
5296 case IEEE80211_MODE_HALF:
5297 rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE);
5299 case IEEE80211_MODE_QUARTER:
5300 rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE);
5302 case IEEE80211_MODE_11B:
5303 rt = ath_hal_getratetable(ah, HAL_MODE_11B);
5305 case IEEE80211_MODE_11G:
5306 rt = ath_hal_getratetable(ah, HAL_MODE_11G);
5308 case IEEE80211_MODE_TURBO_A:
5309 rt = ath_hal_getratetable(ah, HAL_MODE_108A);
5311 case IEEE80211_MODE_TURBO_G:
5312 rt = ath_hal_getratetable(ah, HAL_MODE_108G);
5314 case IEEE80211_MODE_STURBO_A:
5315 rt = ath_hal_getratetable(ah, HAL_MODE_TURBO);
5317 case IEEE80211_MODE_11NA:
5318 rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20);
5320 case IEEE80211_MODE_11NG:
5321 rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20);
5324 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n",
5328 sc->sc_rates[mode] = rt;
5329 return (rt != NULL);
5333 ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
5335 #define N(a) (sizeof(a)/sizeof(a[0]))
5336 /* NB: on/off times from the Atheros NDIS driver, w/ permission */
5337 static const struct {
5338 u_int rate; /* tx/rx 802.11 rate */
5339 u_int16_t timeOn; /* LED on time (ms) */
5340 u_int16_t timeOff; /* LED off time (ms) */
5356 /* XXX half/quarter rates */
5358 const HAL_RATE_TABLE *rt;
5361 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
5362 rt = sc->sc_rates[mode];
5363 KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
5364 for (i = 0; i < rt->rateCount; i++) {
5365 uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
5366 if (rt->info[i].phy != IEEE80211_T_HT)
5367 sc->sc_rixmap[ieeerate] = i;
5369 sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i;
5371 memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
5372 for (i = 0; i < N(sc->sc_hwmap); i++) {
5373 if (i >= rt->rateCount) {
5374 sc->sc_hwmap[i].ledon = (500 * hz) / 1000;
5375 sc->sc_hwmap[i].ledoff = (130 * hz) / 1000;
5378 sc->sc_hwmap[i].ieeerate =
5379 rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
5380 if (rt->info[i].phy == IEEE80211_T_HT)
5381 sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS;
5382 sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD;
5383 if (rt->info[i].shortPreamble ||
5384 rt->info[i].phy == IEEE80211_T_OFDM)
5385 sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE;
5386 sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags;
5387 for (j = 0; j < N(blinkrates)-1; j++)
5388 if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate)
5390 /* NB: this uses the last entry if the rate isn't found */
5391 /* XXX beware of overflow */
5392 sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000;
5393 sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000;
5395 sc->sc_currates = rt;
5396 sc->sc_curmode = mode;
5398 * All protection frames are transmitted at 2Mb/s for
5399 * 11g, otherwise at 1Mb/s.
5401 if (mode == IEEE80211_MODE_11G)
5402 sc->sc_protrix = ath_tx_findrix(sc, 2*2);
5404 sc->sc_protrix = ath_tx_findrix(sc, 2*1);
5405 /* NB: caller is responsible for resetting rate control state */
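/*
 * NB: illustrative sketch only (kept under #if 0, not compiled).
 * Hypothetical lookups against the sc_rixmap built above: legacy
 * entries are indexed by the 802.11 rate code in 500kb/s units, HT
 * entries carry IEEE80211_RATE_MCS, and 0xff means the current rate
 * table has no such rate.
 */
#if 0
	uint8_t rix_11m, rix_mcs7;

	rix_11m = sc->sc_rixmap[2 * 11];			/* 11 Mb/s CCK */
	rix_mcs7 = sc->sc_rixmap[7 | IEEE80211_RATE_MCS];	/* MCS 7 */
#endif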
5410 ath_watchdog(void *arg)
5412 struct ath_softc *sc = arg;
5415 if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) {
5416 struct ifnet *ifp = sc->sc_ifp;
5419 if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) &&
5421 if_printf(ifp, "%s hang detected (0x%x)\n",
5422 hangs & 0xff ? "bb" : "mac", hangs);
5424 if_printf(ifp, "device timeout\n");
5427 sc->sc_stats.ast_watchdog++;
5431 * We can't hold the lock across the ath_reset() call.
5433 * And since this routine can't hold a lock and sleep,
5434 * do the reset deferred.
5437 taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask);
5440 callout_schedule(&sc->sc_wd_ch, hz);
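/*
 * NB: illustrative sketch only (kept under #if 0, not compiled).
 * The watchdog counts down once per second (the callout above
 * reschedules itself every hz ticks), so a TX path wanting a roughly
 * five second deadline would arm it as below; the drain path above
 * disarms it by setting sc_wd_timer back to 0.  The value 5 here is
 * just an example.
 */
#if 0
	sc->sc_wd_timer = 5;	/* expire ~5 callout periods from now */
#endif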
5444 * Fetch the rate control statistics for the given node.
5447 ath_ioctl_ratestats(struct ath_softc *sc, struct ath_rateioctl *rs)
5449 struct ath_node *an;
5450 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
5451 struct ieee80211_node *ni;
5454 /* Perform a lookup on the given node */
5455 ni = ieee80211_find_node(&ic->ic_sta, rs->is_u.macaddr);
5461 /* Lock the ath_node */
5465 /* Fetch the rate control stats for this node */
5466 error = ath_rate_fetch_node_stats(sc, an, rs);
5468 /* No matter what happens here, just drop through */
5470 /* Unlock the ath_node */
5471 ATH_NODE_UNLOCK(an);
5473 /* Unref the node */
5474 ieee80211_node_decref(ni);
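/*
 * NB: illustrative sketch only (kept under #if 0, not compiled).
 * The full shape of the lookup this handler follows: find the node
 * (which takes a reference), lock the driver node state, fetch the
 * stats, unlock, then drop the reference.  ENOENT for the not-found
 * case is just a plausible choice here.
 */
#if 0
	ni = ieee80211_find_node(&ic->ic_sta, rs->is_u.macaddr);
	if (ni == NULL)
		return (ENOENT);		/* no such station */
	an = ATH_NODE(ni);

	ATH_NODE_LOCK(an);
	error = ath_rate_fetch_node_stats(sc, an, rs);
	ATH_NODE_UNLOCK(an);

	ieee80211_node_decref(ni);		/* drop find_node's reference */
	return (error);
#endif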
5482 * Diagnostic interface to the HAL. This is used by various
5483 * tools to do things like retrieve register contents for
5484 * debugging. The mechanism is intentionally opaque so that
5485 * it can change frequently w/o concern for compatibility.
5488 ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad)
5490 struct ath_hal *ah = sc->sc_ah;
5491 u_int id = ad->ad_id & ATH_DIAG_ID;
5492 void *indata = NULL;
5493 void *outdata = NULL;
5494 u_int32_t insize = ad->ad_in_size;
5495 u_int32_t outsize = ad->ad_out_size;
5498 if (ad->ad_id & ATH_DIAG_IN) {
5502 indata = malloc(insize, M_TEMP, M_NOWAIT);
5503 if (indata == NULL) {
5507 error = copyin(ad->ad_in_data, indata, insize);
5511 if (ad->ad_id & ATH_DIAG_DYN) {
5513 * Allocate a buffer for the results (otherwise the HAL
5514 * returns a pointer to a buffer where we can read the
5515 * results). Note that we depend on the HAL leaving this
5516 * pointer for us to use below in reclaiming the buffer;
5517 * may want to be more defensive.
5519 outdata = malloc(outsize, M_TEMP, M_NOWAIT);
5520 if (outdata == NULL) {
5525 if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) {
5526 if (outsize < ad->ad_out_size)
5527 ad->ad_out_size = outsize;
5528 if (outdata != NULL)
5529 error = copyout(outdata, ad->ad_out_data,
5535 if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL)
5536 free(indata, M_TEMP);
5537 if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL)
5538 free(outdata, M_TEMP);
5541 #endif /* ATH_DIAGAPI */
5544 ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
5546 #define IS_RUNNING(ifp) \
5547 ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
5548 struct ath_softc *sc = ifp->if_softc;
5549 struct ieee80211com *ic = ifp->if_l2com;
5550 struct ifreq *ifr = (struct ifreq *)data;
5551 const HAL_RATE_TABLE *rt;
5557 if (IS_RUNNING(ifp)) {
5559 * To avoid rescanning another access point,
5560 * do not call ath_init() here. Instead,
5561 * only reflect promisc mode settings.
5564 } else if (ifp->if_flags & IFF_UP) {
5566 * Beware of being called during attach/detach
5567 * to reset promiscuous mode. In that case we
5568 * will still be marked UP but not RUNNING.
5569 * However trying to re-init the interface
5570 * is the wrong thing to do as we've already
5571 * torn down much of our state. There's
5572 * probably a better way to deal with this.
5574 if (!sc->sc_invalid)
5575 ath_init(sc); /* XXX lose error */
5577 ath_stop_locked(ifp);
5579 /* XXX must wakeup in places like ath_vap_delete */
5580 if (!sc->sc_invalid)
5581 ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP);
5588 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
5591 /* NB: embed these numbers to get a consistent view */
5592 sc->sc_stats.ast_tx_packets = ifp->if_opackets;
5593 sc->sc_stats.ast_rx_packets = ifp->if_ipackets;
5594 sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi);
5595 sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi);
5596 #ifdef IEEE80211_SUPPORT_TDMA
5597 sc->sc_stats.ast_tdma_tsfadjp = TDMA_AVG(sc->sc_avgtsfdeltap);
5598 sc->sc_stats.ast_tdma_tsfadjm = TDMA_AVG(sc->sc_avgtsfdeltam);
5600 rt = sc->sc_currates;
5601 sc->sc_stats.ast_tx_rate =
5602 rt->info[sc->sc_txrix].dot11Rate &~ IEEE80211_RATE_BASIC;
5603 if (rt->info[sc->sc_txrix].phy & IEEE80211_T_HT)
5604 sc->sc_stats.ast_tx_rate |= IEEE80211_RATE_MCS;
5605 return copyout(&sc->sc_stats,
5606 ifr->ifr_data, sizeof (sc->sc_stats));
5607 case SIOCGATHAGSTATS:
5608 return copyout(&sc->sc_aggr_stats,
5609 ifr->ifr_data, sizeof (sc->sc_aggr_stats));
5611 error = priv_check(curthread, PRIV_DRIVER);
5613 memset(&sc->sc_stats, 0, sizeof(sc->sc_stats));
5614 memset(&sc->sc_aggr_stats, 0,
5615 sizeof(sc->sc_aggr_stats));
5616 memset(&sc->sc_intr_stats, 0,
5617 sizeof(sc->sc_intr_stats));
5622 error = ath_ioctl_diag(sc, (struct ath_diag *) ifr);
5624 case SIOCGATHPHYERR:
5625 error = ath_ioctl_phyerr(sc,(struct ath_diag*) ifr);
5628 case SIOCGATHSPECTRAL:
5629 error = ath_ioctl_spectral(sc,(struct ath_diag*) ifr);
5631 case SIOCGATHNODERATESTATS:
5632 error = ath_ioctl_ratestats(sc, (struct ath_rateioctl *) ifr);
5635 error = ether_ioctl(ifp, cmd, data);
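/*
 * NB: illustrative userland-side sketch only (kept under #if 0, not
 * part of this driver).  This is roughly how a tool would read the
 * counters exported via the statistics case above; it assumes the
 * SIOCGATHSTATS ioctl and struct ath_stats from
 * dev/ath/if_athioctl.h, plus the usual <sys/socket.h>,
 * <sys/ioctl.h>, <net/if.h>, <err.h>, <stdio.h>, <string.h> and
 * <unistd.h> headers.
 */
#if 0
	struct ifreq ifr;
	struct ath_stats stats;
	int s;

	s = socket(AF_INET, SOCK_DGRAM, 0);
	if (s < 0)
		err(1, "socket");
	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, "ath0", sizeof(ifr.ifr_name));
	ifr.ifr_data = (caddr_t) &stats;
	if (ioctl(s, SIOCGATHSTATS, &ifr) < 0)
		err(1, "SIOCGATHSTATS");
	printf("tx %u rx %u\n", (unsigned) stats.ast_tx_packets,
	    (unsigned) stats.ast_rx_packets);
	close(s);
#endif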
5646 * Announce various information on device/driver attach.
5649 ath_announce(struct ath_softc *sc)
5651 struct ifnet *ifp = sc->sc_ifp;
5652 struct ath_hal *ah = sc->sc_ah;
5654 if_printf(ifp, "AR%s mac %d.%d RF%s phy %d.%d\n",
5655 ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev,
5656 ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf);
5657 if_printf(ifp, "2GHz radio: 0x%.4x; 5GHz radio: 0x%.4x\n",
5658 ah->ah_analog2GhzRev, ah->ah_analog5GhzRev);
5661 for (i = 0; i <= WME_AC_VO; i++) {
5662 struct ath_txq *txq = sc->sc_ac2q[i];
5663 if_printf(ifp, "Use hw queue %u for %s traffic\n",
5664 txq->axq_qnum, ieee80211_wme_acnames[i]);
5666 if_printf(ifp, "Use hw queue %u for CAB traffic\n",
5667 sc->sc_cabq->axq_qnum);
5668 if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq);
5670 if (ath_rxbuf != ATH_RXBUF)
5671 if_printf(ifp, "using %u rx buffers\n", ath_rxbuf);
5672 if (ath_txbuf != ATH_TXBUF)
5673 if_printf(ifp, "using %u tx buffers\n", ath_txbuf);
5674 if (sc->sc_mcastkey && bootverbose)
5675 if_printf(ifp, "using multicast key search\n");
5679 ath_dfs_tasklet(void *p, int npending)
5681 struct ath_softc *sc = (struct ath_softc *) p;
5682 struct ifnet *ifp = sc->sc_ifp;
5683 struct ieee80211com *ic = ifp->if_l2com;
5686 * If previous processing has found a radar event,
5687 * signal this to the net80211 layer to begin DFS
5690 if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) {
5691 /* DFS event found, initiate channel change */
5693 * XXX doesn't currently tell us whether the event
5694 * XXX was found in the primary or extension
5698 ieee80211_dfs_notify_radar(ic, sc->sc_curchan);
5699 IEEE80211_UNLOCK(ic);
5704 * Enable/disable power save. This must be called with
5705 * no TX driver locks currently held, so it should only
5706 * be called from the RX path (which doesn't hold any
5710 ath_node_powersave(struct ieee80211_node *ni, int enable)
5713 struct ath_node *an = ATH_NODE(ni);
5714 struct ieee80211com *ic = ni->ni_ic;
5715 struct ath_softc *sc = ic->ic_ifp->if_softc;
5716 struct ath_vap *avp = ATH_VAP(ni->ni_vap);
5718 ATH_NODE_UNLOCK_ASSERT(an);
5719 /* XXX and no TXQ locks should be held here */
5721 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: ni=%p, enable=%d\n",
5722 __func__, ni, enable);
5724 /* Suspend or resume software queue handling */
5726 ath_tx_node_sleep(sc, an);
5728 ath_tx_node_wakeup(sc, an);
5730 /* Update net80211 state */
5731 avp->av_node_ps(ni, enable);
5733 struct ath_vap *avp = ATH_VAP(ni->ni_vap);
5735 /* Update net80211 state */
5736 avp->av_node_ps(ni, enable);
5737 #endif/* ATH_SW_PSQ */
5741 * Notification from net80211 that the powersave queue state has
5744 * Since the software queue also may have some frames:
5746 * + if the node software queue has frames and the TID state
5747 * is 0, we set the TIM;
5748 * + if the node and the stack are both empty, we clear the TIM bit;
5749 * + if the stack tries to set the bit, always set it;
5750 * + if the stack tries to clear the bit, only clear it if the
5751 * software queue in question is also cleared.
5753 * TODO: this is called during node teardown; so let's ensure this
5754 * is all correctly handled and that the TIM bit is cleared.
5755 * It may be that the node flush is called _AFTER_ the net80211
5756 * stack clears the TIM.
5758 * Here is the racy part. Since it's possible >1 concurrent,
5759 * overlapping TXes will appear complete with a TX completion in
5760 * another thread, it's possible that the concurrent TIM calls will
5761 * clash. We can't hold the node lock here because setting the
5762 * TIM grabs the net80211 comlock and this may cause a LOR.
5763 * The solution is either to totally serialise _everything_ at
5764 * this point (ie, all TX, completion and any reset/flush go into
5765 * one taskqueue) or a new "ath TIM lock" needs to be created that
5766 * just wraps the driver state change and this call to avp->av_set_tim().
5768 * The same race exists in the net80211 power save queue handling
5769 * as well. Since multiple transmitting threads may queue frames
5770 * into the driver, as well as ps-poll and the driver transmitting
5771 * frames (and thus clearing the psq), it's quite possible that
5772 * a packet entering the PSQ and a ps-poll being handled will
5773 * race, causing the TIM to be cleared and not re-set.
5776 ath_node_set_tim(struct ieee80211_node *ni, int enable)
5779 struct ieee80211com *ic = ni->ni_ic;
5780 struct ath_softc *sc = ic->ic_ifp->if_softc;
5781 struct ath_node *an = ATH_NODE(ni);
5782 struct ath_vap *avp = ATH_VAP(ni->ni_vap);
5785 ATH_NODE_UNLOCK_ASSERT(an);
5788 * For now, just track and then update the TIM.
5791 an->an_stack_psq = enable;
5794 * This will get called for all operating modes,
5795 * even if avp->av_set_tim is unset.
5796 * It's currently set for hostap/ibss modes; but
5797 * the same infrastructure is used for both STA
5798 * and AP/IBSS node power save.
5800 if (avp->av_set_tim == NULL) {
5801 ATH_NODE_UNLOCK(an);
5806 * If setting the bit, always set it here.
5807 * If clearing the bit, only clear it if the
5808 * software queue is also empty.
5810 * If the node has left power save, just clear the TIM
5811 * bit regardless of the state of the power save queue.
5813 * XXX TODO: although atomics are used, it's quite possible
5814 * that a race will occur between this and setting/clearing
5815 * in another thread. TX completion will occur always in
5816 * one thread, however setting/clearing the TIM bit can come
5817 * from a variety of different process contexts!
5819 if (enable && an->an_tim_set == 1) {
5820 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
5821 "%s: an=%p, enable=%d, tim_set=1, ignoring\n",
5822 __func__, an, enable);
5823 ATH_NODE_UNLOCK(an);
5824 } else if (enable) {
5825 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
5826 "%s: an=%p, enable=%d, enabling TIM\n",
5827 __func__, an, enable);
5829 ATH_NODE_UNLOCK(an);
5830 changed = avp->av_set_tim(ni, enable);
5831 } else if (atomic_load_acq_int(&an->an_swq_depth) == 0) {
5833 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
5834 "%s: an=%p, enable=%d, an_swq_depth == 0, disabling\n",
5835 __func__, an, enable);
5837 ATH_NODE_UNLOCK(an);
5838 changed = avp->av_set_tim(ni, enable);
5839 } else if (! an->an_is_powersave) {
5841 * disable regardless; the node isn't in powersave now
5843 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
5844 "%s: an=%p, enable=%d, an_pwrsave=0, disabling\n",
5845 __func__, an, enable);
5847 ATH_NODE_UNLOCK(an);
5848 changed = avp->av_set_tim(ni, enable);
5851 * psq disable, node is currently in powersave, node
5852 * software queue isn't empty, so don't clear the TIM bit
5855 ATH_NODE_UNLOCK(an);
5856 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
5857 "%s: enable=%d, an_swq_depth > 0, ignoring\n",
5864 struct ath_vap *avp = ATH_VAP(ni->ni_vap);
5867 * Some operating modes don't set av_set_tim(), so don't
5870 if (avp->av_set_tim == NULL)
5873 return (avp->av_set_tim(ni, enable));
5874 #endif /* ATH_SW_PSQ */
5878 * Set or update the TIM from the software queue.
5880 * Check the software queue depth before attempting to take
5881 * any locks; that avoids needlessly grabbing the lock. Then,
5882 * re-check afterwards to ensure nothing has changed in the
5885 * set: This is designed to be called from the TX path, after
5886 * a frame has been queued; to see if the swq > 0.
5888 * clear: This is designed to be called from the buffer completion point
5889 * (right now it's ath_tx_default_comp()) where the state of
5890 * a software queue has changed.
5892 * It makes sense to place it at buffer free / completion rather
5893 * than after each software queue operation, as there's no real
5894 * point in churning the TIM bit as the last frames in the software
5895 * queue are transmitted. If they fail and we retry them, we'd
5896 * just be setting the TIM bit again anyway.
5899 ath_tx_update_tim(struct ath_softc *sc, struct ieee80211_node *ni,
5903 struct ath_node *an;
5904 struct ath_vap *avp;
5906 /* Don't do this for broadcast/etc frames */
5911 avp = ATH_VAP(ni->ni_vap);
5914 * And for operating modes without the TIM handler set, let's
5917 if (avp->av_set_tim == NULL)
5920 ATH_NODE_UNLOCK_ASSERT(an);
5924 * Don't bother grabbing the lock unless the queue is not
5927 if (atomic_load_acq_int(&an->an_swq_depth) == 0)
5931 if (an->an_is_powersave &&
5932 an->an_tim_set == 0 &&
5933 atomic_load_acq_int(&an->an_swq_depth) != 0) {
5934 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
5935 "%s: an=%p, swq_depth>0, tim_set=0, set!\n",
5938 ATH_NODE_UNLOCK(an);
5939 (void) avp->av_set_tim(ni, 1);
5941 ATH_NODE_UNLOCK(an);
5945 * Don't bother grabbing the lock unless the queue is empty.
5947 if (atomic_load_acq_int(&an->an_swq_depth) != 0)
5951 if (an->an_is_powersave &&
5952 an->an_stack_psq == 0 &&
5953 an->an_tim_set == 1 &&
5954 atomic_load_acq_int(&an->an_swq_depth) == 0) {
5955 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
5956 "%s: an=%p, swq_depth=0, tim_set=1, psq_set=0,"
5960 ATH_NODE_UNLOCK(an);
5961 (void) avp->av_set_tim(ni, 0);
5963 ATH_NODE_UNLOCK(an);
5968 #endif /* ATH_SW_PSQ */
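/*
 * NB: illustrative sketch only (kept under #if 0, not compiled).
 * Per the comment above, the two natural call sites are the TX path
 * right after a frame is queued (set) and the buffer completion path
 * once a node's software queue may have drained (clear).
 */
#if 0
	/* TX path: a frame was just added to the node's software queue */
	ath_tx_update_tim(sc, ni, 1);

	/* Completion path: the node's software queue may now be empty */
	ath_tx_update_tim(sc, bf->bf_node, 0);
#endif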
5971 MODULE_VERSION(if_ath, 1);
5972 MODULE_DEPEND(if_ath, wlan, 1, 1, 1); /* 802.11 media layer */
5973 #if defined(IEEE80211_ALQ) || defined(AH_DEBUG_ALQ)
5974 MODULE_DEPEND(if_ath, alq, 1, 1, 1);