2 * Copyright (c) 2002-2007 Sam Leffler, Errno Consulting
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13 * redistribution must be conditioned upon including a substantially
14 * similar Disclaimer requirement for further binary redistribution.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGES.
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
34 * Driver for the Atheros Wireless LAN controller.
36 * This software is derived from work of Atsushi Onoe; his contribution
37 * is greatly appreciated.
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/sysctl.h>
47 #include <sys/malloc.h>
49 #include <sys/mutex.h>
50 #include <sys/kernel.h>
51 #include <sys/socket.h>
52 #include <sys/sockio.h>
53 #include <sys/errno.h>
54 #include <sys/callout.h>
56 #include <sys/endian.h>
57 #include <sys/kthread.h>
58 #include <sys/taskqueue.h>
60 #include <machine/bus.h>
63 #include <net/if_dl.h>
64 #include <net/if_media.h>
65 #include <net/if_types.h>
66 #include <net/if_arp.h>
67 #include <net/ethernet.h>
68 #include <net/if_llc.h>
70 #include <net80211/ieee80211_var.h>
75 #include <netinet/in.h>
76 #include <netinet/if_ether.h>
79 #include <dev/ath/if_athvar.h>
80 #include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */
83 #include <dev/ath/ath_tx99/ath_tx99.h>
87 * We require a HAL w/ the changes for split tx/rx MIC.
89 CTASSERT(HAL_ABI_VERSION > 0x06052200);
91 /* unaligned little endian access */
92 #define LE_READ_2(p) \
94 ((((u_int8_t *)(p))[0] ) | (((u_int8_t *)(p))[1] << 8)))
95 #define LE_READ_4(p) \
97 ((((u_int8_t *)(p))[0] ) | (((u_int8_t *)(p))[1] << 8) | \
98 (((u_int8_t *)(p))[2] << 16) | (((u_int8_t *)(p))[3] << 24)))
106 static void ath_init(void *);
107 static void ath_stop_locked(struct ifnet *);
108 static void ath_stop(struct ifnet *);
109 static void ath_start(struct ifnet *);
110 static int ath_reset(struct ifnet *);
111 static int ath_media_change(struct ifnet *);
112 static void ath_watchdog(struct ifnet *);
113 static int ath_ioctl(struct ifnet *, u_long, caddr_t);
114 static void ath_fatal_proc(void *, int);
115 static void ath_rxorn_proc(void *, int);
116 static void ath_bmiss_proc(void *, int);
117 static int ath_key_alloc(struct ieee80211com *,
118 const struct ieee80211_key *,
119 ieee80211_keyix *, ieee80211_keyix *);
120 static int ath_key_delete(struct ieee80211com *,
121 const struct ieee80211_key *);
122 static int ath_key_set(struct ieee80211com *, const struct ieee80211_key *,
123 const u_int8_t mac[IEEE80211_ADDR_LEN]);
124 static void ath_key_update_begin(struct ieee80211com *);
125 static void ath_key_update_end(struct ieee80211com *);
126 static void ath_mode_init(struct ath_softc *);
127 static void ath_setslottime(struct ath_softc *);
128 static void ath_updateslot(struct ifnet *);
129 static int ath_beaconq_setup(struct ath_hal *);
130 static int ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *);
131 static void ath_beacon_update(struct ieee80211com *, int item);
132 static void ath_beacon_setup(struct ath_softc *, struct ath_buf *);
133 static void ath_beacon_proc(void *, int);
134 static void ath_bstuck_proc(void *, int);
135 static void ath_beacon_free(struct ath_softc *);
136 static void ath_beacon_config(struct ath_softc *);
137 static void ath_descdma_cleanup(struct ath_softc *sc,
138 struct ath_descdma *, ath_bufhead *);
139 static int ath_desc_alloc(struct ath_softc *);
140 static void ath_desc_free(struct ath_softc *);
141 static struct ieee80211_node *ath_node_alloc(struct ieee80211_node_table *);
142 static void ath_node_free(struct ieee80211_node *);
143 static int8_t ath_node_getrssi(const struct ieee80211_node *);
144 static void ath_node_getsignal(const struct ieee80211_node *,
146 static int ath_rxbuf_init(struct ath_softc *, struct ath_buf *);
147 static void ath_recv_mgmt(struct ieee80211com *ic, struct mbuf *m,
148 struct ieee80211_node *ni,
149 int subtype, int rssi, int noise, u_int32_t rstamp);
150 static void ath_setdefantenna(struct ath_softc *, u_int);
151 static void ath_rx_proc(void *, int);
152 static void ath_txq_init(struct ath_softc *sc, struct ath_txq *, int);
153 static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype);
154 static int ath_tx_setup(struct ath_softc *, int, int);
155 static int ath_wme_update(struct ieee80211com *);
156 static void ath_tx_cleanupq(struct ath_softc *, struct ath_txq *);
157 static void ath_tx_cleanup(struct ath_softc *);
158 static void ath_freetx(struct mbuf *);
159 static int ath_tx_start(struct ath_softc *, struct ieee80211_node *,
160 struct ath_buf *, struct mbuf *);
161 static void ath_tx_proc_q0(void *, int);
162 static void ath_tx_proc_q0123(void *, int);
163 static void ath_tx_proc(void *, int);
164 static int ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
165 static void ath_draintxq(struct ath_softc *);
166 static void ath_stoprecv(struct ath_softc *);
167 static int ath_startrecv(struct ath_softc *);
168 static void ath_chan_change(struct ath_softc *, struct ieee80211_channel *);
169 static void ath_scan_start(struct ieee80211com *);
170 static void ath_scan_end(struct ieee80211com *);
171 static void ath_set_channel(struct ieee80211com *);
172 static void ath_calibrate(void *);
173 static int ath_newstate(struct ieee80211com *, enum ieee80211_state, int);
174 static void ath_setup_stationkey(struct ieee80211_node *);
175 static void ath_newassoc(struct ieee80211_node *, int);
176 static int ath_getchannels(struct ath_softc *,
177 HAL_REG_DOMAIN, HAL_CTRY_CODE, HAL_BOOL, HAL_BOOL);
178 static void ath_led_event(struct ath_softc *, int);
179 static void ath_update_txpow(struct ath_softc *);
181 static int ath_rate_setup(struct ath_softc *, u_int mode);
182 static void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);
184 static void ath_sysctlattach(struct ath_softc *);
185 static int ath_raw_xmit(struct ieee80211_node *,
186 struct mbuf *, const struct ieee80211_bpf_params *);
187 static void ath_bpfattach(struct ath_softc *);
188 static void ath_announce(struct ath_softc *);
190 SYSCTL_DECL(_hw_ath);
192 /* XXX validate sysctl values */
193 static int ath_calinterval = 30; /* calibrate every 30 secs */
194 SYSCTL_INT(_hw_ath, OID_AUTO, calibrate, CTLFLAG_RW, &ath_calinterval,
195 0, "chip calibration interval (secs)");
196 static int ath_outdoor = AH_TRUE; /* outdoor operation */
197 SYSCTL_INT(_hw_ath, OID_AUTO, outdoor, CTLFLAG_RW, &ath_outdoor,
198 0, "outdoor operation");
199 TUNABLE_INT("hw.ath.outdoor", &ath_outdoor);
200 static int ath_xchanmode = AH_TRUE; /* extended channel use */
201 SYSCTL_INT(_hw_ath, OID_AUTO, xchanmode, CTLFLAG_RW, &ath_xchanmode,
202 0, "extended channel mode");
203 TUNABLE_INT("hw.ath.xchanmode", &ath_xchanmode);
204 static int ath_countrycode = CTRY_DEFAULT; /* country code */
205 SYSCTL_INT(_hw_ath, OID_AUTO, countrycode, CTLFLAG_RW, &ath_countrycode,
207 TUNABLE_INT("hw.ath.countrycode", &ath_countrycode);
208 static int ath_regdomain = 0; /* regulatory domain */
209 SYSCTL_INT(_hw_ath, OID_AUTO, regdomain, CTLFLAG_RD, &ath_regdomain,
210 0, "regulatory domain");
212 static int ath_rxbuf = ATH_RXBUF; /* # rx buffers to allocate */
213 SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf,
214 0, "rx buffers allocated");
215 TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf);
216 static int ath_txbuf = ATH_TXBUF; /* # tx buffers to allocate */
217 SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf,
218 0, "tx buffers allocated");
219 TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);
222 static int ath_debug = 0;
223 SYSCTL_INT(_hw_ath, OID_AUTO, debug, CTLFLAG_RW, &ath_debug,
224 0, "control debugging printfs");
225 TUNABLE_INT("hw.ath.debug", &ath_debug);
227 ATH_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
228 ATH_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */
229 ATH_DEBUG_RECV = 0x00000004, /* basic recv operation */
230 ATH_DEBUG_RECV_DESC = 0x00000008, /* recv descriptors */
231 ATH_DEBUG_RATE = 0x00000010, /* rate control */
232 ATH_DEBUG_RESET = 0x00000020, /* reset processing */
233 ATH_DEBUG_MODE = 0x00000040, /* mode init/setup */
234 ATH_DEBUG_BEACON = 0x00000080, /* beacon handling */
235 ATH_DEBUG_WATCHDOG = 0x00000100, /* watchdog timeout */
236 ATH_DEBUG_INTR = 0x00001000, /* ISR */
237 ATH_DEBUG_TX_PROC = 0x00002000, /* tx ISR proc */
238 ATH_DEBUG_RX_PROC = 0x00004000, /* rx ISR proc */
239 ATH_DEBUG_BEACON_PROC = 0x00008000, /* beacon ISR proc */
240 ATH_DEBUG_CALIBRATE = 0x00010000, /* periodic calibration */
241 ATH_DEBUG_KEYCACHE = 0x00020000, /* key cache management */
242 ATH_DEBUG_STATE = 0x00040000, /* 802.11 state transitions */
243 ATH_DEBUG_NODE = 0x00080000, /* node management */
244 ATH_DEBUG_LED = 0x00100000, /* led management */
245 ATH_DEBUG_FF = 0x00200000, /* fast frames */
246 ATH_DEBUG_DFS = 0x00400000, /* DFS processing */
247 ATH_DEBUG_FATAL = 0x80000000, /* fatal errors */
248 ATH_DEBUG_ANY = 0xffffffff
250 #define IFF_DUMPPKTS(sc, m) \
251 ((sc->sc_debug & (m)) || \
252 (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
253 #define DPRINTF(sc, m, fmt, ...) do { \
254 if (sc->sc_debug & (m)) \
255 printf(fmt, __VA_ARGS__); \
257 #define KEYPRINTF(sc, ix, hk, mac) do { \
258 if (sc->sc_debug & ATH_DEBUG_KEYCACHE) \
259 ath_keyprint(sc, __func__, ix, hk, mac); \
261 static void ath_printrxbuf(const struct ath_buf *bf, u_int ix, int);
262 static void ath_printtxbuf(const struct ath_buf *bf, u_int qnum, u_int ix, int done);
264 #define IFF_DUMPPKTS(sc, m) \
265 ((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
266 #define DPRINTF(sc, m, fmt, ...) do { \
269 #define KEYPRINTF(sc, k, ix, mac) do { \
274 MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");
277 ath_attach(u_int16_t devid, struct ath_softc *sc)
280 struct ieee80211com *ic = &sc->sc_ic;
281 struct ath_hal *ah = NULL;
285 DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);
287 ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
289 device_printf(sc->sc_dev, "can not if_alloc()\n");
294 /* set these up early for if_printf use */
295 if_initname(ifp, device_get_name(sc->sc_dev),
296 device_get_unit(sc->sc_dev));
298 ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, &status);
300 if_printf(ifp, "unable to attach hardware; HAL status %u\n",
305 if (ah->ah_abi != HAL_ABI_VERSION) {
306 if_printf(ifp, "HAL ABI mismatch detected "
307 "(HAL:0x%x != driver:0x%x)\n",
308 ah->ah_abi, HAL_ABI_VERSION);
313 sc->sc_invalid = 0; /* ready to go, enable interrupt handling */
316 * Check if the MAC has multi-rate retry support.
317 * We do this by trying to setup a fake extended
318 * descriptor. MAC's that don't have support will
319 * return false w/o doing anything. MAC's that do
320 * support it will return true w/o doing anything.
322 sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);
325 * Check if the device has hardware counters for PHY
326 * errors. If so we need to enable the MIB interrupt
327 * so we can act on stat triggers.
329 if (ath_hal_hwphycounters(ah))
333 * Get the hardware key cache size.
335 sc->sc_keymax = ath_hal_keycachesize(ah);
336 if (sc->sc_keymax > ATH_KEYMAX) {
337 if_printf(ifp, "Warning, using only %u of %u key cache slots\n",
338 ATH_KEYMAX, sc->sc_keymax);
339 sc->sc_keymax = ATH_KEYMAX;
342 * Reset the key cache since some parts do not
343 * reset the contents on initial power up.
345 for (i = 0; i < sc->sc_keymax; i++)
346 ath_hal_keyreset(ah, i);
349 * Collect the channel list using the default country
350 * code and including outdoor channels. The 802.11 layer
351 * is resposible for filtering this list based on settings
354 error = ath_getchannels(sc, ath_regdomain, ath_countrycode,
355 ath_outdoor != 0, ath_xchanmode != 0);
360 * Setup rate tables for all potential media types.
362 ath_rate_setup(sc, IEEE80211_MODE_11A);
363 ath_rate_setup(sc, IEEE80211_MODE_11B);
364 ath_rate_setup(sc, IEEE80211_MODE_11G);
365 ath_rate_setup(sc, IEEE80211_MODE_TURBO_A);
366 ath_rate_setup(sc, IEEE80211_MODE_TURBO_G);
367 ath_rate_setup(sc, IEEE80211_MODE_STURBO_A);
368 ath_rate_setup(sc, IEEE80211_MODE_11NA);
369 ath_rate_setup(sc, IEEE80211_MODE_11NG);
370 ath_rate_setup(sc, IEEE80211_MODE_HALF);
371 ath_rate_setup(sc, IEEE80211_MODE_QUARTER);
373 /* NB: setup here so ath_rate_update is happy */
374 ath_setcurmode(sc, IEEE80211_MODE_11A);
377 * Allocate tx+rx descriptors and populate the lists.
379 error = ath_desc_alloc(sc);
381 if_printf(ifp, "failed to allocate descriptors: %d\n", error);
384 callout_init(&sc->sc_cal_ch, CALLOUT_MPSAFE);
386 ATH_TXBUF_LOCK_INIT(sc);
388 sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT,
389 taskqueue_thread_enqueue, &sc->sc_tq);
390 taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
391 "%s taskq", ifp->if_xname);
393 TASK_INIT(&sc->sc_rxtask, 0, ath_rx_proc, sc);
394 TASK_INIT(&sc->sc_rxorntask, 0, ath_rxorn_proc, sc);
395 TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
396 TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc);
399 * Allocate hardware transmit queues: one queue for
400 * beacon frames and one data queue for each QoS
401 * priority. Note that the hal handles reseting
402 * these queues at the needed time.
406 sc->sc_bhalq = ath_beaconq_setup(ah);
407 if (sc->sc_bhalq == (u_int) -1) {
408 if_printf(ifp, "unable to setup a beacon xmit queue!\n");
412 sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
413 if (sc->sc_cabq == NULL) {
414 if_printf(ifp, "unable to setup CAB xmit queue!\n");
418 /* NB: s/w q, qnum used only by WITNESS */
419 ath_txq_init(sc, &sc->sc_mcastq, HAL_NUM_TX_QUEUES+1);
420 /* NB: insure BK queue is the lowest priority h/w queue */
421 if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
422 if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
423 ieee80211_wme_acnames[WME_AC_BK]);
427 if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
428 !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
429 !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
431 * Not enough hardware tx queues to properly do WME;
432 * just punt and assign them all to the same h/w queue.
433 * We could do a better job of this if, for example,
434 * we allocate queues when we switch from station to
437 if (sc->sc_ac2q[WME_AC_VI] != NULL)
438 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
439 if (sc->sc_ac2q[WME_AC_BE] != NULL)
440 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
441 sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
442 sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
443 sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
447 * Special case certain configurations. Note the
448 * CAB queue is handled by these specially so don't
449 * include them when checking the txq setup mask.
451 switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) {
453 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc);
456 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc);
459 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc);
464 * Setup rate control. Some rate control modules
465 * call back to change the anntena state so expose
466 * the necessary entry points.
467 * XXX maybe belongs in struct ath_ratectrl?
469 sc->sc_setdefantenna = ath_setdefantenna;
470 sc->sc_rc = ath_rate_attach(sc);
471 if (sc->sc_rc == NULL) {
478 sc->sc_ledon = 0; /* low true */
479 sc->sc_ledidle = (2700*hz)/1000; /* 2.7sec */
480 callout_init(&sc->sc_ledtimer, CALLOUT_MPSAFE);
482 * Auto-enable soft led processing for IBM cards and for
483 * 5211 minipci cards. Users can also manually enable/disable
484 * support with a sysctl.
486 sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID);
487 if (sc->sc_softled) {
488 ath_hal_gpioCfgOutput(ah, sc->sc_ledpin);
489 ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon);
493 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
494 ifp->if_start = ath_start;
495 ifp->if_watchdog = ath_watchdog;
496 ifp->if_ioctl = ath_ioctl;
497 ifp->if_init = ath_init;
498 IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
499 ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
500 IFQ_SET_READY(&ifp->if_snd);
503 ic->ic_reset = ath_reset;
504 ic->ic_newassoc = ath_newassoc;
505 ic->ic_updateslot = ath_updateslot;
506 ic->ic_wme.wme_update = ath_wme_update;
507 /* XXX not right but it's not used anywhere important */
508 ic->ic_phytype = IEEE80211_T_OFDM;
509 ic->ic_opmode = IEEE80211_M_STA;
511 IEEE80211_C_IBSS /* ibss, nee adhoc, mode */
512 | IEEE80211_C_HOSTAP /* hostap mode */
513 | IEEE80211_C_MONITOR /* monitor mode */
514 | IEEE80211_C_AHDEMO /* adhoc demo mode */
515 | IEEE80211_C_SHPREAMBLE /* short preamble supported */
516 | IEEE80211_C_SHSLOT /* short slot time supported */
517 | IEEE80211_C_WPA /* capable of WPA1+WPA2 */
518 | IEEE80211_C_BGSCAN /* capable of bg scanning */
519 | IEEE80211_C_TXFRAG /* handle tx frags */
522 * Query the hal to figure out h/w crypto support.
524 if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
525 ic->ic_caps |= IEEE80211_C_WEP;
526 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
527 ic->ic_caps |= IEEE80211_C_AES;
528 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
529 ic->ic_caps |= IEEE80211_C_AES_CCM;
530 if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
531 ic->ic_caps |= IEEE80211_C_CKIP;
532 if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
533 ic->ic_caps |= IEEE80211_C_TKIP;
535 * Check if h/w does the MIC and/or whether the
536 * separate key cache entries are required to
537 * handle both tx+rx MIC keys.
539 if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
540 ic->ic_caps |= IEEE80211_C_TKIPMIC;
542 * If the h/w supports storing tx+rx MIC keys
543 * in one cache slot automatically enable use.
545 if (ath_hal_hastkipsplit(ah) ||
546 !ath_hal_settkipsplit(ah, AH_FALSE))
549 sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR);
550 sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah);
552 * Mark key cache slots associated with global keys
553 * as in use. If we knew TKIP was not to be used we
554 * could leave the +32, +64, and +32+64 slots free.
556 for (i = 0; i < IEEE80211_WEP_NKID; i++) {
557 setbit(sc->sc_keymap, i);
558 setbit(sc->sc_keymap, i+64);
559 if (sc->sc_splitmic) {
560 setbit(sc->sc_keymap, i+32);
561 setbit(sc->sc_keymap, i+32+64);
565 * TPC support can be done either with a global cap or
566 * per-packet support. The latter is not available on
567 * all parts. We're a bit pedantic here as all parts
568 * support a global cap.
570 if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
571 ic->ic_caps |= IEEE80211_C_TXPMGT;
574 * Mark WME capability only if we have sufficient
575 * hardware queues to do proper priority scheduling.
577 if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
578 ic->ic_caps |= IEEE80211_C_WME;
580 * Check for misc other capabilities.
582 if (ath_hal_hasbursting(ah))
583 ic->ic_caps |= IEEE80211_C_BURST;
584 if (ath_hal_hasfastframes(ah))
585 ic->ic_caps |= IEEE80211_C_FF;
586 if (ath_hal_getwirelessmodes(ah, ath_countrycode) & (HAL_MODE_108G|HAL_MODE_TURBO))
587 ic->ic_caps |= IEEE80211_C_TURBOP;
590 * Indicate we need the 802.11 header padded to a
591 * 32-bit boundary for 4-address and QoS frames.
593 ic->ic_flags |= IEEE80211_F_DATAPAD;
596 * Query the hal about antenna support.
598 sc->sc_defant = ath_hal_getdefantenna(ah);
601 * Not all chips have the VEOL support we want to
602 * use with IBSS beacons; check here for it.
604 sc->sc_hasveol = ath_hal_hasveol(ah);
606 /* get mac address from hardware */
607 ath_hal_getmac(ah, ic->ic_myaddr);
609 /* call MI attach routine. */
610 ieee80211_ifattach(ic);
611 sc->sc_opmode = ic->ic_opmode;
612 /* override default methods */
613 ic->ic_node_alloc = ath_node_alloc;
614 sc->sc_node_free = ic->ic_node_free;
615 ic->ic_node_free = ath_node_free;
616 ic->ic_node_getrssi = ath_node_getrssi;
617 ic->ic_node_getsignal = ath_node_getsignal;
618 sc->sc_recv_mgmt = ic->ic_recv_mgmt;
619 ic->ic_recv_mgmt = ath_recv_mgmt;
620 sc->sc_newstate = ic->ic_newstate;
621 ic->ic_newstate = ath_newstate;
622 ic->ic_scan_start = ath_scan_start;
623 ic->ic_scan_end = ath_scan_end;
624 ic->ic_set_channel = ath_set_channel;
625 ic->ic_crypto.cs_max_keyix = sc->sc_keymax;
626 ic->ic_crypto.cs_key_alloc = ath_key_alloc;
627 ic->ic_crypto.cs_key_delete = ath_key_delete;
628 ic->ic_crypto.cs_key_set = ath_key_set;
629 ic->ic_crypto.cs_key_update_begin = ath_key_update_begin;
630 ic->ic_crypto.cs_key_update_end = ath_key_update_end;
631 ic->ic_raw_xmit = ath_raw_xmit;
632 ic->ic_update_beacon = ath_beacon_update;
633 /* complete initialization */
634 ieee80211_media_init(ic, ath_media_change, ieee80211_media_status);
638 * Setup dynamic sysctl's now that country code and
639 * regdomain are available from the hal.
641 ath_sysctlattach(sc);
644 ieee80211_announce(ic);
660 ath_detach(struct ath_softc *sc)
662 struct ifnet *ifp = sc->sc_ifp;
664 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
665 __func__, ifp->if_flags);
670 * NB: the order of these is important:
671 * o call the 802.11 layer before detaching the hal to
672 * insure callbacks into the driver to delete global
673 * key cache entries can be handled
674 * o reclaim the tx queue data structures after calling
675 * the 802.11 layer as we'll get called back to reclaim
676 * node state and potentially want to use them
677 * o to cleanup the tx queues the hal is called, so detach
679 * Other than that, it's straightforward...
681 ieee80211_ifdetach(&sc->sc_ic);
683 if (sc->sc_tx99 != NULL)
684 sc->sc_tx99->detach(sc->sc_tx99);
686 taskqueue_free(sc->sc_tq);
687 ath_rate_detach(sc->sc_rc);
690 ath_hal_detach(sc->sc_ah);
697 ath_suspend(struct ath_softc *sc)
699 struct ifnet *ifp = sc->sc_ifp;
701 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
702 __func__, ifp->if_flags);
708 ath_resume(struct ath_softc *sc)
710 struct ifnet *ifp = sc->sc_ifp;
712 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
713 __func__, ifp->if_flags);
715 if (ifp->if_flags & IFF_UP) {
717 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
720 if (sc->sc_softled) {
721 ath_hal_gpioCfgOutput(sc->sc_ah, sc->sc_ledpin);
722 ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, !sc->sc_ledon);
727 ath_shutdown(struct ath_softc *sc)
729 struct ifnet *ifp = sc->sc_ifp;
731 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
732 __func__, ifp->if_flags);
738 * Interrupt handler. Most of the actual processing is deferred.
743 struct ath_softc *sc = arg;
744 struct ifnet *ifp = sc->sc_ifp;
745 struct ath_hal *ah = sc->sc_ah;
748 if (sc->sc_invalid) {
750 * The hardware is not ready/present, don't touch anything.
751 * Note this can happen early on if the IRQ is shared.
753 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
756 if (!ath_hal_intrpend(ah)) /* shared irq, not for us */
758 if ((ifp->if_flags & IFF_UP) == 0 ||
759 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
762 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
763 __func__, ifp->if_flags);
764 ath_hal_getisr(ah, &status); /* clear ISR */
765 ath_hal_intrset(ah, 0); /* disable further intr's */
769 * Figure out the reason(s) for the interrupt. Note
770 * that the hal returns a pseudo-ISR that may include
771 * bits we haven't explicitly enabled so we mask the
772 * value to insure we only process bits we requested.
774 ath_hal_getisr(ah, &status); /* NB: clears ISR too */
775 DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
776 status &= sc->sc_imask; /* discard unasked for bits */
777 if (status & HAL_INT_FATAL) {
778 sc->sc_stats.ast_hardware++;
779 ath_hal_intrset(ah, 0); /* disable intr's until reset */
780 ath_fatal_proc(sc, 0);
781 } else if (status & HAL_INT_RXORN) {
782 sc->sc_stats.ast_rxorn++;
783 ath_hal_intrset(ah, 0); /* disable intr's until reset */
784 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxorntask);
786 if (status & HAL_INT_SWBA) {
788 * Software beacon alert--time to send a beacon.
789 * Handle beacon transmission directly; deferring
790 * this is too slow to meet timing constraints
793 ath_beacon_proc(sc, 0);
795 if (status & HAL_INT_RXEOL) {
797 * NB: the hardware should re-read the link when
798 * RXE bit is written, but it doesn't work at
799 * least on older hardware revs.
801 sc->sc_stats.ast_rxeol++;
802 sc->sc_rxlink = NULL;
804 if (status & HAL_INT_TXURN) {
805 sc->sc_stats.ast_txurn++;
806 /* bump tx trigger level */
807 ath_hal_updatetxtriglevel(ah, AH_TRUE);
809 if (status & HAL_INT_RX)
810 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
811 if (status & HAL_INT_TX)
812 taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
813 if (status & HAL_INT_BMISS) {
814 sc->sc_stats.ast_bmiss++;
815 taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask);
817 if (status & HAL_INT_MIB) {
818 sc->sc_stats.ast_mib++;
820 * Disable interrupts until we service the MIB
821 * interrupt; otherwise it will continue to fire.
823 ath_hal_intrset(ah, 0);
825 * Let the hal handle the event. We assume it will
826 * clear whatever condition caused the interrupt.
828 ath_hal_mibevent(ah, &sc->sc_halstats);
829 ath_hal_intrset(ah, sc->sc_imask);
835 ath_fatal_proc(void *arg, int pending)
837 struct ath_softc *sc = arg;
838 struct ifnet *ifp = sc->sc_ifp;
843 if_printf(ifp, "hardware error; resetting\n");
845 * Fatal errors are unrecoverable. Typically these
846 * are caused by DMA errors. Collect h/w state from
847 * the hal so we can diagnose what's going on.
849 if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) {
850 KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len));
852 if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n",
853 state[0], state[1] , state[2], state[3],
860 ath_rxorn_proc(void *arg, int pending)
862 struct ath_softc *sc = arg;
863 struct ifnet *ifp = sc->sc_ifp;
865 if_printf(ifp, "rx FIFO overrun; resetting\n");
870 ath_bmiss_proc(void *arg, int pending)
872 struct ath_softc *sc = arg;
873 struct ieee80211com *ic = &sc->sc_ic;
875 DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);
876 KASSERT(ic->ic_opmode == IEEE80211_M_STA,
877 ("unexpect operating mode %u", ic->ic_opmode));
878 if (ic->ic_state == IEEE80211_S_RUN) {
879 u_int64_t lastrx = sc->sc_lastrx;
880 u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah);
882 ic->ic_bmissthreshold * ic->ic_bss->ni_intval * 1024;
884 DPRINTF(sc, ATH_DEBUG_BEACON,
885 "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n",
886 __func__, (unsigned long long) tsf,
887 (unsigned long long)(tsf - lastrx),
888 (unsigned long long) lastrx, bmisstimeout);
890 * Workaround phantom bmiss interrupts by sanity-checking
891 * the time of our last rx'd frame. If it is within the
892 * beacon miss interval then ignore the interrupt. If it's
893 * truly a bmiss we'll get another interrupt soon and that'll
894 * be dispatched up for processing.
896 if (tsf - lastrx > bmisstimeout)
897 ieee80211_beacon_miss(ic);
899 sc->sc_stats.ast_bmiss_phantom++;
904 * Convert net80211 channel to a HAL channel with the flags
905 * constrained to reflect the current operating mode and
906 * the frequency possibly mapped for GSM channels.
909 ath_mapchan(HAL_CHANNEL *hc, const struct ieee80211_channel *chan)
911 #define N(a) (sizeof(a) / sizeof(a[0]))
912 static const u_int modeflags[IEEE80211_MODE_MAX] = {
913 0, /* IEEE80211_MODE_AUTO */
914 CHANNEL_A, /* IEEE80211_MODE_11A */
915 CHANNEL_B, /* IEEE80211_MODE_11B */
916 CHANNEL_PUREG, /* IEEE80211_MODE_11G */
917 0, /* IEEE80211_MODE_FH */
918 CHANNEL_108A, /* IEEE80211_MODE_TURBO_A */
919 CHANNEL_108G, /* IEEE80211_MODE_TURBO_G */
920 CHANNEL_ST, /* IEEE80211_MODE_STURBO_A */
921 CHANNEL_A, /* IEEE80211_MODE_11NA */
922 CHANNEL_PUREG, /* IEEE80211_MODE_11NG */
924 enum ieee80211_phymode mode = ieee80211_chan2mode(chan);
926 KASSERT(mode < N(modeflags), ("unexpected phy mode %u", mode));
927 KASSERT(modeflags[mode] != 0, ("mode %u undefined", mode));
928 hc->channelFlags = modeflags[mode];
929 if (IEEE80211_IS_CHAN_HALF(chan))
930 hc->channelFlags |= CHANNEL_HALF;
931 if (IEEE80211_IS_CHAN_QUARTER(chan))
932 hc->channelFlags |= CHANNEL_QUARTER;
933 if (IEEE80211_IS_CHAN_HT20(chan))
934 hc->channelFlags |= CHANNEL_HT20;
935 if (IEEE80211_IS_CHAN_HT40D(chan))
936 hc->channelFlags |= CHANNEL_HT40MINUS;
937 if (IEEE80211_IS_CHAN_HT40U(chan))
938 hc->channelFlags |= CHANNEL_HT40PLUS;
940 hc->channel = IEEE80211_IS_CHAN_GSM(chan) ?
941 2422 + (922 - chan->ic_freq) : chan->ic_freq;
948 struct ath_softc *sc = (struct ath_softc *) arg;
949 struct ieee80211com *ic = &sc->sc_ic;
950 struct ifnet *ifp = sc->sc_ifp;
951 struct ath_hal *ah = sc->sc_ah;
954 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
955 __func__, ifp->if_flags);
959 * Stop anything previously setup. This is safe
960 * whether this is the first time through or not.
962 ath_stop_locked(ifp);
965 * The basic interface to setting the hardware in a good
966 * state is ``reset''. On return the hardware is known to
967 * be powered up and with interrupts disabled. This must
968 * be followed by initialization of the appropriate bits
969 * and then setup of the interrupt mask.
971 ath_mapchan(&sc->sc_curchan, ic->ic_curchan);
972 if (!ath_hal_reset(ah, sc->sc_opmode, &sc->sc_curchan, AH_FALSE, &status)) {
973 if_printf(ifp, "unable to reset hardware; hal status %u\n",
979 * This is needed only to setup initial state
980 * but it's best done after a reset.
982 ath_update_txpow(sc);
984 * Likewise this is set during reset so update
985 * state cached in the driver.
987 sc->sc_diversity = ath_hal_getdiversity(ah);
988 sc->sc_calinterval = 1;
992 * Setup the hardware after reset: the key cache
993 * is filled as needed and the receive engine is
994 * set going. Frame transmit is handled entirely
995 * in the frame output path; there's nothing to do
996 * here except setup the interrupt mask.
998 if (ath_startrecv(sc) != 0) {
999 if_printf(ifp, "unable to start recv logic\n");
1004 * Enable interrupts.
1006 sc->sc_imask = HAL_INT_RX | HAL_INT_TX
1007 | HAL_INT_RXEOL | HAL_INT_RXORN
1008 | HAL_INT_FATAL | HAL_INT_GLOBAL;
1010 * Enable MIB interrupts when there are hardware phy counters.
1011 * Note we only do this (at the moment) for station mode.
1013 if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA)
1014 sc->sc_imask |= HAL_INT_MIB;
1015 ath_hal_intrset(ah, sc->sc_imask);
1017 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1018 ic->ic_state = IEEE80211_S_INIT;
1021 * The hardware should be ready to go now so it's safe
1022 * to kick the 802.11 state machine as it's likely to
1023 * immediately call back to us to send mgmt frames.
1025 ath_chan_change(sc, ic->ic_curchan);
1026 #ifdef ATH_TX99_DIAG
1027 if (sc->sc_tx99 != NULL)
1028 sc->sc_tx99->start(sc->sc_tx99);
1031 if (ic->ic_opmode != IEEE80211_M_MONITOR) {
1032 if (ic->ic_roaming != IEEE80211_ROAMING_MANUAL)
1033 ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
1035 ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
1041 ath_stop_locked(struct ifnet *ifp)
1043 struct ath_softc *sc = ifp->if_softc;
1044 struct ieee80211com *ic = &sc->sc_ic;
1045 struct ath_hal *ah = sc->sc_ah;
1047 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
1048 __func__, sc->sc_invalid, ifp->if_flags);
1050 ATH_LOCK_ASSERT(sc);
1051 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1053 * Shutdown the hardware and driver:
1054 * reset 802.11 state machine
1056 * disable interrupts
1057 * turn off the radio
1058 * clear transmit machinery
1059 * clear receive machinery
1060 * drain and release tx queues
1061 * reclaim beacon resources
1062 * power down hardware
1064 * Note that some of this work is not possible if the
1065 * hardware is gone (invalid).
1067 #ifdef ATH_TX99_DIAG
1068 if (sc->sc_tx99 != NULL)
1069 sc->sc_tx99->stop(sc->sc_tx99);
1071 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
1072 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1074 if (!sc->sc_invalid) {
1075 if (sc->sc_softled) {
1076 callout_stop(&sc->sc_ledtimer);
1077 ath_hal_gpioset(ah, sc->sc_ledpin,
1079 sc->sc_blinking = 0;
1081 ath_hal_intrset(ah, 0);
1084 if (!sc->sc_invalid) {
1086 ath_hal_phydisable(ah);
1088 sc->sc_rxlink = NULL;
1089 IFQ_DRV_PURGE(&ifp->if_snd);
1090 ath_beacon_free(sc);
1095 ath_stop(struct ifnet *ifp)
1097 struct ath_softc *sc = ifp->if_softc;
1100 ath_stop_locked(ifp);
1101 if (!sc->sc_invalid) {
1103 * Set the chip in full sleep mode. Note that we are
1104 * careful to do this only when bringing the interface
1105 * completely to a stop. When the chip is in this state
1106 * it must be carefully woken up or references to
1107 * registers in the PCI clock domain may freeze the bus
1108 * (and system). This varies by chip and is mostly an
1109 * issue with newer parts that go to sleep more quickly.
1111 ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP);
1117 * Reset the hardware w/o losing operational state. This is
1118 * basically a more efficient way of doing ath_stop, ath_init,
1119 * followed by state transitions to the current 802.11
1120 * operational state. Used to recover from various errors and
1121 * to reset or reload hardware state.
1124 ath_reset(struct ifnet *ifp)
1126 struct ath_softc *sc = ifp->if_softc;
1127 struct ieee80211com *ic = &sc->sc_ic;
1128 struct ath_hal *ah = sc->sc_ah;
1132 * Convert to a HAL channel description with the flags
1133 * constrained to reflect the current operating mode.
1135 ath_mapchan(&sc->sc_curchan, ic->ic_curchan);
1137 ath_hal_intrset(ah, 0); /* disable interrupts */
1138 ath_draintxq(sc); /* stop xmit side */
1139 ath_stoprecv(sc); /* stop recv side */
1140 /* NB: indicate channel change so we do a full reset */
1141 if (!ath_hal_reset(ah, sc->sc_opmode, &sc->sc_curchan, AH_TRUE, &status))
1142 if_printf(ifp, "%s: unable to reset hardware; hal status %u\n",
1144 ath_update_txpow(sc); /* update tx power state */
1145 sc->sc_diversity = ath_hal_getdiversity(ah);
1146 sc->sc_calinterval = 1;
1147 sc->sc_caltries = 0;
1148 if (ath_startrecv(sc) != 0) /* restart recv */
1149 if_printf(ifp, "%s: unable to start recv logic\n", __func__);
1151 * We may be doing a reset in response to an ioctl
1152 * that changes the channel so update any state that
1153 * might change as a result.
1155 ath_chan_change(sc, ic->ic_curchan);
1156 if (ic->ic_state == IEEE80211_S_RUN)
1157 ath_beacon_config(sc); /* restart beacons */
1158 ath_hal_intrset(ah, sc->sc_imask);
1160 ath_start(ifp); /* restart xmit */
1165 ath_ff_always(struct ath_txq *txq, struct ath_buf *bf)
1172 ath_ff_ageflushtestdone(struct ath_txq *txq, struct ath_buf *bf)
1174 return (txq->axq_curage - bf->bf_age) < ATH_FF_STAGEMAX;
1179 * Flush FF staging queue.
1182 ath_ff_stageq_flush(struct ath_softc *sc, struct ath_txq *txq,
1183 int (*ath_ff_flushdonetest)(struct ath_txq *txq, struct ath_buf *bf))
1186 struct ieee80211_node *ni;
1192 * Go from the back (oldest) to front so we can
1193 * stop early based on the age of the entry.
1195 bf = TAILQ_LAST(&txq->axq_stageq, axq_headtype);
1196 if (bf == NULL || ath_ff_flushdonetest(txq, bf)) {
1197 ATH_TXQ_UNLOCK(txq);
1202 pri = M_WME_GETAC(bf->bf_m);
1203 KASSERT(ATH_NODE(ni)->an_ff_buf[pri],
1204 ("no bf on staging queue %p", bf));
1205 ATH_NODE(ni)->an_ff_buf[pri] = NULL;
1206 TAILQ_REMOVE(&txq->axq_stageq, bf, bf_stagelist);
1208 ATH_TXQ_UNLOCK(txq);
1210 DPRINTF(sc, ATH_DEBUG_FF, "%s: flush frame, age %u\n",
1211 __func__, bf->bf_age);
1213 sc->sc_stats.ast_ff_flush++;
1215 /* encap and xmit */
1216 bf->bf_m = ieee80211_encap(&sc->sc_ic, bf->bf_m, ni);
1217 if (bf->bf_m == NULL) {
1218 DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
1219 "%s: discard, encapsulation failure\n",
1221 sc->sc_stats.ast_tx_encap++;
1224 pktlen = bf->bf_m->m_pkthdr.len; /* NB: don't reference below */
1225 if (ath_tx_start(sc, ni, bf, bf->bf_m) == 0) {
1233 ieee80211_free_node(ni);
1235 if (bf->bf_m != NULL) {
1241 STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
1242 ATH_TXBUF_UNLOCK(sc);
1246 static __inline u_int32_t
1247 ath_ff_approx_txtime(struct ath_softc *sc, struct ath_node *an, struct mbuf *m)
1253 * Approximate the frame length to be transmitted. A swag to add
1254 * the following maximal values to the skb payload:
1255 * - 32: 802.11 encap + CRC
1256 * - 24: encryption overhead (if wep bit)
1257 * - 4 + 6: fast-frame header and padding
1258 * - 16: 2 LLC FF tunnel headers
1259 * - 14: 1 802.3 FF tunnel header (skb already accounts for 2nd)
1261 framelen = m->m_pkthdr.len + 32 + 4 + 6 + 16 + 14;
1262 if (sc->sc_ic.ic_flags & IEEE80211_F_PRIVACY)
1264 bf = an->an_ff_buf[M_WME_GETAC(m)];
1266 framelen += bf->bf_m->m_pkthdr.len;
1267 return ath_hal_computetxtime(sc->sc_ah, sc->sc_currates, framelen,
1268 sc->sc_lastdatarix, AH_FALSE);
1272 * Determine if a data frame may be aggregated via ff tunnelling.
1273 * Note the caller is responsible for checking if the destination
1274 * supports fast frames.
1276 * NB: allowing EAPOL frames to be aggregated with other unicast traffic.
1277 * Do 802.1x EAPOL frames proceed in the clear? Then they couldn't
1278 * be aggregated with other types of frames when encryption is on?
1280 * NB: assumes lock on an_ff_buf effectively held by txq lock mechanism.
1283 ath_ff_can_aggregate(struct ath_softc *sc,
1284 struct ath_node *an, struct mbuf *m, int *flushq)
1286 struct ieee80211com *ic = &sc->sc_ic;
1287 struct ath_txq *txq;
1288 u_int32_t txoplimit;
1294 * If there is no frame to combine with and the txq has
1295 * fewer frames than the minimum required; then do not
1296 * attempt to aggregate this frame.
1298 pri = M_WME_GETAC(m);
1299 txq = sc->sc_ac2q[pri];
1300 if (an->an_ff_buf[pri] == NULL && txq->axq_depth < sc->sc_fftxqmin)
1303 * When not in station mode never aggregate a multicast
1304 * frame; this insures, for example, that a combined frame
1305 * does not require multiple encryption keys when using
1308 if (ic->ic_opmode != IEEE80211_M_STA &&
1309 ETHER_IS_MULTICAST(mtod(m, struct ether_header *)->ether_dhost))
1312 * Consult the max bursting interval to insure a combined
1313 * frame fits within the TxOp window.
1315 txoplimit = IEEE80211_TXOP_TO_US(
1316 ic->ic_wme.wme_chanParams.cap_wmeParams[pri].wmep_txopLimit);
1317 if (txoplimit != 0 && ath_ff_approx_txtime(sc, an, m) > txoplimit) {
1318 DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
1319 "%s: FF TxOp violation\n", __func__);
1320 if (an->an_ff_buf[pri] != NULL)
1324 return 1; /* try to aggregate */
1328 * Check if the supplied frame can be partnered with an existing
1329 * or pending frame. Return a reference to any frame that should be
1330 * sent on return; otherwise return NULL.
1332 static struct mbuf *
1333 ath_ff_check(struct ath_softc *sc, struct ath_txq *txq,
1334 struct ath_buf *bf, struct mbuf *m, struct ieee80211_node *ni)
1336 struct ieee80211com *ic = ni->ni_ic;
1337 struct ath_node *an = ATH_NODE(ni);
1338 struct ath_buf *bfstaged;
1342 * Check if the supplied frame can be aggregated.
1344 * NB: we use the txq lock to protect references to
1345 * an->an_ff_txbuf in ath_ff_can_aggregate().
1348 pri = M_WME_GETAC(m);
1349 if (ath_ff_can_aggregate(sc, an, m, &ff_flush)) {
1350 struct ath_buf *bfstaged = an->an_ff_buf[pri];
1351 if (bfstaged != NULL) {
1353 * A frame is available for partnering; remove
1354 * it, chain it to this one, and encapsulate.
1356 an->an_ff_buf[pri] = NULL;
1357 TAILQ_REMOVE(&txq->axq_stageq, bfstaged, bf_stagelist);
1358 ATH_TXQ_UNLOCK(txq);
1361 * Chain mbufs and add FF magic.
1363 DPRINTF(sc, ATH_DEBUG_FF,
1364 "[%s] aggregate fast-frame, age %u\n",
1365 ether_sprintf(ni->ni_macaddr), txq->axq_curage);
1366 m->m_nextpkt = NULL;
1367 bfstaged->bf_m->m_nextpkt = m;
1369 bfstaged->bf_m = NULL;
1372 * Release the node reference held while
1373 * the packet sat on an_ff_buf[]
1375 bfstaged->bf_node = NULL;
1376 ieee80211_free_node(ni);
1379 * Return bfstaged to the free list.
1382 STAILQ_INSERT_TAIL(&sc->sc_txbuf, bfstaged, bf_list);
1383 ATH_TXBUF_UNLOCK(sc);
1385 return m; /* ready to go */
1388 * No frame available, queue this frame to wait
1389 * for a partner. Note that we hold the buffer
1390 * and a reference to the node; we need the
1391 * buffer in particular so we're certain we
1392 * can flush the frame at a later time.
1394 DPRINTF(sc, ATH_DEBUG_FF,
1395 "[%s] stage fast-frame, age %u\n",
1396 ether_sprintf(ni->ni_macaddr), txq->axq_curage);
1399 bf->bf_node = ni; /* NB: held reference */
1400 bf->bf_age = txq->axq_curage;
1401 an->an_ff_buf[pri] = bf;
1402 TAILQ_INSERT_HEAD(&txq->axq_stageq, bf, bf_stagelist);
1403 ATH_TXQ_UNLOCK(txq);
1405 return NULL; /* consumed */
1409 * Frame could not be aggregated, it needs to be returned
1410 * to the caller for immediate transmission. In addition
1411 * we check if we should first flush a frame from the
1412 * staging queue before sending this one.
1414 * NB: ath_ff_can_aggregate only marks ff_flush if a frame
1415 * is present to flush.
1420 bfstaged = an->an_ff_buf[pri];
1421 an->an_ff_buf[pri] = NULL;
1422 TAILQ_REMOVE(&txq->axq_stageq, bfstaged, bf_stagelist);
1423 ATH_TXQ_UNLOCK(txq);
1425 DPRINTF(sc, ATH_DEBUG_FF, "[%s] flush staged frame\n",
1426 ether_sprintf(an->an_node.ni_macaddr));
1428 /* encap and xmit */
1429 bfstaged->bf_m = ieee80211_encap(ic, bfstaged->bf_m, ni);
1430 if (bfstaged->bf_m == NULL) {
1431 DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
1432 "%s: discard, encap failure\n", __func__);
1433 sc->sc_stats.ast_tx_encap++;
1436 pktlen = bfstaged->bf_m->m_pkthdr.len;
1437 if (ath_tx_start(sc, ni, bfstaged, bfstaged->bf_m)) {
1438 DPRINTF(sc, ATH_DEBUG_XMIT,
1439 "%s: discard, xmit failure\n", __func__);
1442 * Unable to transmit frame that was on the staging
1443 * queue. Reclaim the node reference and other
1447 ieee80211_free_node(ni);
1448 bfstaged->bf_node = NULL;
1449 if (bfstaged->bf_m != NULL) {
1450 m_freem(bfstaged->bf_m);
1451 bfstaged->bf_m = NULL;
1455 STAILQ_INSERT_TAIL(&sc->sc_txbuf, bfstaged, bf_list);
1456 ATH_TXBUF_UNLOCK(sc);
1463 if (an->an_ff_buf[pri] != NULL) {
1465 * XXX: out-of-order condition only occurs for AP
1466 * mode and multicast. There may be no valid way
1467 * to get this condition.
1469 DPRINTF(sc, ATH_DEBUG_FF, "[%s] out-of-order frame\n",
1470 ether_sprintf(an->an_node.ni_macaddr));
1473 ATH_TXQ_UNLOCK(txq);
1479 * Cleanup driver resources when we run out of buffers
1480 * while processing fragments; return the tx buffers
1481 * allocated and drop node references.
1484 ath_txfrag_cleanup(struct ath_softc *sc,
1485 ath_bufhead *frags, struct ieee80211_node *ni)
1487 struct ath_buf *bf, *next;
1489 ATH_TXBUF_LOCK_ASSERT(sc);
1491 STAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {
1492 /* NB: bf assumed clean */
1493 STAILQ_REMOVE_HEAD(frags, bf_list);
1494 STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
1495 ieee80211_node_decref(ni);
1500 * Setup xmit of a fragmented frame. Allocate a buffer
1501 * for each frag and bump the node reference count to
1502 * reflect the held reference to be setup by ath_tx_start.
1505 ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
1506 struct mbuf *m0, struct ieee80211_node *ni)
1512 for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
1513 bf = STAILQ_FIRST(&sc->sc_txbuf);
1514 if (bf == NULL) { /* out of buffers, cleanup */
1515 ath_txfrag_cleanup(sc, frags, ni);
1518 STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
1519 ieee80211_node_incref(ni);
1520 STAILQ_INSERT_TAIL(frags, bf, bf_list);
1522 ATH_TXBUF_UNLOCK(sc);
1524 return !STAILQ_EMPTY(frags);
1528 ath_start(struct ifnet *ifp)
1530 struct ath_softc *sc = ifp->if_softc;
1531 struct ath_hal *ah = sc->sc_ah;
1532 struct ieee80211com *ic = &sc->sc_ic;
1533 struct ieee80211_node *ni;
1535 struct mbuf *m, *next;
1536 struct ieee80211_frame *wh;
1537 struct ether_header *eh;
1538 struct ath_txq *txq;
1542 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
1546 * Grab a TX buffer and associated resources.
1549 bf = STAILQ_FIRST(&sc->sc_txbuf);
1551 STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
1552 ATH_TXBUF_UNLOCK(sc);
1554 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: out of xmit buffers\n",
1556 sc->sc_stats.ast_tx_qstop++;
1557 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1561 * Poll the management queue for frames; they
1562 * have priority over normal data frames.
1564 IF_DEQUEUE(&ic->ic_mgtq, m);
1567 * No data frames go out unless we're associated.
1569 if (ic->ic_state != IEEE80211_S_RUN) {
1570 DPRINTF(sc, ATH_DEBUG_XMIT,
1571 "%s: discard data packet, state %s\n",
1573 ieee80211_state_name[ic->ic_state]);
1574 sc->sc_stats.ast_tx_discard++;
1576 STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
1577 ATH_TXBUF_UNLOCK(sc);
1580 IFQ_DRV_DEQUEUE(&ifp->if_snd, m); /* XXX: LOCK */
1583 STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
1584 ATH_TXBUF_UNLOCK(sc);
1588 * Cancel any background scan.
1590 if (ic->ic_flags & IEEE80211_F_SCAN)
1591 ieee80211_cancel_scan(ic);
1593 STAILQ_INIT(&frags);
1595 * Find the node for the destination so we can do
1596 * things like power save and fast frames aggregation.
1598 if (m->m_len < sizeof(struct ether_header) &&
1599 (m = m_pullup(m, sizeof(struct ether_header))) == NULL) {
1600 ic->ic_stats.is_tx_nobuf++; /* XXX */
1604 eh = mtod(m, struct ether_header *);
1605 ni = ieee80211_find_txnode(ic, eh->ether_dhost);
1607 /* NB: ieee80211_find_txnode does stat+msg */
1611 if ((ni->ni_flags & IEEE80211_NODE_PWR_MGT) &&
1612 (m->m_flags & M_PWR_SAV) == 0) {
1614 * Station in power save mode; pass the frame
1615 * to the 802.11 layer and continue. We'll get
1616 * the frame back when the time is right.
1618 ieee80211_pwrsave(ni, m);
1621 /* calculate priority so we can find the tx queue */
1622 if (ieee80211_classify(ic, m, ni)) {
1623 DPRINTF(sc, ATH_DEBUG_XMIT,
1624 "%s: discard, classification failure\n",
1629 pri = M_WME_GETAC(m);
1630 txq = sc->sc_ac2q[pri];
1631 if (ni->ni_ath_flags & IEEE80211_NODE_FF) {
1633 * Check queue length; if too deep drop this
1634 * frame (tail drop considered good).
1636 if (txq->axq_depth >= sc->sc_fftxqmax) {
1637 DPRINTF(sc, ATH_DEBUG_FF,
1638 "[%s] tail drop on q %u depth %u\n",
1639 ether_sprintf(ni->ni_macaddr),
1640 txq->axq_qnum, txq->axq_depth);
1641 sc->sc_stats.ast_tx_qfull++;
1645 m = ath_ff_check(sc, txq, bf, m, ni);
1647 /* NB: ni ref & bf held on stageq */
1654 * Encapsulate the packet in prep for transmission.
1656 m = ieee80211_encap(ic, m, ni);
1658 DPRINTF(sc, ATH_DEBUG_XMIT,
1659 "%s: encapsulation failure\n",
1661 sc->sc_stats.ast_tx_encap++;
1665 * Check for fragmentation. If this frame
1666 * has been broken up verify we have enough
1667 * buffers to send all the fragments so all
1670 if ((m->m_flags & M_FRAG) &&
1671 !ath_txfrag_setup(sc, &frags, m, ni)) {
1672 DPRINTF(sc, ATH_DEBUG_XMIT,
1673 "%s: out of txfrag buffers\n", __func__);
1674 ic->ic_stats.is_tx_nobuf++; /* XXX */
1680 * Hack! The referenced node pointer is in the
1681 * rcvif field of the packet header. This is
1682 * placed there by ieee80211_mgmt_output because
1683 * we need to hold the reference with the frame
1684 * and there's no other way (other than packet
1685 * tags which we consider too expensive to use)
1688 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
1689 m->m_pkthdr.rcvif = NULL;
1691 wh = mtod(m, struct ieee80211_frame *);
1692 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
1693 IEEE80211_FC0_SUBTYPE_PROBE_RESP) {
1694 /* fill time stamp */
1698 tsf = ath_hal_gettsf64(ah);
1699 /* XXX: adjust 100us delay to xmit */
1701 tstamp = (u_int32_t *)&wh[1];
1702 tstamp[0] = htole32(tsf & 0xffffffff);
1703 tstamp[1] = htole32(tsf >> 32);
1705 sc->sc_stats.ast_tx_mgmt++;
1710 * Pass the frame to the h/w for transmission.
1711 * Fragmented frames have each frag chained together
1712 * with m_nextpkt. We know there are sufficient ath_buf's
1713 * to send all the frags because of work done by
1714 * ath_txfrag_setup. We leave m_nextpkt set while
1715 * calling ath_tx_start so it can use it to extend the
1716 * the tx duration to cover the subsequent frag and
1717 * so it can reclaim all the mbufs in case of an error;
1718 * ath_tx_start clears m_nextpkt once it commits to
1719 * handing the frame to the hardware.
1721 next = m->m_nextpkt;
1722 if (ath_tx_start(sc, ni, bf, m)) {
1729 STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
1730 ath_txfrag_cleanup(sc, &frags, ni);
1731 ATH_TXBUF_UNLOCK(sc);
1733 ieee80211_free_node(ni);
1738 * Beware of state changing between frags.
1739 * XXX check sta power-save state?
1741 if (ic->ic_state != IEEE80211_S_RUN) {
1742 DPRINTF(sc, ATH_DEBUG_XMIT,
1743 "%s: flush fragmented packet, state %s\n",
1745 ieee80211_state_name[ic->ic_state]);
1750 bf = STAILQ_FIRST(&frags);
1751 KASSERT(bf != NULL, ("no buf for txfrag"));
1752 STAILQ_REMOVE_HEAD(&frags, bf_list);
1757 ic->ic_lastdata = ticks;
1760 * Flush stale frames from the fast-frame staging queue.
1762 if (ic->ic_opmode != IEEE80211_M_STA)
1763 ath_ff_stageq_flush(sc, txq, ath_ff_ageflushtestdone);
1769 ath_media_change(struct ifnet *ifp)
1771 #define IS_UP(ifp) \
1772 ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
1775 error = ieee80211_media_change(ifp);
1776 if (error == ENETRESET) {
1777 struct ath_softc *sc = ifp->if_softc;
1778 struct ieee80211com *ic = &sc->sc_ic;
1780 if (ic->ic_opmode == IEEE80211_M_AHDEMO) {
1782 * Adhoc demo mode is just ibss mode w/o beacons
1783 * (mostly). The hal knows nothing about it;
1784 * tell it we're operating in ibss mode.
1786 sc->sc_opmode = HAL_M_IBSS;
1788 sc->sc_opmode = ic->ic_opmode;
1790 ath_init(sc); /* XXX lose error */
1799 ath_keyprint(struct ath_softc *sc, const char *tag, u_int ix,
1800 const HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN])
1802 static const char *ciphers[] = {
1812 printf("%s: [%02u] %-7s ", tag, ix, ciphers[hk->kv_type]);
1813 for (i = 0, n = hk->kv_len; i < n; i++)
1814 printf("%02x", hk->kv_val[i]);
1815 printf(" mac %s", ether_sprintf(mac));
1816 if (hk->kv_type == HAL_CIPHER_TKIP) {
1817 printf(" %s ", sc->sc_splitmic ? "mic" : "rxmic");
1818 for (i = 0; i < sizeof(hk->kv_mic); i++)
1819 printf("%02x", hk->kv_mic[i]);
1820 #if HAL_ABI_VERSION > 0x06052200
1821 if (!sc->sc_splitmic) {
1823 for (i = 0; i < sizeof(hk->kv_txmic); i++)
1824 printf("%02x", hk->kv_txmic[i]);
1833 * Set a TKIP key into the hardware. This handles the
1834 * potential distribution of key state to multiple key
1835 * cache slots for TKIP.
1838 ath_keyset_tkip(struct ath_softc *sc, const struct ieee80211_key *k,
1839 HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN])
1841 #define IEEE80211_KEY_XR (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV)
1842 static const u_int8_t zerobssid[IEEE80211_ADDR_LEN];
1843 struct ath_hal *ah = sc->sc_ah;
1845 KASSERT(k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP,
1846 ("got a non-TKIP key, cipher %u", k->wk_cipher->ic_cipher));
1847 if ((k->wk_flags & IEEE80211_KEY_XR) == IEEE80211_KEY_XR) {
1848 if (sc->sc_splitmic) {
1850 * TX key goes at first index, RX key at the rx index.
1851 * The hal handles the MIC keys at index+64.
1853 memcpy(hk->kv_mic, k->wk_txmic, sizeof(hk->kv_mic));
1854 KEYPRINTF(sc, k->wk_keyix, hk, zerobssid);
1855 if (!ath_hal_keyset(ah, k->wk_keyix, hk, zerobssid))
1858 memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
1859 KEYPRINTF(sc, k->wk_keyix+32, hk, mac);
1860 /* XXX delete tx key on failure? */
1861 return ath_hal_keyset(ah, k->wk_keyix+32, hk, mac);
1864 * Room for both TX+RX MIC keys in one key cache
1865 * slot, just set key at the first index; the hal
1866 * will handle the reset.
1868 memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
1869 #if HAL_ABI_VERSION > 0x06052200
1870 memcpy(hk->kv_txmic, k->wk_txmic, sizeof(hk->kv_txmic));
1872 KEYPRINTF(sc, k->wk_keyix, hk, mac);
1873 return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
1875 } else if (k->wk_flags & IEEE80211_KEY_XR) {
1877 * TX/RX key goes at first index.
1878 * The hal handles the MIC keys are index+64.
1880 memcpy(hk->kv_mic, k->wk_flags & IEEE80211_KEY_XMIT ?
1881 k->wk_txmic : k->wk_rxmic, sizeof(hk->kv_mic));
1882 KEYPRINTF(sc, k->wk_keyix, hk, mac);
1883 return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
1886 #undef IEEE80211_KEY_XR
1890 * Set a net80211 key into the hardware. This handles the
1891 * potential distribution of key state to multiple key
1892 * cache slots for TKIP with hardware MIC support.
1895 ath_keyset(struct ath_softc *sc, const struct ieee80211_key *k,
1896 const u_int8_t mac0[IEEE80211_ADDR_LEN],
1897 struct ieee80211_node *bss)
1899 #define N(a) (sizeof(a)/sizeof(a[0]))
1900 static const u_int8_t ciphermap[] = {
1901 HAL_CIPHER_WEP, /* IEEE80211_CIPHER_WEP */
1902 HAL_CIPHER_TKIP, /* IEEE80211_CIPHER_TKIP */
1903 HAL_CIPHER_AES_OCB, /* IEEE80211_CIPHER_AES_OCB */
1904 HAL_CIPHER_AES_CCM, /* IEEE80211_CIPHER_AES_CCM */
1905 (u_int8_t) -1, /* 4 is not allocated */
1906 HAL_CIPHER_CKIP, /* IEEE80211_CIPHER_CKIP */
1907 HAL_CIPHER_CLR, /* IEEE80211_CIPHER_NONE */
1909 struct ath_hal *ah = sc->sc_ah;
1910 const struct ieee80211_cipher *cip = k->wk_cipher;
1911 u_int8_t gmac[IEEE80211_ADDR_LEN];
1912 const u_int8_t *mac;
1915 memset(&hk, 0, sizeof(hk));
1917 * Software crypto uses a "clear key" so non-crypto
1918 * state kept in the key cache are maintained and
1919 * so that rx frames have an entry to match.
1921 if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0) {
1922 KASSERT(cip->ic_cipher < N(ciphermap),
1923 ("invalid cipher type %u", cip->ic_cipher));
1924 hk.kv_type = ciphermap[cip->ic_cipher];
1925 hk.kv_len = k->wk_keylen;
1926 memcpy(hk.kv_val, k->wk_key, k->wk_keylen);
1928 hk.kv_type = HAL_CIPHER_CLR;
1930 if ((k->wk_flags & IEEE80211_KEY_GROUP) && sc->sc_mcastkey) {
1932 * Group keys on hardware that supports multicast frame
1933 * key search use a mac that is the sender's address with
1934 * the high bit set instead of the app-specified address.
1936 IEEE80211_ADDR_COPY(gmac, bss->ni_macaddr);
1942 if (hk.kv_type == HAL_CIPHER_TKIP &&
1943 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
1944 return ath_keyset_tkip(sc, k, &hk, mac);
1946 KEYPRINTF(sc, k->wk_keyix, &hk, mac);
1947 return ath_hal_keyset(ah, k->wk_keyix, &hk, mac);
1953 * Allocate tx/rx key slots for TKIP. We allocate two slots for
1954 * each key, one for decrypt/encrypt and the other for the MIC.
1957 key_alloc_2pair(struct ath_softc *sc,
1958 ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix)
1960 #define N(a) (sizeof(a)/sizeof(a[0]))
1963 KASSERT(sc->sc_splitmic, ("key cache !split"));
1964 /* XXX could optimize */
1965 for (i = 0; i < N(sc->sc_keymap)/4; i++) {
1966 u_int8_t b = sc->sc_keymap[i];
1969 * One or more slots in this byte are free.
1977 /* XXX IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV */
1978 if (isset(sc->sc_keymap, keyix+32) ||
1979 isset(sc->sc_keymap, keyix+64) ||
1980 isset(sc->sc_keymap, keyix+32+64)) {
1981 /* full pair unavailable */
1983 if (keyix == (i+1)*NBBY) {
1984 /* no slots were appropriate, advance */
1989 setbit(sc->sc_keymap, keyix);
1990 setbit(sc->sc_keymap, keyix+64);
1991 setbit(sc->sc_keymap, keyix+32);
1992 setbit(sc->sc_keymap, keyix+32+64);
1993 DPRINTF(sc, ATH_DEBUG_KEYCACHE,
1994 "%s: key pair %u,%u %u,%u\n",
1995 __func__, keyix, keyix+64,
1996 keyix+32, keyix+32+64);
1998 *rxkeyix = keyix+32;
2002 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__);
2008 * Allocate tx/rx key slots for TKIP. We allocate two slots for
2009 * each key, one for decrypt/encrypt and the other for the MIC.
2012 key_alloc_pair(struct ath_softc *sc,
2013 ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix)
2015 #define N(a) (sizeof(a)/sizeof(a[0]))
2018 KASSERT(!sc->sc_splitmic, ("key cache split"));
2019 /* XXX could optimize */
2020 for (i = 0; i < N(sc->sc_keymap)/4; i++) {
2021 u_int8_t b = sc->sc_keymap[i];
2024 * One or more slots in this byte are free.
2032 if (isset(sc->sc_keymap, keyix+64)) {
2033 /* full pair unavailable */
2035 if (keyix == (i+1)*NBBY) {
2036 /* no slots were appropriate, advance */
2041 setbit(sc->sc_keymap, keyix);
2042 setbit(sc->sc_keymap, keyix+64);
2043 DPRINTF(sc, ATH_DEBUG_KEYCACHE,
2044 "%s: key pair %u,%u\n",
2045 __func__, keyix, keyix+64);
2046 *txkeyix = *rxkeyix = keyix;
2050 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__);
2056 * Allocate a single key cache slot.
2059 key_alloc_single(struct ath_softc *sc,
2060 ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix)
2062 #define N(a) (sizeof(a)/sizeof(a[0]))
2065 /* XXX try i,i+32,i+64,i+32+64 to minimize key pair conflicts */
2066 for (i = 0; i < N(sc->sc_keymap); i++) {
2067 u_int8_t b = sc->sc_keymap[i];
2070 * One or more slots are free.
2075 setbit(sc->sc_keymap, keyix);
2076 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: key %u\n",
2078 *txkeyix = *rxkeyix = keyix;
2082 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of space\n", __func__);
2088 * Allocate one or more key cache slots for a uniacst key. The
2089 * key itself is needed only to identify the cipher. For hardware
2090 * TKIP with split cipher+MIC keys we allocate two key cache slot
2091 * pairs so that we can setup separate TX and RX MIC keys. Note
2092 * that the MIC key for a TKIP key at slot i is assumed by the
2093 * hardware to be at slot i+64. This limits TKIP keys to the first
2097 ath_key_alloc(struct ieee80211com *ic, const struct ieee80211_key *k,
2098 ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
2100 struct ath_softc *sc = ic->ic_ifp->if_softc;
2103 * Group key allocation must be handled specially for
2104 * parts that do not support multicast key cache search
2105 * functionality. For those parts the key id must match
2106 * the h/w key index so lookups find the right key. On
2107 * parts w/ the key search facility we install the sender's
2108 * mac address (with the high bit set) and let the hardware
2109 * find the key w/o using the key id. This is preferred as
2110 * it permits us to support multiple users for adhoc and/or
2111 * multi-station operation.
2113 if ((k->wk_flags & IEEE80211_KEY_GROUP) && !sc->sc_mcastkey) {
2114 if (!(&ic->ic_nw_keys[0] <= k &&
2115 k < &ic->ic_nw_keys[IEEE80211_WEP_NKID])) {
2116 /* should not happen */
2117 DPRINTF(sc, ATH_DEBUG_KEYCACHE,
2118 "%s: bogus group key\n", __func__);
2122 * XXX we pre-allocate the global keys so
2123 * have no way to check if they've already been allocated.
2125 *keyix = *rxkeyix = k - ic->ic_nw_keys;
2130 * We allocate two pair for TKIP when using the h/w to do
2131 * the MIC. For everything else, including software crypto,
2132 * we allocate a single entry. Note that s/w crypto requires
2133 * a pass-through slot on the 5211 and 5212. The 5210 does
2134 * not support pass-through cache entries and we map all
2135 * those requests to slot 0.
2137 if (k->wk_flags & IEEE80211_KEY_SWCRYPT) {
2138 return key_alloc_single(sc, keyix, rxkeyix);
2139 } else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP &&
2140 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
2141 if (sc->sc_splitmic)
2142 return key_alloc_2pair(sc, keyix, rxkeyix);
2144 return key_alloc_pair(sc, keyix, rxkeyix);
2146 return key_alloc_single(sc, keyix, rxkeyix);
2151 * Delete an entry in the key cache allocated by ath_key_alloc.
2154 ath_key_delete(struct ieee80211com *ic, const struct ieee80211_key *k)
2156 struct ath_softc *sc = ic->ic_ifp->if_softc;
2157 struct ath_hal *ah = sc->sc_ah;
2158 const struct ieee80211_cipher *cip = k->wk_cipher;
2159 u_int keyix = k->wk_keyix;
2161 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: delete key %u\n", __func__, keyix);
2163 ath_hal_keyreset(ah, keyix);
2165 * Handle split tx/rx keying required for TKIP with h/w MIC.
2167 if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
2168 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && sc->sc_splitmic)
2169 ath_hal_keyreset(ah, keyix+32); /* RX key */
2170 if (keyix >= IEEE80211_WEP_NKID) {
2172 * Don't touch keymap entries for global keys so
2173 * they are never considered for dynamic allocation.
2175 clrbit(sc->sc_keymap, keyix);
2176 if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
2177 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
2178 clrbit(sc->sc_keymap, keyix+64); /* TX key MIC */
2179 if (sc->sc_splitmic) {
2180 /* +32 for RX key, +32+64 for RX key MIC */
2181 clrbit(sc->sc_keymap, keyix+32);
2182 clrbit(sc->sc_keymap, keyix+32+64);
2190 * Set the key cache contents for the specified key. Key cache
2191 * slot(s) must already have been allocated by ath_key_alloc.
2194 ath_key_set(struct ieee80211com *ic, const struct ieee80211_key *k,
2195 const u_int8_t mac[IEEE80211_ADDR_LEN])
2197 struct ath_softc *sc = ic->ic_ifp->if_softc;
2199 return ath_keyset(sc, k, mac, ic->ic_bss);
2203 * Block/unblock tx+rx processing while a key change is done.
2204 * We assume the caller serializes key management operations
2205 * so we only need to worry about synchronization with other
2206 * uses that originate in the driver.
2209 ath_key_update_begin(struct ieee80211com *ic)
2211 struct ifnet *ifp = ic->ic_ifp;
2212 struct ath_softc *sc = ifp->if_softc;
2214 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
2216 tasklet_disable(&sc->sc_rxtq);
2218 IF_LOCK(&ifp->if_snd); /* NB: doesn't block mgmt frames */
2222 ath_key_update_end(struct ieee80211com *ic)
2224 struct ifnet *ifp = ic->ic_ifp;
2225 struct ath_softc *sc = ifp->if_softc;
2227 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
2228 IF_UNLOCK(&ifp->if_snd);
2230 tasklet_enable(&sc->sc_rxtq);
2235 * Calculate the receive filter according to the
2236 * operating mode and state:
2238 * o always accept unicast, broadcast, and multicast traffic
2239 * o maintain current state of phy error reception (the hal
2240 * may enable phy error frames for noise immunity work)
2241 * o probe request frames are accepted only when operating in
2242 * hostap, adhoc, or monitor modes
2243 * o enable promiscuous mode according to the interface state
2245 * - when operating in adhoc mode so the 802.11 layer creates
2246 * node table entries for peers,
2247 * - when operating in station mode for collecting rssi data when
2248 * the station is otherwise quiet, or
2250 * o accept control frames:
2251 * - when in monitor mode
2254 ath_calcrxfilter(struct ath_softc *sc)
2256 struct ieee80211com *ic = &sc->sc_ic;
2257 struct ifnet *ifp = sc->sc_ifp;
2260 rfilt = HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST;
2261 if (!sc->sc_needmib && !sc->sc_scanning)
2262 rfilt |= HAL_RX_FILTER_PHYERR;
2263 if (ic->ic_opmode != IEEE80211_M_STA)
2264 rfilt |= HAL_RX_FILTER_PROBEREQ;
2265 if (ic->ic_opmode != IEEE80211_M_HOSTAP &&
2266 (ifp->if_flags & IFF_PROMISC))
2267 rfilt |= HAL_RX_FILTER_PROM;
2268 if (ic->ic_opmode == IEEE80211_M_STA ||
2269 ic->ic_opmode == IEEE80211_M_IBSS ||
2271 rfilt |= HAL_RX_FILTER_BEACON;
2272 if (ic->ic_opmode == IEEE80211_M_MONITOR)
2273 rfilt |= HAL_RX_FILTER_CONTROL;
2275 #undef RX_FILTER_PRESERVE
2279 ath_mode_init(struct ath_softc *sc)
2281 struct ieee80211com *ic = &sc->sc_ic;
2282 struct ath_hal *ah = sc->sc_ah;
2283 struct ifnet *ifp = sc->sc_ifp;
2284 u_int32_t rfilt, mfilt[2], val;
2286 struct ifmultiaddr *ifma;
2288 /* configure rx filter */
2289 rfilt = ath_calcrxfilter(sc);
2290 ath_hal_setrxfilter(ah, rfilt);
2292 /* configure operational mode */
2293 ath_hal_setopmode(ah);
2296 * Handle any link-level address change. Note that we only
2297 * need to force ic_myaddr; any other addresses are handled
2298 * as a byproduct of the ifnet code marking the interface
2301 * XXX should get from lladdr instead of arpcom but that's more work
2303 IEEE80211_ADDR_COPY(ic->ic_myaddr, IF_LLADDR(ifp));
2304 ath_hal_setmac(ah, ic->ic_myaddr);
2306 /* calculate and install multicast filter */
2307 if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
2308 mfilt[0] = mfilt[1] = 0;
2310 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2313 /* calculate XOR of eight 6bit values */
2314 dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
2315 val = LE_READ_4(dl + 0);
2316 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2317 val = LE_READ_4(dl + 3);
2318 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2320 mfilt[pos / 32] |= (1 << (pos % 32));
2322 IF_ADDR_UNLOCK(ifp);
2324 mfilt[0] = mfilt[1] = ~0;
2326 ath_hal_setmcastfilter(ah, mfilt[0], mfilt[1]);
2327 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, MC filter %08x:%08x\n",
2328 __func__, rfilt, mfilt[0], mfilt[1]);
2332 * Set the slot time based on the current setting.
2335 ath_setslottime(struct ath_softc *sc)
2337 struct ieee80211com *ic = &sc->sc_ic;
2338 struct ath_hal *ah = sc->sc_ah;
2341 if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan))
2343 else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan))
2345 else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
2346 /* honor short/long slot time only in 11g */
2347 /* XXX shouldn't honor on pure g or turbo g channel */
2348 if (ic->ic_flags & IEEE80211_F_SHSLOT)
2349 usec = HAL_SLOT_TIME_9;
2351 usec = HAL_SLOT_TIME_20;
2353 usec = HAL_SLOT_TIME_9;
2355 DPRINTF(sc, ATH_DEBUG_RESET,
2356 "%s: chan %u MHz flags 0x%x %s slot, %u usec\n",
2357 __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
2358 ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec);
2360 ath_hal_setslottime(ah, usec);
2361 sc->sc_updateslot = OK;
2365 * Callback from the 802.11 layer to update the
2366 * slot time based on the current setting.
2369 ath_updateslot(struct ifnet *ifp)
2371 struct ath_softc *sc = ifp->if_softc;
2372 struct ieee80211com *ic = &sc->sc_ic;
2375 * When not coordinating the BSS, change the hardware
2376 * immediately. For other operation we defer the change
2377 * until beacon updates have propagated to the stations.
2379 if (ic->ic_opmode == IEEE80211_M_HOSTAP)
2380 sc->sc_updateslot = UPDATE;
2382 ath_setslottime(sc);
2386 * Setup a h/w transmit queue for beacons.
2389 ath_beaconq_setup(struct ath_hal *ah)
2393 memset(&qi, 0, sizeof(qi));
2394 qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
2395 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
2396 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
2397 /* NB: for dynamic turbo, don't enable any other interrupts */
2398 qi.tqi_qflags = HAL_TXQ_TXDESCINT_ENABLE;
2399 return ath_hal_setuptxqueue(ah, HAL_TX_QUEUE_BEACON, &qi);
2403 * Setup the transmit queue parameters for the beacon queue.
2406 ath_beaconq_config(struct ath_softc *sc)
2408 #define ATH_EXPONENT_TO_VALUE(v) ((1<<(v))-1)
2409 struct ieee80211com *ic = &sc->sc_ic;
2410 struct ath_hal *ah = sc->sc_ah;
2413 ath_hal_gettxqueueprops(ah, sc->sc_bhalq, &qi);
2414 if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
2416 * Always burst out beacon and CAB traffic.
2418 qi.tqi_aifs = ATH_BEACON_AIFS_DEFAULT;
2419 qi.tqi_cwmin = ATH_BEACON_CWMIN_DEFAULT;
2420 qi.tqi_cwmax = ATH_BEACON_CWMAX_DEFAULT;
2422 struct wmeParams *wmep =
2423 &ic->ic_wme.wme_chanParams.cap_wmeParams[WME_AC_BE];
2425 * Adhoc mode; important thing is to use 2x cwmin.
2427 qi.tqi_aifs = wmep->wmep_aifsn;
2428 qi.tqi_cwmin = 2*ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
2429 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
2432 if (!ath_hal_settxqueueprops(ah, sc->sc_bhalq, &qi)) {
2433 device_printf(sc->sc_dev, "unable to update parameters for "
2434 "beacon hardware queue!\n");
2437 ath_hal_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */
2440 #undef ATH_EXPONENT_TO_VALUE
2444 * Allocate and setup an initial beacon frame.
2447 ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni)
2453 bf = STAILQ_FIRST(&sc->sc_bbuf);
2455 DPRINTF(sc, ATH_DEBUG_BEACON, "%s: no dma buffers\n", __func__);
2456 sc->sc_stats.ast_be_nombuf++; /* XXX */
2457 return ENOMEM; /* XXX */
2460 * NB: the beacon data buffer must be 32-bit aligned;
2461 * we assume the mbuf routines will return us something
2462 * with this alignment (perhaps should assert).
2464 m = ieee80211_beacon_alloc(ni, &sc->sc_boff);
2466 DPRINTF(sc, ATH_DEBUG_BEACON, "%s: cannot get mbuf\n",
2468 sc->sc_stats.ast_be_nombuf++;
2471 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
2472 bf->bf_segs, &bf->bf_nseg,
2476 bf->bf_node = ieee80211_ref_node(ni);
2484 * Setup the beacon frame for transmit.
2487 ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf)
2489 #define USE_SHPREAMBLE(_ic) \
2490 (((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER))\
2491 == IEEE80211_F_SHPREAMBLE)
2492 struct ieee80211_node *ni = bf->bf_node;
2493 struct ieee80211com *ic = ni->ni_ic;
2494 struct mbuf *m = bf->bf_m;
2495 struct ath_hal *ah = sc->sc_ah;
2496 struct ath_desc *ds;
2498 const HAL_RATE_TABLE *rt;
2501 DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: m %p len %u\n",
2502 __func__, m, m->m_len);
2504 /* setup descriptors */
2507 flags = HAL_TXDESC_NOACK;
2508 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) {
2509 ds->ds_link = bf->bf_daddr; /* self-linked */
2510 flags |= HAL_TXDESC_VEOL;
2512 * Let hardware handle antenna switching.
2514 antenna = sc->sc_txantenna;
2518 * Switch antenna every 4 beacons.
2519 * XXX assumes two antenna
2521 antenna = sc->sc_txantenna != 0 ? sc->sc_txantenna
2522 : (sc->sc_stats.ast_be_xmit & 4 ? 2 : 1);
2525 KASSERT(bf->bf_nseg == 1,
2526 ("multi-segment beacon frame; nseg %u", bf->bf_nseg));
2527 ds->ds_data = bf->bf_segs[0].ds_addr;
2529 * Calculate rate code.
2530 * XXX everything at min xmit rate
2532 rix = sc->sc_minrateix;
2533 rt = sc->sc_currates;
2534 rate = rt->info[rix].rateCode;
2535 if (USE_SHPREAMBLE(ic))
2536 rate |= rt->info[rix].shortPreamble;
2537 ath_hal_setuptxdesc(ah, ds
2538 , m->m_len + IEEE80211_CRC_LEN /* frame length */
2539 , sizeof(struct ieee80211_frame)/* header length */
2540 , HAL_PKT_TYPE_BEACON /* Atheros packet type */
2541 , ni->ni_txpower /* txpower XXX */
2542 , rate, 1 /* series 0 rate/tries */
2543 , HAL_TXKEYIX_INVALID /* no encryption */
2544 , antenna /* antenna mode */
2545 , flags /* no ack, veol for beacons */
2546 , 0 /* rts/cts rate */
2547 , 0 /* rts/cts duration */
2549 /* NB: beacon's BufLen must be a multiple of 4 bytes */
2550 ath_hal_filltxdesc(ah, ds
2551 , roundup(m->m_len, 4) /* buffer length */
2552 , AH_TRUE /* first segment */
2553 , AH_TRUE /* last segment */
2554 , ds /* first descriptor */
2556 #undef USE_SHPREAMBLE
2560 ath_beacon_update(struct ieee80211com *ic, int item)
2562 struct ath_softc *sc = ic->ic_ifp->if_softc;
2563 struct ieee80211_beacon_offsets *bo = &sc->sc_boff;
2565 setbit(bo->bo_flags, item);
2569 * Append the contents of src to dst; both queues
2570 * are assumed to be locked.
2573 ath_txqmove(struct ath_txq *dst, struct ath_txq *src)
2575 STAILQ_CONCAT(&dst->axq_q, &src->axq_q);
2576 dst->axq_link = src->axq_link;
2577 src->axq_link = NULL;
2578 dst->axq_depth += src->axq_depth;
2583 * Transmit a beacon frame at SWBA. Dynamic updates to the
2584 * frame contents are done as needed and the slot time is
2585 * also adjusted based on current state.
2588 ath_beacon_proc(void *arg, int pending)
2590 struct ath_softc *sc = arg;
2591 struct ath_buf *bf = STAILQ_FIRST(&sc->sc_bbuf);
2592 struct ieee80211_node *ni = bf->bf_node;
2593 struct ieee80211com *ic = ni->ni_ic;
2594 struct ath_hal *ah = sc->sc_ah;
2595 struct ath_txq *cabq = sc->sc_cabq;
2597 int ncabq, nmcastq, error, otherant;
2599 DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: pending %u\n",
2602 if (ic->ic_opmode == IEEE80211_M_STA ||
2603 ic->ic_opmode == IEEE80211_M_MONITOR ||
2604 bf == NULL || bf->bf_m == NULL) {
2605 DPRINTF(sc, ATH_DEBUG_ANY, "%s: ic_flags=%x bf=%p bf_m=%p\n",
2606 __func__, ic->ic_flags, bf, bf ? bf->bf_m : NULL);
2610 * Check if the previous beacon has gone out. If
2611 * not don't try to post another, skip this period
2612 * and wait for the next. Missed beacons indicate
2613 * a problem and should not occur. If we miss too
2614 * many consecutive beacons reset the device.
2616 if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) {
2617 sc->sc_bmisscount++;
2618 DPRINTF(sc, ATH_DEBUG_BEACON,
2619 "%s: missed %u consecutive beacons\n",
2620 __func__, sc->sc_bmisscount);
2621 if (sc->sc_bmisscount > 3) /* NB: 3 is a guess */
2622 taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask);
2625 if (sc->sc_bmisscount != 0) {
2626 DPRINTF(sc, ATH_DEBUG_BEACON,
2627 "%s: resume beacon xmit after %u misses\n",
2628 __func__, sc->sc_bmisscount);
2629 sc->sc_bmisscount = 0;
2633 * Update dynamic beacon contents. If this returns
2634 * non-zero then we need to remap the memory because
2635 * the beacon frame changed size (probably because
2636 * of the TIM bitmap).
2639 nmcastq = sc->sc_mcastq.axq_depth;
2640 ncabq = ath_hal_numtxpending(ah, cabq->axq_qnum);
2641 if (ieee80211_beacon_update(bf->bf_node, &sc->sc_boff, m, ncabq+nmcastq)) {
2642 /* XXX too conservative? */
2643 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
2644 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
2645 bf->bf_segs, &bf->bf_nseg,
2648 if_printf(ic->ic_ifp,
2649 "%s: bus_dmamap_load_mbuf_sg failed, error %u\n",
2654 if (ncabq && (sc->sc_boff.bo_tim[4] & 1)) {
2656 * CABQ traffic from the previous DTIM is still pending.
2657 * This is ok for now but when there are multiple vap's
2658 * and we are using staggered beacons we'll want to drain
2659 * the cabq before loading frames for the different vap.
2661 DPRINTF(sc, ATH_DEBUG_BEACON,
2662 "%s: cabq did not drain, mcastq %u cabq %u/%u\n",
2663 __func__, nmcastq, ncabq, cabq->axq_depth);
2664 sc->sc_stats.ast_cabq_busy++;
2668 * Handle slot time change when a non-ERP station joins/leaves
2669 * an 11g network. The 802.11 layer notifies us via callback,
2670 * we mark updateslot, then wait one beacon before effecting
2671 * the change. This gives associated stations at least one
2672 * beacon interval to note the state change.
2675 if (sc->sc_updateslot == UPDATE)
2676 sc->sc_updateslot = COMMIT; /* commit next beacon */
2677 else if (sc->sc_updateslot == COMMIT)
2678 ath_setslottime(sc); /* commit change to h/w */
2681 * Check recent per-antenna transmit statistics and flip
2682 * the default antenna if noticeably more frames went out
2683 * on the non-default antenna.
2684 * XXX assumes 2 anntenae
2686 otherant = sc->sc_defant & 1 ? 2 : 1;
2687 if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2)
2688 ath_setdefantenna(sc, otherant);
2689 sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;
2692 * Construct tx descriptor.
2694 ath_beacon_setup(sc, bf);
2697 * Stop any current dma and put the new frame on the queue.
2698 * This should never fail since we check above that no frames
2699 * are still pending on the queue.
2701 if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) {
2702 DPRINTF(sc, ATH_DEBUG_ANY,
2703 "%s: beacon queue %u did not stop?\n",
2704 __func__, sc->sc_bhalq);
2706 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
2709 * Enable the CAB queue before the beacon queue to
2710 * insure cab frames are triggered by this beacon.
2712 if (sc->sc_boff.bo_tim_len && (sc->sc_boff.bo_tim[4] & 1)) {
2713 /* NB: only at DTIM */
2715 ATH_TXQ_LOCK(&sc->sc_mcastq);
2717 struct ath_buf *bfm;
2720 * Move frames from the s/w mcast q to the h/w cab q.
2722 bfm = STAILQ_FIRST(&sc->sc_mcastq.axq_q);
2723 if (cabq->axq_link != NULL) {
2724 *cabq->axq_link = bfm->bf_daddr;
2726 ath_hal_puttxbuf(ah, cabq->axq_qnum,
2728 ath_txqmove(cabq, &sc->sc_mcastq);
2730 sc->sc_stats.ast_cabq_xmit += nmcastq;
2732 /* NB: gated by beacon so safe to start here */
2733 ath_hal_txstart(ah, cabq->axq_qnum);
2734 ATH_TXQ_UNLOCK(cabq);
2735 ATH_TXQ_UNLOCK(&sc->sc_mcastq);
2737 ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
2738 ath_hal_txstart(ah, sc->sc_bhalq);
2739 DPRINTF(sc, ATH_DEBUG_BEACON_PROC,
2740 "%s: TXDP[%u] = %p (%p)\n", __func__,
2741 sc->sc_bhalq, (caddr_t)bf->bf_daddr, bf->bf_desc);
2743 sc->sc_stats.ast_be_xmit++;
2747 * Reset the hardware after detecting beacons have stopped.
2750 ath_bstuck_proc(void *arg, int pending)
2752 struct ath_softc *sc = arg;
2753 struct ifnet *ifp = sc->sc_ifp;
2755 if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n",
2761 * Reclaim beacon resources.
2764 ath_beacon_free(struct ath_softc *sc)
2768 STAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) {
2769 if (bf->bf_m != NULL) {
2770 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
2774 if (bf->bf_node != NULL) {
2775 ieee80211_free_node(bf->bf_node);
2782 * Configure the beacon and sleep timers.
2784 * When operating as an AP this resets the TSF and sets
2785 * up the hardware to notify us when we need to issue beacons.
2787 * When operating in station mode this sets up the beacon
2788 * timers according to the timestamp of the last received
2789 * beacon and the current TSF, configures PCF and DTIM
2790 * handling, programs the sleep registers so the hardware
2791 * will wakeup in time to receive beacons, and configures
2792 * the beacon miss handling so we'll receive a BMISS
2793 * interrupt when we stop seeing beacons from the AP
2794 * we've associated with.
2797 ath_beacon_config(struct ath_softc *sc)
2799 #define TSF_TO_TU(_h,_l) \
2800 ((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10))
2802 struct ath_hal *ah = sc->sc_ah;
2803 struct ieee80211com *ic = &sc->sc_ic;
2804 struct ieee80211_node *ni = ic->ic_bss;
2805 u_int32_t nexttbtt, intval, tsftu;
2808 /* extract tstamp from last beacon and convert to TU */
2809 nexttbtt = TSF_TO_TU(LE_READ_4(ni->ni_tstamp.data + 4),
2810 LE_READ_4(ni->ni_tstamp.data));
2811 /* NB: the beacon interval is kept internally in TU's */
2812 intval = ni->ni_intval & HAL_BEACON_PERIOD;
2813 if (nexttbtt == 0) /* e.g. for ap mode */
2815 else if (intval) /* NB: can be 0 for monitor mode */
2816 nexttbtt = roundup(nexttbtt, intval);
2817 DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u intval %u (%u)\n",
2818 __func__, nexttbtt, intval, ni->ni_intval);
2819 if (ic->ic_opmode == IEEE80211_M_STA) {
2820 HAL_BEACON_STATE bs;
2821 int dtimperiod, dtimcount;
2822 int cfpperiod, cfpcount;
2825 * Setup dtim and cfp parameters according to
2826 * last beacon we received (which may be none).
2828 dtimperiod = ni->ni_dtim_period;
2829 if (dtimperiod <= 0) /* NB: 0 if not known */
2831 dtimcount = ni->ni_dtim_count;
2832 if (dtimcount >= dtimperiod) /* NB: sanity check */
2833 dtimcount = 0; /* XXX? */
2834 cfpperiod = 1; /* NB: no PCF support yet */
2837 * Pull nexttbtt forward to reflect the current
2838 * TSF and calculate dtim+cfp state for the result.
2840 tsf = ath_hal_gettsf64(ah);
2841 tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
2844 if (--dtimcount < 0) {
2845 dtimcount = dtimperiod - 1;
2847 cfpcount = cfpperiod - 1;
2849 } while (nexttbtt < tsftu);
2850 memset(&bs, 0, sizeof(bs));
2851 bs.bs_intval = intval;
2852 bs.bs_nexttbtt = nexttbtt;
2853 bs.bs_dtimperiod = dtimperiod*intval;
2854 bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
2855 bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
2856 bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
2857 bs.bs_cfpmaxduration = 0;
2860 * The 802.11 layer records the offset to the DTIM
2861 * bitmap while receiving beacons; use it here to
2862 * enable h/w detection of our AID being marked in
2863 * the bitmap vector (to indicate frames for us are
2864 * pending at the AP).
2865 * XXX do DTIM handling in s/w to WAR old h/w bugs
2866 * XXX enable based on h/w rev for newer chips
2868 bs.bs_timoffset = ni->ni_timoff;
2871 * Calculate the number of consecutive beacons to miss
2872 * before taking a BMISS interrupt.
2873 * Note that we clamp the result to at most 10 beacons.
2875 bs.bs_bmissthreshold = ic->ic_bmissthreshold;
2876 if (bs.bs_bmissthreshold > 10)
2877 bs.bs_bmissthreshold = 10;
2878 else if (bs.bs_bmissthreshold <= 0)
2879 bs.bs_bmissthreshold = 1;
2882 * Calculate sleep duration. The configuration is
2883 * given in ms. We insure a multiple of the beacon
2884 * period is used. Also, if the sleep duration is
2885 * greater than the DTIM period then it makes senses
2886 * to make it a multiple of that.
2888 * XXX fixed at 100ms
2890 bs.bs_sleepduration =
2891 roundup(IEEE80211_MS_TO_TU(100), bs.bs_intval);
2892 if (bs.bs_sleepduration > bs.bs_dtimperiod)
2893 bs.bs_sleepduration = roundup(bs.bs_sleepduration, bs.bs_dtimperiod);
2895 DPRINTF(sc, ATH_DEBUG_BEACON,
2896 "%s: tsf %ju tsf:tu %u intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u sleep %u cfp:period %u maxdur %u next %u timoffset %u\n"
2903 , bs.bs_bmissthreshold
2904 , bs.bs_sleepduration
2906 , bs.bs_cfpmaxduration
2910 ath_hal_intrset(ah, 0);
2911 ath_hal_beacontimers(ah, &bs);
2912 sc->sc_imask |= HAL_INT_BMISS;
2913 ath_hal_intrset(ah, sc->sc_imask);
2915 ath_hal_intrset(ah, 0);
2916 if (nexttbtt == intval)
2917 intval |= HAL_BEACON_RESET_TSF;
2918 if (ic->ic_opmode == IEEE80211_M_IBSS) {
2920 * In IBSS mode enable the beacon timers but only
2921 * enable SWBA interrupts if we need to manually
2922 * prepare beacon frames. Otherwise we use a
2923 * self-linked tx descriptor and let the hardware
2926 intval |= HAL_BEACON_ENA;
2927 if (!sc->sc_hasveol)
2928 sc->sc_imask |= HAL_INT_SWBA;
2929 if ((intval & HAL_BEACON_RESET_TSF) == 0) {
2931 * Pull nexttbtt forward to reflect
2934 tsf = ath_hal_gettsf64(ah);
2935 tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
2938 } while (nexttbtt < tsftu);
2940 ath_beaconq_config(sc);
2941 } else if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
2943 * In AP mode we enable the beacon timers and
2944 * SWBA interrupts to prepare beacon frames.
2946 intval |= HAL_BEACON_ENA;
2947 sc->sc_imask |= HAL_INT_SWBA; /* beacon prepare */
2948 ath_beaconq_config(sc);
2950 ath_hal_beaconinit(ah, nexttbtt, intval);
2951 sc->sc_bmisscount = 0;
2952 ath_hal_intrset(ah, sc->sc_imask);
2954 * When using a self-linked beacon descriptor in
2955 * ibss mode load it once here.
2957 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol)
2958 ath_beacon_proc(sc, 0);
2960 sc->sc_syncbeacon = 0;
2966 ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2968 bus_addr_t *paddr = (bus_addr_t*) arg;
2969 KASSERT(error == 0, ("error %u on bus_dma callback", error));
2970 *paddr = segs->ds_addr;
2974 ath_descdma_setup(struct ath_softc *sc,
2975 struct ath_descdma *dd, ath_bufhead *head,
2976 const char *name, int nbuf, int ndesc)
2978 #define DS2PHYS(_dd, _ds) \
2979 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
2980 struct ifnet *ifp = sc->sc_ifp;
2981 struct ath_desc *ds;
2983 int i, bsize, error;
2985 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n",
2986 __func__, name, nbuf, ndesc);
2989 dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
2992 * Setup DMA descriptor area.
2994 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
2995 PAGE_SIZE, 0, /* alignment, bounds */
2996 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
2997 BUS_SPACE_MAXADDR, /* highaddr */
2998 NULL, NULL, /* filter, filterarg */
2999 dd->dd_desc_len, /* maxsize */
3001 dd->dd_desc_len, /* maxsegsize */
3002 BUS_DMA_ALLOCNOW, /* flags */
3003 NULL, /* lockfunc */
3007 if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
3011 /* allocate descriptors */
3012 error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap);
3014 if_printf(ifp, "unable to create dmamap for %s descriptors, "
3015 "error %u\n", dd->dd_name, error);
3019 error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
3020 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
3023 if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
3024 "error %u\n", nbuf * ndesc, dd->dd_name, error);
3028 error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
3029 dd->dd_desc, dd->dd_desc_len,
3030 ath_load_cb, &dd->dd_desc_paddr,
3033 if_printf(ifp, "unable to map %s descriptors, error %u\n",
3034 dd->dd_name, error);
3039 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
3040 __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
3041 (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
3043 /* allocate rx buffers */
3044 bsize = sizeof(struct ath_buf) * nbuf;
3045 bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO);
3047 if_printf(ifp, "malloc of %s buffers failed, size %u\n",
3048 dd->dd_name, bsize);
3054 for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
3056 bf->bf_daddr = DS2PHYS(dd, ds);
3057 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
3060 if_printf(ifp, "unable to create dmamap for %s "
3061 "buffer %u, error %u\n", dd->dd_name, i, error);
3062 ath_descdma_cleanup(sc, dd, head);
3065 STAILQ_INSERT_TAIL(head, bf, bf_list);
3069 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
3071 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
3073 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
3075 bus_dma_tag_destroy(dd->dd_dmat);
3076 memset(dd, 0, sizeof(*dd));
3082 ath_descdma_cleanup(struct ath_softc *sc,
3083 struct ath_descdma *dd, ath_bufhead *head)
3086 struct ieee80211_node *ni;
3088 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
3089 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
3090 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
3091 bus_dma_tag_destroy(dd->dd_dmat);
3093 STAILQ_FOREACH(bf, head, bf_list) {
3098 if (bf->bf_dmamap != NULL) {
3099 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
3100 bf->bf_dmamap = NULL;
3106 * Reclaim node reference.
3108 ieee80211_free_node(ni);
3113 free(dd->dd_bufptr, M_ATHDEV);
3114 memset(dd, 0, sizeof(*dd));
3118 ath_desc_alloc(struct ath_softc *sc)
3122 error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
3123 "rx", ath_rxbuf, 1);
3127 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
3128 "tx", ath_txbuf, ATH_TXDESC);
3130 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3134 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
3137 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3138 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3145 ath_desc_free(struct ath_softc *sc)
3148 if (sc->sc_bdma.dd_desc_len != 0)
3149 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
3150 if (sc->sc_txdma.dd_desc_len != 0)
3151 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3152 if (sc->sc_rxdma.dd_desc_len != 0)
3153 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3156 static struct ieee80211_node *
3157 ath_node_alloc(struct ieee80211_node_table *nt)
3159 struct ieee80211com *ic = nt->nt_ic;
3160 struct ath_softc *sc = ic->ic_ifp->if_softc;
3161 const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space;
3162 struct ath_node *an;
3164 an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
3169 an->an_avgrssi = ATH_RSSI_DUMMY_MARKER;
3170 ath_rate_node_init(sc, an);
3172 DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an);
3173 return &an->an_node;
3177 ath_node_free(struct ieee80211_node *ni)
3179 struct ieee80211com *ic = ni->ni_ic;
3180 struct ath_softc *sc = ic->ic_ifp->if_softc;
3182 DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni);
3184 ath_rate_node_cleanup(sc, ATH_NODE(ni));
3185 sc->sc_node_free(ni);
3189 ath_node_getrssi(const struct ieee80211_node *ni)
3191 #define HAL_EP_RND(x, mul) \
3192 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
3193 u_int32_t avgrssi = ATH_NODE_CONST(ni)->an_avgrssi;
3197 * When only one frame is received there will be no state in
3198 * avgrssi so fallback on the value recorded by the 802.11 layer.
3200 if (avgrssi != ATH_RSSI_DUMMY_MARKER)
3201 rssi = HAL_EP_RND(avgrssi, HAL_RSSI_EP_MULTIPLIER);
3204 return rssi < 0 ? 0 : rssi > 127 ? 127 : rssi;
3209 ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
3211 struct ieee80211com *ic = ni->ni_ic;
3212 struct ath_softc *sc = ic->ic_ifp->if_softc;
3213 struct ath_hal *ah = sc->sc_ah;
3216 *rssi = ath_node_getrssi(ni);
3217 if (ni->ni_chan != IEEE80211_CHAN_ANYC) {
3218 ath_mapchan(&hchan, ni->ni_chan);
3219 *noise = ath_hal_getchannoise(ah, &hchan);
3221 *noise = -95; /* nominally correct */
3225 ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
3227 struct ath_hal *ah = sc->sc_ah;
3230 struct ath_desc *ds;
3235 * NB: by assigning a page to the rx dma buffer we
3236 * implicitly satisfy the Atheros requirement that
3237 * this buffer be cache-line-aligned and sized to be
3238 * multiple of the cache line size. Not doing this
3239 * causes weird stuff to happen (for the 5210 at least).
3241 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3243 DPRINTF(sc, ATH_DEBUG_ANY,
3244 "%s: no mbuf/cluster\n", __func__);
3245 sc->sc_stats.ast_rx_nombuf++;
3248 m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
3250 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
3252 bf->bf_segs, &bf->bf_nseg,
3255 DPRINTF(sc, ATH_DEBUG_ANY,
3256 "%s: bus_dmamap_load_mbuf_sg failed; error %d\n",
3258 sc->sc_stats.ast_rx_busdma++;
3262 KASSERT(bf->bf_nseg == 1,
3263 ("multi-segment packet; nseg %u", bf->bf_nseg));
3266 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD);
3269 * Setup descriptors. For receive we always terminate
3270 * the descriptor list with a self-linked entry so we'll
3271 * not get overrun under high load (as can happen with a
3272 * 5212 when ANI processing enables PHY error frames).
3274 * To insure the last descriptor is self-linked we create
3275 * each descriptor as self-linked and add it to the end. As
3276 * each additional descriptor is added the previous self-linked
3277 * entry is ``fixed'' naturally. This should be safe even
3278 * if DMA is happening. When processing RX interrupts we
3279 * never remove/process the last, self-linked, entry on the
3280 * descriptor list. This insures the hardware always has
3281 * someplace to write a new frame.
3284 ds->ds_link = bf->bf_daddr; /* link to self */
3285 ds->ds_data = bf->bf_segs[0].ds_addr;
3286 ath_hal_setuprxdesc(ah, ds
3287 , m->m_len /* buffer size */
3291 if (sc->sc_rxlink != NULL)
3292 *sc->sc_rxlink = bf->bf_daddr;
3293 sc->sc_rxlink = &ds->ds_link;
3298 * Extend 15-bit time stamp from rx descriptor to
3299 * a full 64-bit TSF using the specified TSF.
3301 static __inline u_int64_t
3302 ath_extend_tsf(u_int32_t rstamp, u_int64_t tsf)
3304 if ((tsf & 0x7fff) < rstamp)
3306 return ((tsf &~ 0x7fff) | rstamp);
3310 * Intercept management frames to collect beacon rssi data
3311 * and to do ibss merges.
3314 ath_recv_mgmt(struct ieee80211com *ic, struct mbuf *m,
3315 struct ieee80211_node *ni,
3316 int subtype, int rssi, int noise, u_int32_t rstamp)
3318 struct ath_softc *sc = ic->ic_ifp->if_softc;
3321 * Call up first so subsequent work can use information
3322 * potentially stored in the node (e.g. for ibss merge).
3324 sc->sc_recv_mgmt(ic, m, ni, subtype, rssi, noise, rstamp);
3326 case IEEE80211_FC0_SUBTYPE_BEACON:
3327 /* update rssi statistics for use by the hal */
3328 ATH_RSSI_LPF(sc->sc_halstats.ns_avgbrssi, rssi);
3329 if (sc->sc_syncbeacon &&
3330 ni == ic->ic_bss && ic->ic_state == IEEE80211_S_RUN) {
3332 * Resync beacon timers using the tsf of the beacon
3333 * frame we just received.
3335 ath_beacon_config(sc);
3338 case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
3339 if (ic->ic_opmode == IEEE80211_M_IBSS &&
3340 ic->ic_state == IEEE80211_S_RUN) {
3341 u_int64_t tsf = ath_extend_tsf(rstamp,
3342 ath_hal_gettsf64(sc->sc_ah));
3344 * Handle ibss merge as needed; check the tsf on the
3345 * frame before attempting the merge. The 802.11 spec
3346 * says the station should change it's bssid to match
3347 * the oldest station with the same ssid, where oldest
3348 * is determined by the tsf. Note that hardware
3349 * reconfiguration happens through callback to
3350 * ath_newstate as the state machine will go from
3351 * RUN -> RUN when this happens.
3353 if (le64toh(ni->ni_tstamp.tsf) >= tsf) {
3354 DPRINTF(sc, ATH_DEBUG_STATE,
3355 "ibss merge, rstamp %u tsf %ju "
3356 "tstamp %ju\n", rstamp, (uintmax_t)tsf,
3357 (uintmax_t)ni->ni_tstamp.tsf);
3358 (void) ieee80211_ibss_merge(ni);
3366 * Set the default antenna.
3369 ath_setdefantenna(struct ath_softc *sc, u_int antenna)
3371 struct ath_hal *ah = sc->sc_ah;
3373 /* XXX block beacon interrupts */
3374 ath_hal_setdefantenna(ah, antenna);
3375 if (sc->sc_defant != antenna)
3376 sc->sc_stats.ast_ant_defswitch++;
3377 sc->sc_defant = antenna;
3378 sc->sc_rxotherant = 0;
3382 ath_rx_tap(struct ath_softc *sc, struct mbuf *m,
3383 const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf)
3385 #define CHANNEL_HT (CHANNEL_HT20|CHANNEL_HT40PLUS|CHANNEL_HT40MINUS)
3388 KASSERT(sc->sc_drvbpf != NULL, ("no tap"));
3391 * Discard anything shorter than an ack or cts.
3393 if (m->m_pkthdr.len < IEEE80211_ACK_LEN) {
3394 DPRINTF(sc, ATH_DEBUG_RECV, "%s: runt packet %d\n",
3395 __func__, m->m_pkthdr.len);
3396 sc->sc_stats.ast_rx_tooshort++;
3400 sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix].ieeerate;
3401 sc->sc_rx_th.wr_flags = sc->sc_hwmap[rix].rxflags;
3402 #if HAL_ABI_VERSION >= 0x07050400
3403 if (sc->sc_curchan.channelFlags & CHANNEL_HT) {
3405 * For HT operation we must specify the channel
3406 * attributes for each packet since they vary.
3407 * We deduce this by from HT40 bit in the rx
3408 * status and the MCS/legacy rate bit.
3410 sc->sc_rx_th.wr_chan_flags &= ~IEEE80211_CHAN_HT;
3411 if (sc->sc_rx_th.wr_rate & 0x80) { /* HT rate */
3413 sc->sc_rx_th.wr_chan_flags |=
3414 (rs->rs_flags & HAL_RX_2040) ?
3415 IEEE80211_CHAN_HT40U : IEEE80211_CHAN_HT20;
3416 if ((rs->rs_flags & HAL_RX_GI) == 0)
3417 sc->sc_rx_th.wr_flags |=
3418 IEEE80211_RADIOTAP_F_SHORTGI;
3422 sc->sc_rx_th.wr_tsf = htole64(ath_extend_tsf(rs->rs_tstamp, tsf));
3423 if (rs->rs_status & HAL_RXERR_CRC)
3424 sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_BADFCS;
3425 /* XXX propagate other error flags from descriptor */
3426 sc->sc_rx_th.wr_antsignal = rs->rs_rssi + nf;
3427 sc->sc_rx_th.wr_antnoise = nf;
3428 sc->sc_rx_th.wr_antenna = rs->rs_antenna;
3430 bpf_mtap2(sc->sc_drvbpf, &sc->sc_rx_th, sc->sc_rx_th_len, m);
3437 ath_rx_proc(void *arg, int npending)
3439 #define PA2DESC(_sc, _pa) \
3440 ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
3441 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
3442 struct ath_softc *sc = arg;
3444 struct ieee80211com *ic = &sc->sc_ic;
3445 struct ifnet *ifp = sc->sc_ifp;
3446 struct ath_hal *ah = sc->sc_ah;
3447 struct ath_desc *ds;
3448 struct ath_rx_status *rs;
3450 struct ieee80211_node *ni;
3451 struct ath_node *an;
3452 int len, type, ngood;
3459 DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending);
3461 nf = ath_hal_getchannoise(ah, &sc->sc_curchan);
3462 tsf = ath_hal_gettsf64(ah);
3464 bf = STAILQ_FIRST(&sc->sc_rxbuf);
3465 if (bf == NULL) { /* NB: shouldn't happen */
3466 if_printf(ifp, "%s: no buffer!\n", __func__);
3470 if (m == NULL) { /* NB: shouldn't happen */
3472 * If mbuf allocation failed previously there
3473 * will be no mbuf; try again to re-populate it.
3475 /* XXX make debug msg */
3476 if_printf(ifp, "%s: no mbuf!\n", __func__);
3477 STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list);
3481 if (ds->ds_link == bf->bf_daddr) {
3482 /* NB: never process the self-linked entry at the end */
3485 /* XXX sync descriptor memory */
3487 * Must provide the virtual address of the current
3488 * descriptor, the physical address, and the virtual
3489 * address of the next descriptor in the h/w chain.
3490 * This allows the HAL to look ahead to see if the
3491 * hardware is done with a descriptor by checking the
3492 * done bit in the following descriptor and the address
3493 * of the current descriptor the DMA engine is working
3494 * on. All this is necessary because of our use of
3495 * a self-linked list to avoid rx overruns.
3497 rs = &bf->bf_status.ds_rxstat;
3498 status = ath_hal_rxprocdesc(ah, ds,
3499 bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
3501 if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
3502 ath_printrxbuf(bf, 0, status == HAL_OK);
3504 if (status == HAL_EINPROGRESS)
3506 STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list);
3507 if (rs->rs_status != 0) {
3508 if (rs->rs_status & HAL_RXERR_CRC)
3509 sc->sc_stats.ast_rx_crcerr++;
3510 if (rs->rs_status & HAL_RXERR_FIFO)
3511 sc->sc_stats.ast_rx_fifoerr++;
3512 if (rs->rs_status & HAL_RXERR_PHY) {
3513 sc->sc_stats.ast_rx_phyerr++;
3514 phyerr = rs->rs_phyerr & 0x1f;
3515 sc->sc_stats.ast_rx_phy[phyerr]++;
3516 goto rx_error; /* NB: don't count in ierrors */
3518 if (rs->rs_status & HAL_RXERR_DECRYPT) {
3520 * Decrypt error. If the error occurred
3521 * because there was no hardware key, then
3522 * let the frame through so the upper layers
3523 * can process it. This is necessary for 5210
3524 * parts which have no way to setup a ``clear''
3527 * XXX do key cache faulting
3529 if (rs->rs_keyix == HAL_RXKEYIX_INVALID)
3531 sc->sc_stats.ast_rx_badcrypt++;
3533 if (rs->rs_status & HAL_RXERR_MIC) {
3534 sc->sc_stats.ast_rx_badmic++;
3536 * Do minimal work required to hand off
3537 * the 802.11 header for notifcation.
3539 /* XXX frag's and qos frames */
3540 len = rs->rs_datalen;
3541 if (len >= sizeof (struct ieee80211_frame)) {
3542 bus_dmamap_sync(sc->sc_dmat,
3544 BUS_DMASYNC_POSTREAD);
3545 ieee80211_notify_michael_failure(ic,
3546 mtod(m, struct ieee80211_frame *),
3548 rs->rs_keyix-32 : rs->rs_keyix
3555 * Cleanup any pending partial frame.
3557 if (sc->sc_rxpending != NULL) {
3558 m_freem(sc->sc_rxpending);
3559 sc->sc_rxpending = NULL;
3562 * When a tap is present pass error frames
3563 * that have been requested. By default we
3564 * pass decrypt+mic errors but others may be
3565 * interesting (e.g. crc).
3567 if (bpf_peers_present(sc->sc_drvbpf) &&
3568 (rs->rs_status & sc->sc_monpass)) {
3569 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
3570 BUS_DMASYNC_POSTREAD);
3571 /* NB: bpf needs the mbuf length setup */
3572 len = rs->rs_datalen;
3573 m->m_pkthdr.len = m->m_len = len;
3574 (void) ath_rx_tap(sc, m, rs, tsf, nf);
3576 /* XXX pass MIC errors up for s/w reclaculation */
3581 * Sync and unmap the frame. At this point we're
3582 * committed to passing the mbuf somewhere so clear
3583 * bf_m; this means a new mbuf must be allocated
3584 * when the rx descriptor is setup again to receive
3587 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
3588 BUS_DMASYNC_POSTREAD);
3589 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3592 len = rs->rs_datalen;
3597 * Frame spans multiple descriptors; save
3598 * it for the next completed descriptor, it
3599 * will be used to construct a jumbogram.
3601 if (sc->sc_rxpending != NULL) {
3602 /* NB: max frame size is currently 2 clusters */
3603 sc->sc_stats.ast_rx_toobig++;
3604 m_freem(sc->sc_rxpending);
3606 m->m_pkthdr.rcvif = ifp;
3607 m->m_pkthdr.len = len;
3608 sc->sc_rxpending = m;
3610 } else if (sc->sc_rxpending != NULL) {
3612 * This is the second part of a jumbogram,
3613 * chain it to the first mbuf, adjust the
3614 * frame length, and clear the rxpending state.
3616 sc->sc_rxpending->m_next = m;
3617 sc->sc_rxpending->m_pkthdr.len += len;
3618 m = sc->sc_rxpending;
3619 sc->sc_rxpending = NULL;
3622 * Normal single-descriptor receive; setup
3623 * the rcvif and packet length.
3625 m->m_pkthdr.rcvif = ifp;
3626 m->m_pkthdr.len = len;
3629 sc->sc_stats.ast_ant_rx[rs->rs_antenna]++;
3631 if (bpf_peers_present(sc->sc_drvbpf) &&
3632 !ath_rx_tap(sc, m, rs, tsf, nf)) {
3633 m_freem(m); /* XXX reclaim */
3638 * From this point on we assume the frame is at least
3639 * as large as ieee80211_frame_min; verify that.
3641 if (len < IEEE80211_MIN_LEN) {
3642 DPRINTF(sc, ATH_DEBUG_RECV, "%s: short packet %d\n",
3644 sc->sc_stats.ast_rx_tooshort++;
3649 if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) {
3650 ieee80211_dump_pkt(ic, mtod(m, caddr_t), len,
3651 sc->sc_hwmap[rs->rs_rate].ieeerate,
3655 m_adj(m, -IEEE80211_CRC_LEN);
3658 * Locate the node for sender, track state, and then
3659 * pass the (referenced) node up to the 802.11 layer
3662 ni = ieee80211_find_rxnode_withkey(ic,
3663 mtod(m, const struct ieee80211_frame_min *),
3664 rs->rs_keyix == HAL_RXKEYIX_INVALID ?
3665 IEEE80211_KEYIX_NONE : rs->rs_keyix);
3667 * Track rx rssi and do any rx antenna management.
3670 ATH_RSSI_LPF(an->an_avgrssi, rs->rs_rssi);
3671 ATH_RSSI_LPF(sc->sc_halstats.ns_avgrssi, rs->rs_rssi);
3673 * Send frame up for processing.
3675 type = ieee80211_input(ic, m, ni,
3676 rs->rs_rssi, nf, rs->rs_tstamp);
3677 ieee80211_free_node(ni);
3678 if (sc->sc_diversity) {
3680 * When using fast diversity, change the default rx
3681 * antenna if diversity chooses the other antenna 3
3684 if (sc->sc_defant != rs->rs_antenna) {
3685 if (++sc->sc_rxotherant >= 3)
3686 ath_setdefantenna(sc, rs->rs_antenna);
3688 sc->sc_rxotherant = 0;
3690 if (sc->sc_softled) {
3692 * Blink for any data frame. Otherwise do a
3693 * heartbeat-style blink when idle. The latter
3694 * is mainly for station mode where we depend on
3695 * periodic beacon frames to trigger the poll event.
3697 if (type == IEEE80211_FC0_TYPE_DATA) {
3698 sc->sc_rxrate = rs->rs_rate;
3699 ath_led_event(sc, ATH_LED_RX);
3700 } else if (ticks - sc->sc_ledevent >= sc->sc_ledidle)
3701 ath_led_event(sc, ATH_LED_POLL);
3704 * Arrange to update the last rx timestamp only for
3705 * frames from our ap when operating in station mode.
3706 * This assumes the rx key is always setup when associated.
3708 if (ic->ic_opmode == IEEE80211_M_STA &&
3709 rs->rs_keyix != HAL_RXKEYIX_INVALID)
3712 STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
3713 } while (ath_rxbuf_init(sc, bf) == 0);
3715 /* rx signal state monitoring */
3716 ath_hal_rxmonitor(ah, &sc->sc_halstats, &sc->sc_curchan);
3718 sc->sc_lastrx = tsf;
3720 /* NB: may want to check mgtq too */
3721 if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 &&
3722 !IFQ_IS_EMPTY(&ifp->if_snd))
3729 ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum)
3731 txq->axq_qnum = qnum;
3733 txq->axq_intrcnt = 0;
3734 txq->axq_link = NULL;
3735 STAILQ_INIT(&txq->axq_q);
3736 ATH_TXQ_LOCK_INIT(sc, txq);
3737 TAILQ_INIT(&txq->axq_stageq);
3738 txq->axq_curage = 0;
3742 * Setup a h/w transmit queue.
3744 static struct ath_txq *
3745 ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
3747 #define N(a) (sizeof(a)/sizeof(a[0]))
3748 struct ath_hal *ah = sc->sc_ah;
3752 memset(&qi, 0, sizeof(qi));
3753 qi.tqi_subtype = subtype;
3754 qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
3755 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
3756 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
3758 * Enable interrupts only for EOL and DESC conditions.
3759 * We mark tx descriptors to receive a DESC interrupt
3760 * when a tx queue gets deep; otherwise waiting for the
3761 * EOL to reap descriptors. Note that this is done to
3762 * reduce interrupt load and this only defers reaping
3763 * descriptors, never transmitting frames. Aside from
3764 * reducing interrupts this also permits more concurrency.
3765 * The only potential downside is if the tx queue backs
3766 * up in which case the top half of the kernel may backup
3767 * due to a lack of tx descriptors.
3769 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE;
3770 qnum = ath_hal_setuptxqueue(ah, qtype, &qi);
3773 * NB: don't print a message, this happens
3774 * normally on parts with too few tx queues
3778 if (qnum >= N(sc->sc_txq)) {
3779 device_printf(sc->sc_dev,
3780 "hal qnum %u out of range, max %zu!\n",
3781 qnum, N(sc->sc_txq));
3782 ath_hal_releasetxqueue(ah, qnum);
3785 if (!ATH_TXQ_SETUP(sc, qnum)) {
3786 ath_txq_init(sc, &sc->sc_txq[qnum], qnum);
3787 sc->sc_txqsetup |= 1<<qnum;
3789 return &sc->sc_txq[qnum];
3794 * Setup a hardware data transmit queue for the specified
3795 * access control. The hal may not support all requested
3796 * queues in which case it will return a reference to a
3797 * previously setup queue. We record the mapping from ac's
3798 * to h/w queues for use by ath_tx_start and also track
3799 * the set of h/w queues being used to optimize work in the
3800 * transmit interrupt handler and related routines.
3803 ath_tx_setup(struct ath_softc *sc, int ac, int haltype)
3805 #define N(a) (sizeof(a)/sizeof(a[0]))
3806 struct ath_txq *txq;
3808 if (ac >= N(sc->sc_ac2q)) {
3809 device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
3810 ac, N(sc->sc_ac2q));
3813 txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype);
3815 sc->sc_ac2q[ac] = txq;
3823 * Update WME parameters for a transmit queue.
3826 ath_txq_update(struct ath_softc *sc, int ac)
3828 #define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1)
3829 #define ATH_TXOP_TO_US(v) (v<<5)
3830 struct ieee80211com *ic = &sc->sc_ic;
3831 struct ath_txq *txq = sc->sc_ac2q[ac];
3832 struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
3833 struct ath_hal *ah = sc->sc_ah;
3836 ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi);
3837 qi.tqi_aifs = wmep->wmep_aifsn;
3838 qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
3839 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
3840 qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit);
3842 if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) {
3843 device_printf(sc->sc_dev, "unable to update hardware queue "
3844 "parameters for %s traffic!\n",
3845 ieee80211_wme_acnames[ac]);
3848 ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */
3851 #undef ATH_TXOP_TO_US
3852 #undef ATH_EXPONENT_TO_VALUE
3856 * Callback from the 802.11 layer to update WME parameters.
3859 ath_wme_update(struct ieee80211com *ic)
3861 struct ath_softc *sc = ic->ic_ifp->if_softc;
3863 return !ath_txq_update(sc, WME_AC_BE) ||
3864 !ath_txq_update(sc, WME_AC_BK) ||
3865 !ath_txq_update(sc, WME_AC_VI) ||
3866 !ath_txq_update(sc, WME_AC_VO) ? EIO : 0;
3870 * Reclaim resources for a setup queue.
3873 ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
3876 ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum);
3877 ATH_TXQ_LOCK_DESTROY(txq);
3878 sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
3882 * Reclaim all tx queue resources.
3885 ath_tx_cleanup(struct ath_softc *sc)
3889 ATH_TXBUF_LOCK_DESTROY(sc);
3890 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
3891 if (ATH_TXQ_SETUP(sc, i))
3892 ath_tx_cleanupq(sc, &sc->sc_txq[i]);
3893 ATH_TXQ_LOCK_DESTROY(&sc->sc_mcastq);
3897 * Return h/w rate index for an IEEE rate (w/o basic rate bit).
3900 ath_tx_findrix(const HAL_RATE_TABLE *rt, int rate)
3904 for (i = 0; i < rt->rateCount; i++)
3905 if ((rt->info[i].dot11Rate & IEEE80211_RATE_VAL) == rate)
3907 return 0; /* NB: lowest rate */
3911 * Reclaim mbuf resources. For fragmented frames we
3912 * need to claim each frag chained with m_nextpkt.
3915 ath_freetx(struct mbuf *m)
3920 next = m->m_nextpkt;
3921 m->m_nextpkt = NULL;
3923 } while ((m = next) != NULL);
3927 ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
3933 * Load the DMA map so any coalescing is done. This
3934 * also calculates the number of descriptors we need.
3936 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
3937 bf->bf_segs, &bf->bf_nseg,
3939 if (error == EFBIG) {
3940 /* XXX packet requires too many descriptors */
3941 bf->bf_nseg = ATH_TXDESC+1;
3942 } else if (error != 0) {
3943 sc->sc_stats.ast_tx_busdma++;
3948 * Discard null packets and check for packets that
3949 * require too many TX descriptors. We try to convert
3950 * the latter to a cluster.
3952 if (bf->bf_nseg > ATH_TXDESC) { /* too many desc's, linearize */
3953 sc->sc_stats.ast_tx_linear++;
3954 m = m_collapse(m0, M_DONTWAIT, ATH_TXDESC);
3957 sc->sc_stats.ast_tx_nombuf++;
3961 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
3962 bf->bf_segs, &bf->bf_nseg,
3965 sc->sc_stats.ast_tx_busdma++;
3969 KASSERT(bf->bf_nseg <= ATH_TXDESC,
3970 ("too many segments after defrag; nseg %u", bf->bf_nseg));
3971 } else if (bf->bf_nseg == 0) { /* null packet, discard */
3972 sc->sc_stats.ast_tx_nodata++;
3976 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",
3977 __func__, m0, m0->m_pkthdr.len);
3978 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
3985 ath_tx_handoff(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf)
3987 struct ath_hal *ah = sc->sc_ah;
3988 struct ath_desc *ds, *ds0;
3992 * Fillin the remainder of the descriptor info.
3994 ds0 = ds = bf->bf_desc;
3995 for (i = 0; i < bf->bf_nseg; i++, ds++) {
3996 ds->ds_data = bf->bf_segs[i].ds_addr;
3997 if (i == bf->bf_nseg - 1)
4000 ds->ds_link = bf->bf_daddr + sizeof(*ds) * (i + 1);
4001 ath_hal_filltxdesc(ah, ds
4002 , bf->bf_segs[i].ds_len /* segment length */
4003 , i == 0 /* first segment */
4004 , i == bf->bf_nseg - 1 /* last segment */
4005 , ds0 /* first descriptor */
4007 DPRINTF(sc, ATH_DEBUG_XMIT,
4008 "%s: %d: %08x %08x %08x %08x %08x %08x\n",
4009 __func__, i, ds->ds_link, ds->ds_data,
4010 ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]);
4013 * Insert the frame on the outbound list and pass it on
4014 * to the hardware. Multicast frames buffered for power
4015 * save stations and transmit from the CAB queue are stored
4016 * on a s/w only queue and loaded on to the CAB queue in
4017 * the SWBA handler since frames only go out on DTIM and
4018 * to avoid possible races.
4021 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
4022 if (txq != &sc->sc_mcastq) {
4023 if (txq->axq_link == NULL) {
4024 ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
4025 DPRINTF(sc, ATH_DEBUG_XMIT,
4026 "%s: TXDP[%u] = %p (%p) depth %d\n", __func__,
4027 txq->axq_qnum, (caddr_t)bf->bf_daddr, bf->bf_desc,
4030 *txq->axq_link = bf->bf_daddr;
4031 DPRINTF(sc, ATH_DEBUG_XMIT,
4032 "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
4033 txq->axq_qnum, txq->axq_link,
4034 (caddr_t)bf->bf_daddr, bf->bf_desc, txq->axq_depth);
4036 txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link;
4037 ath_hal_txstart(ah, txq->axq_qnum);
4039 if (txq->axq_link != NULL)
4040 *txq->axq_link = bf->bf_daddr;
4041 txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link;
4043 ATH_TXQ_UNLOCK(txq);
4047 ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf,
4050 struct ieee80211com *ic = &sc->sc_ic;
4051 struct ath_hal *ah = sc->sc_ah;
4052 struct ifnet *ifp = sc->sc_ifp;
4053 const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams;
4054 int error, iswep, ismcast, isfrag, ismrr;
4055 int keyix, hdrlen, pktlen, try0;
4056 u_int8_t rix, txrate, ctsrate;
4057 u_int8_t cix = 0xff; /* NB: silence compiler */
4058 struct ath_desc *ds;
4059 struct ath_txq *txq;
4060 struct ieee80211_frame *wh;
4061 u_int subtype, flags, ctsduration;
4063 const HAL_RATE_TABLE *rt;
4064 HAL_BOOL shortPreamble;
4065 struct ath_node *an;
4068 wh = mtod(m0, struct ieee80211_frame *);
4069 iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
4070 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
4071 isfrag = m0->m_flags & M_FRAG;
4072 hdrlen = ieee80211_anyhdrsize(wh);
4074 * Packet length must not include any
4075 * pad bytes; deduct them here.
4077 pktlen = m0->m_pkthdr.len - (hdrlen & 3);
4080 const struct ieee80211_cipher *cip;
4081 struct ieee80211_key *k;
4084 * Construct the 802.11 header+trailer for an encrypted
4085 * frame. The only reason this can fail is because of an
4086 * unknown or unsupported cipher/key type.
4088 k = ieee80211_crypto_encap(ic, ni, m0);
4091 * This can happen when the key is yanked after the
4092 * frame was queued. Just discard the frame; the
4093 * 802.11 layer counts failures and provides
4094 * debugging/diagnostics.
4100 * Adjust the packet + header lengths for the crypto
4101 * additions and calculate the h/w key index. When
4102 * a s/w mic is done the frame will have had any mic
4103 * added to it prior to entry so m0->m_pkthdr.len will
4104 * account for it. Otherwise we need to add it to the
4108 hdrlen += cip->ic_header;
4109 pktlen += cip->ic_header + cip->ic_trailer;
4110 /* NB: frags always have any TKIP MIC done in s/w */
4111 if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag)
4112 pktlen += cip->ic_miclen;
4113 keyix = k->wk_keyix;
4115 /* packet header may have moved, reset our local pointer */
4116 wh = mtod(m0, struct ieee80211_frame *);
4117 } else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
4119 * Use station key cache slot, if assigned.
4121 keyix = ni->ni_ucastkey.wk_keyix;
4122 if (keyix == IEEE80211_KEYIX_NONE)
4123 keyix = HAL_TXKEYIX_INVALID;
4125 keyix = HAL_TXKEYIX_INVALID;
4127 pktlen += IEEE80211_CRC_LEN;
4130 * Load the DMA map so any coalescing is done. This
4131 * also calculates the number of descriptors we need.
4133 error = ath_tx_dmasetup(sc, bf, m0);
4136 bf->bf_node = ni; /* NB: held reference */
4137 m0 = bf->bf_m; /* NB: may have changed */
4138 wh = mtod(m0, struct ieee80211_frame *);
4140 /* setup descriptors */
4142 rt = sc->sc_currates;
4143 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
4146 * NB: the 802.11 layer marks whether or not we should
4147 * use short preamble based on the current mode and
4148 * negotiated parameters.
4150 if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
4151 (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
4152 shortPreamble = AH_TRUE;
4153 sc->sc_stats.ast_tx_shortpre++;
4155 shortPreamble = AH_FALSE;
4159 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */
4160 ismrr = 0; /* default no multi-rate retry*/
4162 * Calculate Atheros packet type from IEEE80211 packet header,
4163 * setup for rate calculations, and select h/w transmit queue.
4165 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
4166 case IEEE80211_FC0_TYPE_MGT:
4167 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4168 if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
4169 atype = HAL_PKT_TYPE_BEACON;
4170 else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
4171 atype = HAL_PKT_TYPE_PROBE_RESP;
4172 else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
4173 atype = HAL_PKT_TYPE_ATIM;
4175 atype = HAL_PKT_TYPE_NORMAL; /* XXX */
4176 rix = sc->sc_minrateix;
4177 txrate = rt->info[rix].rateCode;
4179 txrate |= rt->info[rix].shortPreamble;
4180 try0 = ATH_TXMGTTRY;
4181 /* NB: force all management frames to highest queue */
4182 if (ni->ni_flags & IEEE80211_NODE_QOS) {
4183 /* NB: force all management frames to highest queue */
4187 flags |= HAL_TXDESC_INTREQ; /* force interrupt */
4189 case IEEE80211_FC0_TYPE_CTL:
4190 atype = HAL_PKT_TYPE_PSPOLL; /* stop setting of duration */
4191 rix = sc->sc_minrateix;
4192 txrate = rt->info[rix].rateCode;
4194 txrate |= rt->info[rix].shortPreamble;
4195 try0 = ATH_TXMGTTRY;
4196 /* NB: force all ctl frames to highest queue */
4197 if (ni->ni_flags & IEEE80211_NODE_QOS) {
4198 /* NB: force all ctl frames to highest queue */
4202 flags |= HAL_TXDESC_INTREQ; /* force interrupt */
4204 case IEEE80211_FC0_TYPE_DATA:
4205 atype = HAL_PKT_TYPE_NORMAL; /* default */
4207 * Data frames: multicast frames go out at a fixed rate,
4208 * otherwise consult the rate control module for the
4213 * Check mcast rate setting in case it's changed.
4214 * XXX move out of fastpath
4216 if (ic->ic_mcast_rate != sc->sc_mcastrate) {
4218 ath_tx_findrix(rt, ic->ic_mcast_rate);
4219 sc->sc_mcastrate = ic->ic_mcast_rate;
4221 rix = sc->sc_mcastrix;
4222 txrate = rt->info[rix].rateCode;
4224 txrate |= rt->info[rix].shortPreamble;
4227 ath_rate_findrate(sc, an, shortPreamble, pktlen,
4228 &rix, &try0, &txrate);
4229 sc->sc_txrate = txrate; /* for LED blinking */
4230 sc->sc_lastdatarix = rix; /* for fast frames */
4231 if (try0 != ATH_TXMAXTRY)
4234 pri = M_WME_GETAC(m0);
4235 if (cap->cap_wmeParams[pri].wmep_noackPolicy)
4236 flags |= HAL_TXDESC_NOACK;
4239 if_printf(ifp, "bogus frame type 0x%x (%s)\n",
4240 wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
4245 txq = sc->sc_ac2q[pri];
4248 * When servicing one or more stations in power-save mode
4249 * (or) if there is some mcast data waiting on the mcast
4250 * queue (to prevent out of order delivery) multicast
4251 * frames must be buffered until after the beacon.
4253 if (ismcast && (ic->ic_ps_sta || sc->sc_mcastq.axq_depth)) {
4254 txq = &sc->sc_mcastq;
4255 /* XXX? more bit in 802.11 frame header */
4259 * Calculate miscellaneous flags.
4262 flags |= HAL_TXDESC_NOACK; /* no ack on broad/multicast */
4263 } else if (pktlen > ic->ic_rtsthreshold &&
4264 (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
4265 flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */
4266 cix = rt->info[rix].controlRate;
4267 sc->sc_stats.ast_tx_rts++;
4269 if (flags & HAL_TXDESC_NOACK) /* NB: avoid double counting */
4270 sc->sc_stats.ast_tx_noack++;
4273 * If 802.11g protection is enabled, determine whether
4274 * to use RTS/CTS or just CTS. Note that this is only
4275 * done for OFDM unicast frames.
4277 if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
4278 rt->info[rix].phy == IEEE80211_T_OFDM &&
4279 (flags & HAL_TXDESC_NOACK) == 0) {
4280 /* XXX fragments must use CCK rates w/ protection */
4281 if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
4282 flags |= HAL_TXDESC_RTSENA;
4283 else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
4284 flags |= HAL_TXDESC_CTSENA;
4287 * For frags it would be desirable to use the
4288 * highest CCK rate for RTS/CTS. But stations
4289 * farther away may detect it at a lower CCK rate
4290 * so use the configured protection rate instead
4293 cix = rt->info[sc->sc_protrix].controlRate;
4295 cix = rt->info[sc->sc_protrix].controlRate;
4296 sc->sc_stats.ast_tx_protect++;
4300 * Calculate duration. This logically belongs in the 802.11
4301 * layer but it lacks sufficient information to calculate it.
4303 if ((flags & HAL_TXDESC_NOACK) == 0 &&
4304 (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
4307 dur = rt->info[rix].spAckDuration;
4309 dur = rt->info[rix].lpAckDuration;
4310 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
4311 dur += dur; /* additional SIFS+ACK */
4312 KASSERT(m0->m_nextpkt != NULL, ("no fragment"));
4314 * Include the size of next fragment so NAV is
4315 * updated properly. The last fragment uses only
4318 dur += ath_hal_computetxtime(ah, rt,
4319 m0->m_nextpkt->m_pkthdr.len,
4320 rix, shortPreamble);
4324 * Force hardware to use computed duration for next
4325 * fragment by disabling multi-rate retry which updates
4326 * duration based on the multi-rate duration table.
4329 try0 = ATH_TXMGTTRY; /* XXX? */
4331 *(u_int16_t *)wh->i_dur = htole16(dur);
4335 * Calculate RTS/CTS rate and duration if needed.
4338 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) {
4340 * CTS transmit rate is derived from the transmit rate
4341 * by looking in the h/w rate table. We must also factor
4342 * in whether or not a short preamble is to be used.
4344 /* NB: cix is set above where RTS/CTS is enabled */
4345 KASSERT(cix != 0xff, ("cix not setup"));
4346 ctsrate = rt->info[cix].rateCode;
4348 * Compute the transmit duration based on the frame
4349 * size and the size of an ACK frame. We call into the
4350 * HAL to do the computation since it depends on the
4351 * characteristics of the actual PHY being used.
4353 * NB: CTS is assumed the same size as an ACK so we can
4354 * use the precalculated ACK durations.
4356 if (shortPreamble) {
4357 ctsrate |= rt->info[cix].shortPreamble;
4358 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */
4359 ctsduration += rt->info[cix].spAckDuration;
4360 ctsduration += ath_hal_computetxtime(ah,
4361 rt, pktlen, rix, AH_TRUE);
4362 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */
4363 ctsduration += rt->info[rix].spAckDuration;
4365 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */
4366 ctsduration += rt->info[cix].lpAckDuration;
4367 ctsduration += ath_hal_computetxtime(ah,
4368 rt, pktlen, rix, AH_FALSE);
4369 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */
4370 ctsduration += rt->info[rix].lpAckDuration;
4373 * Must disable multi-rate retry when using RTS/CTS.
4376 try0 = ATH_TXMGTTRY; /* XXX */
4381 * At this point we are committed to sending the frame
4382 * and we don't need to look at m_nextpkt; clear it in
4383 * case this frame is part of frag chain.
4385 m0->m_nextpkt = NULL;
4387 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
4388 ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len,
4389 sc->sc_hwmap[txrate].ieeerate, -1);
4391 if (bpf_peers_present(ic->ic_rawbpf))
4392 bpf_mtap(ic->ic_rawbpf, m0);
4393 if (bpf_peers_present(sc->sc_drvbpf)) {
4394 u_int64_t tsf = ath_hal_gettsf64(ah);
4396 sc->sc_tx_th.wt_tsf = htole64(tsf);
4397 sc->sc_tx_th.wt_flags = sc->sc_hwmap[txrate].txflags;
4399 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
4401 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
4402 sc->sc_tx_th.wt_rate = sc->sc_hwmap[txrate].ieeerate;
4403 sc->sc_tx_th.wt_txpower = ni->ni_txpower;
4404 sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
4406 bpf_mtap2(sc->sc_drvbpf,
4407 &sc->sc_tx_th, sc->sc_tx_th_len, m0);
4411 * Determine if a tx interrupt should be generated for
4412 * this descriptor. We take a tx interrupt to reap
4413 * descriptors when the h/w hits an EOL condition or
4414 * when the descriptor is specifically marked to generate
4415 * an interrupt. We periodically mark descriptors in this
4416 * way to insure timely replenishing of the supply needed
4417 * for sending frames. Defering interrupts reduces system
4418 * load and potentially allows more concurrent work to be
4419 * done but if done to aggressively can cause senders to
4422 * NB: use >= to deal with sc_txintrperiod changing
4423 * dynamically through sysctl.
4425 if (flags & HAL_TXDESC_INTREQ) {
4426 txq->axq_intrcnt = 0;
4427 } else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
4428 flags |= HAL_TXDESC_INTREQ;
4429 txq->axq_intrcnt = 0;
4433 * Formulate first tx descriptor with tx controls.
4435 /* XXX check return value? */
4436 ath_hal_setuptxdesc(ah, ds
4437 , pktlen /* packet length */
4438 , hdrlen /* header length */
4439 , atype /* Atheros packet type */
4440 , ni->ni_txpower /* txpower */
4441 , txrate, try0 /* series 0 rate/tries */
4442 , keyix /* key cache index */
4443 , sc->sc_txantenna /* antenna mode */
4445 , ctsrate /* rts/cts rate */
4446 , ctsduration /* rts/cts duration */
4448 bf->bf_flags = flags;
4450 * Setup the multi-rate retry state only when we're
4451 * going to use it. This assumes ath_hal_setuptxdesc
4452 * initializes the descriptors (so we don't have to)
4453 * when the hardware supports multi-rate retry and
4457 ath_rate_setupxtxdesc(sc, an, ds, shortPreamble, rix);
4459 ath_tx_handoff(sc, txq, bf);
4464 * Process completed xmit descriptors from the specified queue.
4467 ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
4469 struct ath_hal *ah = sc->sc_ah;
4470 struct ieee80211com *ic = &sc->sc_ic;
4472 struct ath_desc *ds, *ds0;
4473 struct ath_tx_status *ts;
4474 struct ieee80211_node *ni;
4475 struct ath_node *an;
4476 int sr, lr, pri, nacked;
4479 DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n",
4480 __func__, txq->axq_qnum,
4481 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
4486 txq->axq_intrcnt = 0; /* reset periodic desc intr count */
4487 bf = STAILQ_FIRST(&txq->axq_q);
4489 ATH_TXQ_UNLOCK(txq);
4492 ds0 = &bf->bf_desc[0];
4493 ds = &bf->bf_desc[bf->bf_nseg - 1];
4494 ts = &bf->bf_status.ds_txstat;
4495 status = ath_hal_txprocdesc(ah, ds, ts);
4497 if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
4498 ath_printtxbuf(bf, txq->axq_qnum, 0, status == HAL_OK);
4500 if (status == HAL_EINPROGRESS) {
4501 ATH_TXQ_UNLOCK(txq);
4504 ATH_TXQ_REMOVE_HEAD(txq, bf_list);
4505 if (txq->axq_depth == 0)
4506 txq->axq_link = NULL;
4507 ATH_TXQ_UNLOCK(txq);
4512 if (ts->ts_status == 0) {
4513 u_int8_t txant = ts->ts_antenna;
4514 sc->sc_stats.ast_ant_tx[txant]++;
4515 sc->sc_ant_tx[txant]++;
4516 if (ts->ts_rate & HAL_TXSTAT_ALTRATE)
4517 sc->sc_stats.ast_tx_altrate++;
4518 sc->sc_stats.ast_tx_rssi = ts->ts_rssi;
4519 ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
4521 pri = M_WME_GETAC(bf->bf_m);
4522 if (pri >= WME_AC_VO)
4523 ic->ic_wme.wme_hipri_traffic++;
4524 ni->ni_inact = ni->ni_inact_reload;
4526 if (ts->ts_status & HAL_TXERR_XRETRY)
4527 sc->sc_stats.ast_tx_xretries++;
4528 if (ts->ts_status & HAL_TXERR_FIFO)
4529 sc->sc_stats.ast_tx_fifoerr++;
4530 if (ts->ts_status & HAL_TXERR_FILT)
4531 sc->sc_stats.ast_tx_filtered++;
4532 if (bf->bf_m->m_flags & M_FF)
4533 sc->sc_stats.ast_ff_txerr++;
4535 sr = ts->ts_shortretry;
4536 lr = ts->ts_longretry;
4537 sc->sc_stats.ast_tx_shortretry += sr;
4538 sc->sc_stats.ast_tx_longretry += lr;
4540 * Hand the descriptor to the rate control algorithm.
4542 if ((ts->ts_status & HAL_TXERR_FILT) == 0 &&
4543 (bf->bf_flags & HAL_TXDESC_NOACK) == 0) {
4545 * If frame was ack'd update the last rx time
4546 * used to workaround phantom bmiss interrupts.
4548 if (ts->ts_status == 0)
4550 ath_rate_tx_complete(sc, an, bf);
4553 * Do any tx complete callback. Note this must
4554 * be done before releasing the node reference.
4556 if (bf->bf_m->m_flags & M_TXCB)
4557 ieee80211_process_callback(ni, bf->bf_m,
4560 * Reclaim reference to node.
4562 * NB: the node may be reclaimed here if, for example
4563 * this is a DEAUTH message that was sent and the
4564 * node was timed out due to inactivity.
4566 ieee80211_free_node(ni);
4568 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
4569 BUS_DMASYNC_POSTWRITE);
4570 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
4577 STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
4578 ATH_TXBUF_UNLOCK(sc);
4581 * Flush fast-frame staging queue when traffic slows.
4583 if (txq->axq_depth <= 1)
4584 ath_ff_stageq_flush(sc, txq, ath_ff_always);
4589 txqactive(struct ath_hal *ah, int qnum)
4591 u_int32_t txqs = 1<<qnum;
4592 ath_hal_gettxintrtxqs(ah, &txqs);
4593 return (txqs & (1<<qnum));
4597 * Deferred processing of transmit interrupt; special-cased
4598 * for a single hardware transmit queue (e.g. 5210 and 5211).
4601 ath_tx_proc_q0(void *arg, int npending)
4603 struct ath_softc *sc = arg;
4604 struct ifnet *ifp = sc->sc_ifp;
4606 if (txqactive(sc->sc_ah, 0) && ath_tx_processq(sc, &sc->sc_txq[0]))
4607 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
4608 if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
4609 ath_tx_processq(sc, sc->sc_cabq);
4610 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4614 ath_led_event(sc, ATH_LED_TX);
4620 * Deferred processing of transmit interrupt; special-cased
4621 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support).
4624 ath_tx_proc_q0123(void *arg, int npending)
4626 struct ath_softc *sc = arg;
4627 struct ifnet *ifp = sc->sc_ifp;
4631 * Process each active queue.
4634 if (txqactive(sc->sc_ah, 0))
4635 nacked += ath_tx_processq(sc, &sc->sc_txq[0]);
4636 if (txqactive(sc->sc_ah, 1))
4637 nacked += ath_tx_processq(sc, &sc->sc_txq[1]);
4638 if (txqactive(sc->sc_ah, 2))
4639 nacked += ath_tx_processq(sc, &sc->sc_txq[2]);
4640 if (txqactive(sc->sc_ah, 3))
4641 nacked += ath_tx_processq(sc, &sc->sc_txq[3]);
4642 if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
4643 ath_tx_processq(sc, sc->sc_cabq);
4645 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
4647 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4651 ath_led_event(sc, ATH_LED_TX);
4657 * Deferred processing of transmit interrupt.
4660 ath_tx_proc(void *arg, int npending)
4662 struct ath_softc *sc = arg;
4663 struct ifnet *ifp = sc->sc_ifp;
4667 * Process each active queue.
4670 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
4671 if (ATH_TXQ_SETUP(sc, i) && txqactive(sc->sc_ah, i))
4672 nacked += ath_tx_processq(sc, &sc->sc_txq[i]);
4674 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
4676 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4680 ath_led_event(sc, ATH_LED_TX);
4686 ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
4689 struct ath_hal *ah = sc->sc_ah;
4691 struct ieee80211_node *ni;
4696 * NB: this assumes output has been stopped and
4697 * we do not need to block ath_tx_tasklet
4699 for (ix = 0;; ix++) {
4701 bf = STAILQ_FIRST(&txq->axq_q);
4703 txq->axq_link = NULL;
4704 ATH_TXQ_UNLOCK(txq);
4707 ATH_TXQ_REMOVE_HEAD(txq, bf_list);
4708 ATH_TXQ_UNLOCK(txq);
4710 if (sc->sc_debug & ATH_DEBUG_RESET) {
4711 ath_printtxbuf(bf, txq->axq_qnum, ix,
4712 ath_hal_txprocdesc(ah, bf->bf_desc,
4713 &bf->bf_status.ds_txstat) == HAL_OK);
4714 ieee80211_dump_pkt(&sc->sc_ic, mtod(bf->bf_m, caddr_t),
4715 bf->bf_m->m_len, 0, -1);
4717 #endif /* ATH_DEBUG */
4718 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
4723 * Do any callback and reclaim the node reference.
4725 if (bf->bf_m->m_flags & M_TXCB)
4726 ieee80211_process_callback(ni, bf->bf_m, -1);
4727 ieee80211_free_node(ni);
4733 STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
4734 ATH_TXBUF_UNLOCK(sc);
4739 ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
4741 struct ath_hal *ah = sc->sc_ah;
4743 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
4744 __func__, txq->axq_qnum,
4745 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum),
4747 (void) ath_hal_stoptxdma(ah, txq->axq_qnum);
4751 * Drain the transmit queues and reclaim resources.
4754 ath_draintxq(struct ath_softc *sc)
4756 struct ath_hal *ah = sc->sc_ah;
4757 struct ifnet *ifp = sc->sc_ifp;
4760 /* XXX return value */
4761 if (!sc->sc_invalid) {
4762 /* don't touch the hardware if marked invalid */
4763 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
4764 __func__, sc->sc_bhalq,
4765 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq),
4767 (void) ath_hal_stoptxdma(ah, sc->sc_bhalq);
4768 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
4769 if (ATH_TXQ_SETUP(sc, i))
4770 ath_tx_stopdma(sc, &sc->sc_txq[i]);
4772 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
4773 if (ATH_TXQ_SETUP(sc, i))
4774 ath_tx_draintxq(sc, &sc->sc_txq[i]);
4775 ath_tx_draintxq(sc, &sc->sc_mcastq);
4777 if (sc->sc_debug & ATH_DEBUG_RESET) {
4778 struct ath_buf *bf = STAILQ_FIRST(&sc->sc_bbuf);
4779 if (bf != NULL && bf->bf_m != NULL) {
4780 ath_printtxbuf(bf, sc->sc_bhalq, 0,
4781 ath_hal_txprocdesc(ah, bf->bf_desc,
4782 &bf->bf_status.ds_txstat) == HAL_OK);
4783 ieee80211_dump_pkt(&sc->sc_ic, mtod(bf->bf_m, caddr_t),
4784 bf->bf_m->m_len, 0, -1);
4787 #endif /* ATH_DEBUG */
4788 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4793 * Disable the receive h/w in preparation for a reset.
4796 ath_stoprecv(struct ath_softc *sc)
4798 #define PA2DESC(_sc, _pa) \
4799 ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
4800 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
4801 struct ath_hal *ah = sc->sc_ah;
4803 ath_hal_stoppcurecv(ah); /* disable PCU */
4804 ath_hal_setrxfilter(ah, 0); /* clear recv filter */
4805 ath_hal_stopdmarecv(ah); /* disable DMA engine */
4806 DELAY(3000); /* 3ms is long enough for 1 frame */
4808 if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) {
4812 printf("%s: rx queue %p, link %p\n", __func__,
4813 (caddr_t)(uintptr_t) ath_hal_getrxbuf(ah), sc->sc_rxlink);
4815 STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
4816 struct ath_desc *ds = bf->bf_desc;
4817 struct ath_rx_status *rs = &bf->bf_status.ds_rxstat;
4818 HAL_STATUS status = ath_hal_rxprocdesc(ah, ds,
4819 bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
4820 if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL))
4821 ath_printrxbuf(bf, ix, status == HAL_OK);
4826 if (sc->sc_rxpending != NULL) {
4827 m_freem(sc->sc_rxpending);
4828 sc->sc_rxpending = NULL;
4830 sc->sc_rxlink = NULL; /* just in case */
4835 * Enable the receive h/w following a reset.
4838 ath_startrecv(struct ath_softc *sc)
4840 struct ath_hal *ah = sc->sc_ah;
4843 sc->sc_rxlink = NULL;
4844 sc->sc_rxpending = NULL;
4845 STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
4846 int error = ath_rxbuf_init(sc, bf);
4848 DPRINTF(sc, ATH_DEBUG_RECV,
4849 "%s: ath_rxbuf_init failed %d\n",
4855 bf = STAILQ_FIRST(&sc->sc_rxbuf);
4856 ath_hal_putrxbuf(ah, bf->bf_daddr);
4857 ath_hal_rxena(ah); /* enable recv descriptors */
4858 ath_mode_init(sc); /* set filters, etc. */
4859 ath_hal_startpcurecv(ah); /* re-enable PCU/DMA engine */
4864 * Update internal state after a channel change.
4867 ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan)
4869 enum ieee80211_phymode mode;
4872 * Change channels and update the h/w rate map
4873 * if we're switching; e.g. 11a to 11b/g.
4875 if (IEEE80211_IS_CHAN_HALF(chan))
4876 mode = IEEE80211_MODE_HALF;
4877 else if (IEEE80211_IS_CHAN_QUARTER(chan))
4878 mode = IEEE80211_MODE_QUARTER;
4880 mode = ieee80211_chan2mode(chan);
4881 if (mode != sc->sc_curmode)
4882 ath_setcurmode(sc, mode);
4884 sc->sc_rx_th.wr_chan_flags = htole32(chan->ic_flags);
4885 sc->sc_tx_th.wt_chan_flags = sc->sc_rx_th.wr_chan_flags;
4886 sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq);
4887 sc->sc_tx_th.wt_chan_freq = sc->sc_rx_th.wr_chan_freq;
4888 sc->sc_rx_th.wr_chan_ieee = chan->ic_ieee;
4889 sc->sc_tx_th.wt_chan_ieee = sc->sc_rx_th.wr_chan_ieee;
4890 sc->sc_rx_th.wr_chan_maxpow = chan->ic_maxregpower;
4891 sc->sc_tx_th.wt_chan_maxpow = sc->sc_rx_th.wr_chan_maxpow;
4895 * Set/change channels. If the channel is really being changed,
4896 * it's done by reseting the chip. To accomplish this we must
4897 * first cleanup any pending DMA, then restart stuff after a la
4901 ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
4903 struct ath_hal *ah = sc->sc_ah;
4904 struct ieee80211com *ic = &sc->sc_ic;
4908 * Convert to a HAL channel description with
4909 * the flags constrained to reflect the current
4912 ath_mapchan(&hchan, chan);
4914 DPRINTF(sc, ATH_DEBUG_RESET,
4915 "%s: %u (%u MHz, hal flags 0x%x) -> %u (%u MHz, hal flags 0x%x)\n",
4917 ath_hal_mhz2ieee(ah, sc->sc_curchan.channel,
4918 sc->sc_curchan.channelFlags),
4919 sc->sc_curchan.channel, sc->sc_curchan.channelFlags,
4920 ath_hal_mhz2ieee(ah, hchan.channel, hchan.channelFlags),
4921 hchan.channel, hchan.channelFlags);
4922 if (hchan.channel != sc->sc_curchan.channel ||
4923 hchan.channelFlags != sc->sc_curchan.channelFlags) {
4927 * To switch channels clear any pending DMA operations;
4928 * wait long enough for the RX fifo to drain, reset the
4929 * hardware at the new frequency, and then re-enable
4930 * the relevant bits of the h/w.
4932 ath_hal_intrset(ah, 0); /* disable interrupts */
4933 ath_draintxq(sc); /* clear pending tx frames */
4934 ath_stoprecv(sc); /* turn off frame recv */
4935 if (!ath_hal_reset(ah, sc->sc_opmode, &hchan, AH_TRUE, &status)) {
4936 if_printf(ic->ic_ifp, "%s: unable to reset "
4937 "channel %u (%u Mhz, flags 0x%x hal flags 0x%x), "
4938 "hal status %u\n", __func__,
4939 ieee80211_chan2ieee(ic, chan), chan->ic_freq,
4940 chan->ic_flags, hchan.channelFlags, status);
4943 sc->sc_curchan = hchan;
4944 ath_update_txpow(sc); /* update tx power state */
4945 sc->sc_diversity = ath_hal_getdiversity(ah);
4946 sc->sc_calinterval = 1;
4947 sc->sc_caltries = 0;
4950 * Re-enable rx framework.
4952 if (ath_startrecv(sc) != 0) {
4953 if_printf(ic->ic_ifp,
4954 "%s: unable to restart recv logic\n", __func__);
4959 * Change channels and update the h/w rate map
4960 * if we're switching; e.g. 11a to 11b/g.
4962 ath_chan_change(sc, chan);
4965 * Re-enable interrupts.
4967 ath_hal_intrset(ah, sc->sc_imask);
4973 * Periodically recalibrate the PHY to account
4974 * for temperature/environment changes.
4977 ath_calibrate(void *arg)
4979 struct ath_softc *sc = arg;
4980 struct ath_hal *ah = sc->sc_ah;
4983 sc->sc_stats.ast_per_cal++;
4985 if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) {
4987 * Rfgain is out of bounds, reset the chip
4988 * to load new gain values.
4990 DPRINTF(sc, ATH_DEBUG_CALIBRATE,
4991 "%s: rfgain change\n", __func__);
4992 sc->sc_stats.ast_per_rfgain++;
4993 ath_reset(sc->sc_ifp);
4995 if (!ath_hal_calibrate(ah, &sc->sc_curchan, &iqCalDone)) {
4996 DPRINTF(sc, ATH_DEBUG_ANY,
4997 "%s: calibration of channel %u failed\n",
4998 __func__, sc->sc_curchan.channel);
4999 sc->sc_stats.ast_per_calfail++;
5002 * Calibrate noise floor data again in case of change.
5004 ath_hal_process_noisefloor(ah);
5006 * Poll more frequently when the IQ calibration is in
5007 * progress to speedup loading the final settings.
5008 * We temper this aggressive polling with an exponential
5009 * back off after 4 tries up to ath_calinterval.
5011 if (iqCalDone || sc->sc_calinterval >= ath_calinterval) {
5012 sc->sc_caltries = 0;
5013 sc->sc_calinterval = ath_calinterval;
5014 } else if (sc->sc_caltries > 4) {
5015 sc->sc_caltries = 0;
5016 sc->sc_calinterval <<= 1;
5017 if (sc->sc_calinterval > ath_calinterval)
5018 sc->sc_calinterval = ath_calinterval;
5020 KASSERT(0 < sc->sc_calinterval && sc->sc_calinterval <= ath_calinterval,
5021 ("bad calibration interval %u", sc->sc_calinterval));
5023 DPRINTF(sc, ATH_DEBUG_CALIBRATE,
5024 "%s: next +%u (%siqCalDone tries %u)\n", __func__,
5025 sc->sc_calinterval, iqCalDone ? "" : "!", sc->sc_caltries);
5027 callout_reset(&sc->sc_cal_ch, sc->sc_calinterval * hz,
5032 ath_scan_start(struct ieee80211com *ic)
5034 struct ifnet *ifp = ic->ic_ifp;
5035 struct ath_softc *sc = ifp->if_softc;
5036 struct ath_hal *ah = sc->sc_ah;
5039 /* XXX calibration timer? */
5041 sc->sc_scanning = 1;
5042 sc->sc_syncbeacon = 0;
5043 rfilt = ath_calcrxfilter(sc);
5044 ath_hal_setrxfilter(ah, rfilt);
5045 ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0);
5047 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n",
5048 __func__, rfilt, ether_sprintf(ifp->if_broadcastaddr));
5052 ath_scan_end(struct ieee80211com *ic)
5054 struct ifnet *ifp = ic->ic_ifp;
5055 struct ath_softc *sc = ifp->if_softc;
5056 struct ath_hal *ah = sc->sc_ah;
5059 sc->sc_scanning = 0;
5060 rfilt = ath_calcrxfilter(sc);
5061 ath_hal_setrxfilter(ah, rfilt);
5062 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
5064 ath_hal_process_noisefloor(ah);
5066 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
5067 __func__, rfilt, ether_sprintf(sc->sc_curbssid),
5072 ath_set_channel(struct ieee80211com *ic)
5074 struct ifnet *ifp = ic->ic_ifp;
5075 struct ath_softc *sc = ifp->if_softc;
5077 (void) ath_chan_set(sc, ic->ic_curchan);
5079 * If we are returning to our bss channel then mark state
5080 * so the next recv'd beacon's tsf will be used to sync the
5081 * beacon timers. Note that since we only hear beacons in
5082 * sta/ibss mode this has no effect in other operating modes.
5084 if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan)
5085 sc->sc_syncbeacon = 1;
5089 ath_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
5091 struct ifnet *ifp = ic->ic_ifp;
5092 struct ath_softc *sc = ifp->if_softc;
5093 struct ath_hal *ah = sc->sc_ah;
5094 struct ieee80211_node *ni;
5095 int i, error, stamode;
5097 static const HAL_LED_STATE leds[] = {
5098 HAL_LED_INIT, /* IEEE80211_S_INIT */
5099 HAL_LED_SCAN, /* IEEE80211_S_SCAN */
5100 HAL_LED_AUTH, /* IEEE80211_S_AUTH */
5101 HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */
5102 HAL_LED_RUN, /* IEEE80211_S_CAC */
5103 HAL_LED_RUN, /* IEEE80211_S_RUN */
5104 HAL_LED_RUN, /* IEEE80211_S_CSA */
5105 HAL_LED_RUN, /* IEEE80211_S_SLEEP */
5108 DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__,
5109 ieee80211_state_name[ic->ic_state],
5110 ieee80211_state_name[nstate]);
5112 callout_stop(&sc->sc_cal_ch);
5113 ath_hal_setledstate(ah, leds[nstate]); /* set LED */
5115 if (nstate == IEEE80211_S_INIT) {
5117 * Shutdown host/driver operation:
5118 * o disable interrupts so we don't rx frames
5119 * o clean any pending items on the task q
5120 * o notify the rate control algorithm
5122 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
5123 ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL);
5125 /* XXX can't use taskqueue_drain 'cuz we're holding sc_mtx */
5126 taskqueue_drain(sc->sc_tq, &sc->sc_rxtask);
5127 taskqueue_drain(sc->sc_tq, &sc->sc_rxorntask);
5128 taskqueue_drain(sc->sc_tq, &sc->sc_bmisstask);
5129 taskqueue_drain(sc->sc_tq, &sc->sc_bstucktask);
5131 ath_rate_newstate(sc, nstate);
5136 rfilt = ath_calcrxfilter(sc);
5137 stamode = (sc->sc_opmode == HAL_M_STA || sc->sc_opmode == HAL_M_IBSS);
5138 if (stamode && nstate == IEEE80211_S_RUN) {
5139 sc->sc_curaid = ni->ni_associd;
5140 IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid);
5144 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
5145 __func__, rfilt, ether_sprintf(sc->sc_curbssid),
5148 ath_hal_setrxfilter(ah, rfilt);
5150 ath_hal_setassocid(ah, sc->sc_curbssid, ni->ni_associd);
5152 if (ic->ic_opmode != IEEE80211_M_STA &&
5153 (ic->ic_flags & IEEE80211_F_PRIVACY)) {
5154 for (i = 0; i < IEEE80211_WEP_NKID; i++)
5155 if (ath_hal_keyisvalid(ah, i))
5156 ath_hal_keysetmac(ah, i, ni->ni_bssid);
5160 * Notify the rate control algorithm so rates
5161 * are setup should ath_beacon_alloc be called.
5163 ath_rate_newstate(sc, nstate);
5165 if (nstate == IEEE80211_S_RUN) {
5166 DPRINTF(sc, ATH_DEBUG_STATE,
5167 "%s(RUN): ic_flags=0x%08x iv=%d bssid=%s "
5168 "capinfo=0x%04x chan=%d\n"
5172 , ether_sprintf(ni->ni_bssid)
5174 , ieee80211_chan2ieee(ic, ic->ic_curchan));
5176 switch (ic->ic_opmode) {
5177 case IEEE80211_M_HOSTAP:
5178 case IEEE80211_M_IBSS:
5180 * Allocate and setup the beacon frame.
5182 * Stop any previous beacon DMA. This may be
5183 * necessary, for example, when an ibss merge
5184 * causes reconfiguration; there will be a state
5185 * transition from RUN->RUN that means we may
5186 * be called with beacon transmission active.
5188 ath_hal_stoptxdma(ah, sc->sc_bhalq);
5189 ath_beacon_free(sc);
5190 error = ath_beacon_alloc(sc, ni);
5194 * If joining an adhoc network defer beacon timer
5195 * configuration to the next beacon frame so we
5196 * have a current TSF to use. Otherwise we're
5197 * starting an ibss/bss so there's no need to delay.
5199 if (ic->ic_opmode == IEEE80211_M_IBSS &&
5200 ic->ic_bss->ni_tstamp.tsf != 0)
5201 sc->sc_syncbeacon = 1;
5203 ath_beacon_config(sc);
5205 case IEEE80211_M_STA:
5207 * Allocate a key cache slot to the station.
5209 if ((ic->ic_flags & IEEE80211_F_PRIVACY) == 0 &&
5211 ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE)
5212 ath_setup_stationkey(ni);
5214 * Defer beacon timer configuration to the next
5215 * beacon frame so we have a current TSF to use
5216 * (any TSF collected when scanning is likely old).
5218 sc->sc_syncbeacon = 1;
5224 * Let the hal process statistics collected during a
5225 * scan so it can provide calibrated noise floor data.
5227 ath_hal_process_noisefloor(ah);
5229 * Reset rssi stats; maybe not the best place...
5231 sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
5232 sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
5233 sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
5236 sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS));
5237 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
5241 * Invoke the parent method to complete the work.
5243 error = sc->sc_newstate(ic, nstate, arg);
5245 * Finally, start any timers.
5247 if (nstate == IEEE80211_S_RUN) {
5248 /* start periodic recalibration timer */
5249 callout_reset(&sc->sc_cal_ch, sc->sc_calinterval * hz,
5257 * Allocate a key cache slot to the station so we can
5258 * setup a mapping from key index to node. The key cache
5259 * slot is needed for managing antenna state and for
5260 * compression when stations do not use crypto. We do
5261 * it uniliaterally here; if crypto is employed this slot
5262 * will be reassigned.
5265 ath_setup_stationkey(struct ieee80211_node *ni)
5267 struct ieee80211com *ic = ni->ni_ic;
5268 struct ath_softc *sc = ic->ic_ifp->if_softc;
5269 ieee80211_keyix keyix, rxkeyix;
5271 if (!ath_key_alloc(ic, &ni->ni_ucastkey, &keyix, &rxkeyix)) {
5273 * Key cache is full; we'll fall back to doing
5274 * the more expensive lookup in software. Note
5275 * this also means no h/w compression.
5277 /* XXX msg+statistic */
5280 ni->ni_ucastkey.wk_keyix = keyix;
5281 ni->ni_ucastkey.wk_rxkeyix = rxkeyix;
5282 /* NB: this will create a pass-thru key entry */
5283 ath_keyset(sc, &ni->ni_ucastkey, ni->ni_macaddr, ic->ic_bss);
5288 * Setup driver-specific state for a newly associated node.
5289 * Note that we're called also on a re-associate, the isnew
5290 * param tells us if this is the first time or not.
5293 ath_newassoc(struct ieee80211_node *ni, int isnew)
5295 struct ieee80211com *ic = ni->ni_ic;
5296 struct ath_softc *sc = ic->ic_ifp->if_softc;
5298 ath_rate_newassoc(sc, ATH_NODE(ni), isnew);
5300 (ic->ic_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey) {
5301 KASSERT(ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE,
5302 ("new assoc with a unicast key already setup (keyix %u)",
5303 ni->ni_ucastkey.wk_keyix));
5304 ath_setup_stationkey(ni);
5309 ath_getchannels(struct ath_softc *sc,
5310 HAL_REG_DOMAIN rd, HAL_CTRY_CODE cc, HAL_BOOL outdoor, HAL_BOOL xchanmode)
5312 struct ieee80211com *ic = &sc->sc_ic;
5313 struct ifnet *ifp = sc->sc_ifp;
5314 struct ath_hal *ah = sc->sc_ah;
5317 u_int32_t regdomain;
5319 chans = malloc(IEEE80211_CHAN_MAX * sizeof(HAL_CHANNEL),
5321 if (chans == NULL) {
5322 if_printf(ifp, "unable to allocate channel table\n");
5325 if (!ath_hal_init_channels(ah, chans, IEEE80211_CHAN_MAX, &nchan,
5326 NULL, 0, NULL, cc, HAL_MODE_ALL, outdoor, xchanmode)) {
5327 (void) ath_hal_getregdomain(ah, ®domain);
5328 if_printf(ifp, "unable to collect channel list from hal; "
5329 "regdomain likely %u country code %u\n", regdomain, cc);
5330 free(chans, M_TEMP);
5335 * Convert HAL channels to ieee80211 ones.
5337 memset(ic->ic_channels, 0, sizeof(ic->ic_channels));
5338 for (i = 0; i < nchan; i++) {
5339 HAL_CHANNEL *c = &chans[i];
5340 struct ieee80211_channel *ichan = &ic->ic_channels[i];
5342 ichan->ic_ieee = ath_hal_mhz2ieee(ah, c->channel,
5345 if_printf(ifp, "hal channel %u/%x -> %u\n",
5346 c->channel, c->channelFlags, ichan->ic_ieee);
5347 ichan->ic_freq = c->channel;
5349 if ((c->channelFlags & CHANNEL_PUREG) == CHANNEL_PUREG) {
5351 * Except for AR5211, HAL's PUREG means mixed
5354 ichan->ic_flags = c->channelFlags &~ CHANNEL_PUREG;
5355 ichan->ic_flags |= IEEE80211_CHAN_G;
5357 ichan->ic_flags = c->channelFlags;
5360 if (ath_hal_isgsmsku(ah)) {
5361 /* remap to true frequencies */
5362 ichan->ic_freq = 922 + (2422 - ichan->ic_freq);
5363 ichan->ic_flags |= IEEE80211_CHAN_GSM;
5364 ichan->ic_ieee = ieee80211_mhz2ieee(ichan->ic_freq,
5367 ichan->ic_maxregpower = c->maxRegTxPower; /* dBm */
5368 ichan->ic_maxpower = c->maxTxPower; /* 1/2 dBm */
5369 ichan->ic_minpower = c->minTxPower; /* 1/2 dBm */
5371 ic->ic_nchans = nchan;
5372 free(chans, M_TEMP);
5373 (void) ath_hal_getregdomain(ah, &sc->sc_regdomain);
5374 ath_hal_getcountrycode(ah, &sc->sc_countrycode);
5375 sc->sc_xchanmode = xchanmode;
5376 sc->sc_outdoor = outdoor;
5381 ath_led_done(void *arg)
5383 struct ath_softc *sc = arg;
5385 sc->sc_blinking = 0;
5389 * Turn the LED off: flip the pin and then set a timer so no
5390 * update will happen for the specified duration.
5393 ath_led_off(void *arg)
5395 struct ath_softc *sc = arg;
5397 ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, !sc->sc_ledon);
5398 callout_reset(&sc->sc_ledtimer, sc->sc_ledoff, ath_led_done, sc);
5402 * Blink the LED according to the specified on/off times.
5405 ath_led_blink(struct ath_softc *sc, int on, int off)
5407 DPRINTF(sc, ATH_DEBUG_LED, "%s: on %u off %u\n", __func__, on, off);
5408 ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, sc->sc_ledon);
5409 sc->sc_blinking = 1;
5410 sc->sc_ledoff = off;
5411 callout_reset(&sc->sc_ledtimer, on, ath_led_off, sc);
5415 ath_led_event(struct ath_softc *sc, int event)
5418 sc->sc_ledevent = ticks; /* time of last event */
5419 if (sc->sc_blinking) /* don't interrupt active blink */
5423 ath_led_blink(sc, sc->sc_hwmap[0].ledon,
5424 sc->sc_hwmap[0].ledoff);
5427 ath_led_blink(sc, sc->sc_hwmap[sc->sc_txrate].ledon,
5428 sc->sc_hwmap[sc->sc_txrate].ledoff);
5431 ath_led_blink(sc, sc->sc_hwmap[sc->sc_rxrate].ledon,
5432 sc->sc_hwmap[sc->sc_rxrate].ledoff);
5438 ath_update_txpow(struct ath_softc *sc)
5440 struct ieee80211com *ic = &sc->sc_ic;
5441 struct ath_hal *ah = sc->sc_ah;
5444 if (sc->sc_curtxpow != ic->ic_txpowlimit) {
5445 ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
5446 /* read back in case value is clamped */
5447 if (ath_hal_gettxpowlimit(ah, &txpow))
5448 ic->ic_txpowlimit = sc->sc_curtxpow = txpow;
5451 * Fetch max tx power level for status requests.
5453 if (ath_hal_getmaxtxpow(sc->sc_ah, &txpow))
5454 ic->ic_bss->ni_txpower = txpow;
5458 ath_rate_setup(struct ath_softc *sc, u_int mode)
5460 struct ath_hal *ah = sc->sc_ah;
5461 const HAL_RATE_TABLE *rt;
5464 case IEEE80211_MODE_11A:
5465 rt = ath_hal_getratetable(ah, HAL_MODE_11A);
5467 case IEEE80211_MODE_HALF:
5468 rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE);
5470 case IEEE80211_MODE_QUARTER:
5471 rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE);
5473 case IEEE80211_MODE_11B:
5474 rt = ath_hal_getratetable(ah, HAL_MODE_11B);
5476 case IEEE80211_MODE_11G:
5477 rt = ath_hal_getratetable(ah, HAL_MODE_11G);
5479 case IEEE80211_MODE_TURBO_A:
5480 rt = ath_hal_getratetable(ah, HAL_MODE_108A);
5481 #if HAL_ABI_VERSION < 0x07013100
5482 if (rt == NULL) /* XXX bandaid for old hal's */
5483 rt = ath_hal_getratetable(ah, HAL_MODE_TURBO);
5486 case IEEE80211_MODE_TURBO_G:
5487 rt = ath_hal_getratetable(ah, HAL_MODE_108G);
5489 case IEEE80211_MODE_STURBO_A:
5490 rt = ath_hal_getratetable(ah, HAL_MODE_TURBO);
5492 case IEEE80211_MODE_11NA:
5493 rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20);
5495 case IEEE80211_MODE_11NG:
5496 rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20);
5499 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n",
5503 sc->sc_rates[mode] = rt;
5504 return (rt != NULL);
5508 ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
5510 #define N(a) (sizeof(a)/sizeof(a[0]))
5511 /* NB: on/off times from the Atheros NDIS driver, w/ permission */
5512 static const struct {
5513 u_int rate; /* tx/rx 802.11 rate */
5514 u_int16_t timeOn; /* LED on time (ms) */
5515 u_int16_t timeOff; /* LED off time (ms) */
5531 /* XXX half/quarter rates */
5533 const HAL_RATE_TABLE *rt;
5536 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
5537 rt = sc->sc_rates[mode];
5538 KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
5539 for (i = 0; i < rt->rateCount; i++)
5540 sc->sc_rixmap[rt->info[i].dot11Rate & IEEE80211_RATE_VAL] = i;
5541 memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
5542 for (i = 0; i < 32; i++) {
5543 u_int8_t ix = rt->rateCodeToIndex[i];
5545 sc->sc_hwmap[i].ledon = (500 * hz) / 1000;
5546 sc->sc_hwmap[i].ledoff = (130 * hz) / 1000;
5549 sc->sc_hwmap[i].ieeerate =
5550 rt->info[ix].dot11Rate & IEEE80211_RATE_VAL;
5551 if (rt->info[ix].phy == IEEE80211_T_HT)
5552 sc->sc_hwmap[i].ieeerate |= 0x80; /* MCS */
5553 sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD;
5554 if (rt->info[ix].shortPreamble ||
5555 rt->info[ix].phy == IEEE80211_T_OFDM)
5556 sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE;
5557 /* NB: receive frames include FCS */
5558 sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags |
5559 IEEE80211_RADIOTAP_F_FCS;
5560 /* setup blink rate table to avoid per-packet lookup */
5561 for (j = 0; j < N(blinkrates)-1; j++)
5562 if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate)
5564 /* NB: this uses the last entry if the rate isn't found */
5565 /* XXX beware of overlow */
5566 sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000;
5567 sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000;
5569 sc->sc_currates = rt;
5570 sc->sc_curmode = mode;
5572 * All protection frames are transmited at 2Mb/s for
5573 * 11g, otherwise at 1Mb/s.
5575 if (mode == IEEE80211_MODE_11G)
5576 sc->sc_protrix = ath_tx_findrix(rt, 2*2);
5578 sc->sc_protrix = ath_tx_findrix(rt, 2*1);
5579 /* rate index used to send management frames */
5580 sc->sc_minrateix = 0;
5582 * Setup multicast rate state.
5584 /* XXX layering violation */
5585 sc->sc_mcastrix = ath_tx_findrix(rt, sc->sc_ic.ic_mcast_rate);
5586 sc->sc_mcastrate = sc->sc_ic.ic_mcast_rate;
5587 /* NB: caller is responsible for reseting rate control state */
5593 ath_printrxbuf(const struct ath_buf *bf, u_int ix, int done)
5595 const struct ath_rx_status *rs = &bf->bf_status.ds_rxstat;
5596 const struct ath_desc *ds;
5599 for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) {
5600 printf("R[%2u] (DS.V:%p DS.P:%p) L:%08x D:%08x%s\n"
5601 " %08x %08x %08x %08x\n",
5602 ix, ds, (const struct ath_desc *)bf->bf_daddr + i,
5603 ds->ds_link, ds->ds_data,
5604 !done ? "" : (rs->rs_status == 0) ? " *" : " !",
5605 ds->ds_ctl0, ds->ds_ctl1,
5606 ds->ds_hw[0], ds->ds_hw[1]);
5611 ath_printtxbuf(const struct ath_buf *bf, u_int qnum, u_int ix, int done)
5613 const struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
5614 const struct ath_desc *ds;
5617 printf("Q%u[%3u]", qnum, ix);
5618 for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) {
5619 printf(" (DS.V:%p DS.P:%p) L:%08x D:%08x F:04%x%s\n"
5620 " %08x %08x %08x %08x %08x %08x\n",
5621 ds, (const struct ath_desc *)bf->bf_daddr + i,
5622 ds->ds_link, ds->ds_data, bf->bf_flags,
5623 !done ? "" : (ts->ts_status == 0) ? " *" : " !",
5624 ds->ds_ctl0, ds->ds_ctl1,
5625 ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3]);
5628 #endif /* ATH_DEBUG */
5631 ath_watchdog(struct ifnet *ifp)
5633 struct ath_softc *sc = ifp->if_softc;
5635 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && !sc->sc_invalid) {
5636 if_printf(ifp, "device timeout\n");
5639 sc->sc_stats.ast_watchdog++;
5645 * Diagnostic interface to the HAL. This is used by various
5646 * tools to do things like retrieve register contents for
5647 * debugging. The mechanism is intentionally opaque so that
5648 * it can change frequently w/o concern for compatiblity.
5651 ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad)
5653 struct ath_hal *ah = sc->sc_ah;
5654 u_int id = ad->ad_id & ATH_DIAG_ID;
5655 void *indata = NULL;
5656 void *outdata = NULL;
5657 u_int32_t insize = ad->ad_in_size;
5658 u_int32_t outsize = ad->ad_out_size;
5661 if (ad->ad_id & ATH_DIAG_IN) {
5665 indata = malloc(insize, M_TEMP, M_NOWAIT);
5666 if (indata == NULL) {
5670 error = copyin(ad->ad_in_data, indata, insize);
5674 if (ad->ad_id & ATH_DIAG_DYN) {
5676 * Allocate a buffer for the results (otherwise the HAL
5677 * returns a pointer to a buffer where we can read the
5678 * results). Note that we depend on the HAL leaving this
5679 * pointer for us to use below in reclaiming the buffer;
5680 * may want to be more defensive.
5682 outdata = malloc(outsize, M_TEMP, M_NOWAIT);
5683 if (outdata == NULL) {
5688 if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) {
5689 if (outsize < ad->ad_out_size)
5690 ad->ad_out_size = outsize;
5691 if (outdata != NULL)
5692 error = copyout(outdata, ad->ad_out_data,
5698 if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL)
5699 free(indata, M_TEMP);
5700 if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL)
5701 free(outdata, M_TEMP);
5704 #endif /* ATH_DIAGAPI */
5707 ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
5709 #define IS_RUNNING(ifp) \
5710 ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
5711 struct ath_softc *sc = ifp->if_softc;
5712 struct ieee80211com *ic = &sc->sc_ic;
5713 struct ifreq *ifr = (struct ifreq *)data;
5719 if (IS_RUNNING(ifp)) {
5721 * To avoid rescanning another access point,
5722 * do not call ath_init() here. Instead,
5723 * only reflect promisc mode settings.
5726 } else if (ifp->if_flags & IFF_UP) {
5728 * Beware of being called during attach/detach
5729 * to reset promiscuous mode. In that case we
5730 * will still be marked UP but not RUNNING.
5731 * However trying to re-init the interface
5732 * is the wrong thing to do as we've already
5733 * torn down much of our state. There's
5734 * probably a better way to deal with this.
5736 if (!sc->sc_invalid && ic->ic_bss != NULL)
5737 ath_init(sc); /* XXX lose error */
5739 ath_stop_locked(ifp);
5744 * The upper layer has already installed/removed
5745 * the multicast address(es), just recalculate the
5746 * multicast filter for the card.
5748 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
5752 /* NB: embed these numbers to get a consistent view */
5753 sc->sc_stats.ast_tx_packets = ifp->if_opackets;
5754 sc->sc_stats.ast_rx_packets = ifp->if_ipackets;
5755 ieee80211_getsignal(ic, &sc->sc_stats.ast_rx_rssi,
5756 &sc->sc_stats.ast_rx_noise);
5757 sc->sc_stats.ast_tx_rate = sc->sc_hwmap[sc->sc_txrate].ieeerate;
5760 * NB: Drop the softc lock in case of a page fault;
5761 * we'll accept any potential inconsisentcy in the
5762 * statistics. The alternative is to copy the data
5763 * to a local structure.
5765 return copyout(&sc->sc_stats,
5766 ifr->ifr_data, sizeof (sc->sc_stats));
5770 error = ath_ioctl_diag(sc, (struct ath_diag *) ifr);
5775 error = ieee80211_ioctl(ic, cmd, data);
5776 if (error == ENETRESET) {
5777 if (IS_RUNNING(ifp) &&
5778 ic->ic_roaming != IEEE80211_ROAMING_MANUAL)
5779 ath_init(sc); /* XXX lose error */
5782 if (error == ERESTART)
5783 error = IS_RUNNING(ifp) ? ath_reset(ifp) : 0;
5792 ath_sysctl_slottime(SYSCTL_HANDLER_ARGS)
5794 struct ath_softc *sc = arg1;
5795 u_int slottime = ath_hal_getslottime(sc->sc_ah);
5798 error = sysctl_handle_int(oidp, &slottime, 0, req);
5799 if (error || !req->newptr)
5801 return !ath_hal_setslottime(sc->sc_ah, slottime) ? EINVAL : 0;
5805 ath_sysctl_acktimeout(SYSCTL_HANDLER_ARGS)
5807 struct ath_softc *sc = arg1;
5808 u_int acktimeout = ath_hal_getacktimeout(sc->sc_ah);
5811 error = sysctl_handle_int(oidp, &acktimeout, 0, req);
5812 if (error || !req->newptr)
5814 return !ath_hal_setacktimeout(sc->sc_ah, acktimeout) ? EINVAL : 0;
5818 ath_sysctl_ctstimeout(SYSCTL_HANDLER_ARGS)
5820 struct ath_softc *sc = arg1;
5821 u_int ctstimeout = ath_hal_getctstimeout(sc->sc_ah);
5824 error = sysctl_handle_int(oidp, &ctstimeout, 0, req);
5825 if (error || !req->newptr)
5827 return !ath_hal_setctstimeout(sc->sc_ah, ctstimeout) ? EINVAL : 0;
5831 ath_sysctl_softled(SYSCTL_HANDLER_ARGS)
5833 struct ath_softc *sc = arg1;
5834 int softled = sc->sc_softled;
5837 error = sysctl_handle_int(oidp, &softled, 0, req);
5838 if (error || !req->newptr)
5840 softled = (softled != 0);
5841 if (softled != sc->sc_softled) {
5843 /* NB: handle any sc_ledpin change */
5844 ath_hal_gpioCfgOutput(sc->sc_ah, sc->sc_ledpin);
5845 ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin,
5848 sc->sc_softled = softled;
5854 ath_sysctl_ledpin(SYSCTL_HANDLER_ARGS)
5856 struct ath_softc *sc = arg1;
5857 int ledpin = sc->sc_ledpin;
5860 error = sysctl_handle_int(oidp, &ledpin, 0, req);
5861 if (error || !req->newptr)
5863 if (ledpin != sc->sc_ledpin) {
5864 sc->sc_ledpin = ledpin;
5865 if (sc->sc_softled) {
5866 ath_hal_gpioCfgOutput(sc->sc_ah, sc->sc_ledpin);
5867 ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin,
5875 ath_sysctl_txantenna(SYSCTL_HANDLER_ARGS)
5877 struct ath_softc *sc = arg1;
5878 u_int txantenna = ath_hal_getantennaswitch(sc->sc_ah);
5881 error = sysctl_handle_int(oidp, &txantenna, 0, req);
5882 if (!error && req->newptr) {
5883 /* XXX assumes 2 antenna ports */
5884 if (txantenna < HAL_ANT_VARIABLE || txantenna > HAL_ANT_FIXED_B)
5886 ath_hal_setantennaswitch(sc->sc_ah, txantenna);
5888 * NB: with the switch locked this isn't meaningful,
5889 * but set it anyway so things like radiotap get
5890 * consistent info in their data.
5892 sc->sc_txantenna = txantenna;
5898 ath_sysctl_rxantenna(SYSCTL_HANDLER_ARGS)
5900 struct ath_softc *sc = arg1;
5901 u_int defantenna = ath_hal_getdefantenna(sc->sc_ah);
5904 error = sysctl_handle_int(oidp, &defantenna, 0, req);
5905 if (!error && req->newptr)
5906 ath_hal_setdefantenna(sc->sc_ah, defantenna);
5911 ath_sysctl_diversity(SYSCTL_HANDLER_ARGS)
5913 struct ath_softc *sc = arg1;
5914 u_int diversity = ath_hal_getdiversity(sc->sc_ah);
5917 error = sysctl_handle_int(oidp, &diversity, 0, req);
5918 if (error || !req->newptr)
5920 if (!ath_hal_setdiversity(sc->sc_ah, diversity))
5922 sc->sc_diversity = diversity;
5927 ath_sysctl_diag(SYSCTL_HANDLER_ARGS)
5929 struct ath_softc *sc = arg1;
5933 if (!ath_hal_getdiag(sc->sc_ah, &diag))
5935 error = sysctl_handle_int(oidp, &diag, 0, req);
5936 if (error || !req->newptr)
5938 return !ath_hal_setdiag(sc->sc_ah, diag) ? EINVAL : 0;
5942 ath_sysctl_tpscale(SYSCTL_HANDLER_ARGS)
5944 struct ath_softc *sc = arg1;
5945 struct ifnet *ifp = sc->sc_ifp;
5949 (void) ath_hal_gettpscale(sc->sc_ah, &scale);
5950 error = sysctl_handle_int(oidp, &scale, 0, req);
5951 if (error || !req->newptr)
5953 return !ath_hal_settpscale(sc->sc_ah, scale) ? EINVAL :
5954 (ifp->if_drv_flags & IFF_DRV_RUNNING) ? ath_reset(ifp) : 0;
5958 ath_sysctl_tpc(SYSCTL_HANDLER_ARGS)
5960 struct ath_softc *sc = arg1;
5961 u_int tpc = ath_hal_gettpc(sc->sc_ah);
5964 error = sysctl_handle_int(oidp, &tpc, 0, req);
5965 if (error || !req->newptr)
5967 return !ath_hal_settpc(sc->sc_ah, tpc) ? EINVAL : 0;
5971 ath_sysctl_rfkill(SYSCTL_HANDLER_ARGS)
5973 struct ath_softc *sc = arg1;
5974 struct ifnet *ifp = sc->sc_ifp;
5975 struct ath_hal *ah = sc->sc_ah;
5976 u_int rfkill = ath_hal_getrfkill(ah);
5979 error = sysctl_handle_int(oidp, &rfkill, 0, req);
5980 if (error || !req->newptr)
5982 if (rfkill == ath_hal_getrfkill(ah)) /* unchanged */
5984 if (!ath_hal_setrfkill(ah, rfkill))
5986 return (ifp->if_drv_flags & IFF_DRV_RUNNING) ? ath_reset(ifp) : 0;
5990 ath_sysctl_rfsilent(SYSCTL_HANDLER_ARGS)
5992 struct ath_softc *sc = arg1;
5996 (void) ath_hal_getrfsilent(sc->sc_ah, &rfsilent);
5997 error = sysctl_handle_int(oidp, &rfsilent, 0, req);
5998 if (error || !req->newptr)
6000 if (!ath_hal_setrfsilent(sc->sc_ah, rfsilent))
6002 sc->sc_rfsilentpin = rfsilent & 0x1c;
6003 sc->sc_rfsilentpol = (rfsilent & 0x2) != 0;
6008 ath_sysctl_countrycode(SYSCTL_HANDLER_ARGS)
6010 struct ath_softc *sc = arg1;
6011 u_int32_t cc = sc->sc_countrycode;
6012 struct ieee80211com *ic = &sc->sc_ic;
6015 error = sysctl_handle_int(oidp, &cc, 0, req);
6016 if (error || !req->newptr)
6018 error = ath_getchannels(sc, sc->sc_regdomain, cc,
6019 sc->sc_outdoor != 0, sc->sc_xchanmode != 0);
6022 ieee80211_media_init(ic, ath_media_change, ieee80211_media_status);
6028 ath_sysctl_regdomain(SYSCTL_HANDLER_ARGS)
6030 struct ath_softc *sc = arg1;
6031 u_int32_t rd = sc->sc_regdomain;
6032 struct ieee80211com *ic = &sc->sc_ic;
6035 error = sysctl_handle_int(oidp, &rd, 0, req);
6036 if (error || !req->newptr)
6038 if (!ath_hal_setregdomain(sc->sc_ah, rd))
6040 error = ath_getchannels(sc, rd, sc->sc_countrycode,
6041 sc->sc_outdoor != 0, sc->sc_xchanmode != 0);
6044 ieee80211_media_init(ic, ath_media_change, ieee80211_media_status);
6050 ath_sysctl_tpack(SYSCTL_HANDLER_ARGS)
6052 struct ath_softc *sc = arg1;
6056 (void) ath_hal_gettpack(sc->sc_ah, &tpack);
6057 error = sysctl_handle_int(oidp, &tpack, 0, req);
6058 if (error || !req->newptr)
6060 return !ath_hal_settpack(sc->sc_ah, tpack) ? EINVAL : 0;
6064 ath_sysctl_tpcts(SYSCTL_HANDLER_ARGS)
6066 struct ath_softc *sc = arg1;
6070 (void) ath_hal_gettpcts(sc->sc_ah, &tpcts);
6071 error = sysctl_handle_int(oidp, &tpcts, 0, req);
6072 if (error || !req->newptr)
6074 return !ath_hal_settpcts(sc->sc_ah, tpcts) ? EINVAL : 0;
6078 ath_sysctlattach(struct ath_softc *sc)
6080 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
6081 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
6082 struct ath_hal *ah = sc->sc_ah;
6084 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6085 "countrycode", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6086 ath_sysctl_countrycode, "I", "country code");
6087 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6088 "regdomain", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6089 ath_sysctl_regdomain, "I", "EEPROM regdomain code");
6091 sc->sc_debug = ath_debug;
6092 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6093 "debug", CTLFLAG_RW, &sc->sc_debug, 0,
6094 "control debugging printfs");
6096 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6097 "slottime", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6098 ath_sysctl_slottime, "I", "802.11 slot time (us)");
6099 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6100 "acktimeout", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6101 ath_sysctl_acktimeout, "I", "802.11 ACK timeout (us)");
6102 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6103 "ctstimeout", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6104 ath_sysctl_ctstimeout, "I", "802.11 CTS timeout (us)");
6105 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6106 "softled", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6107 ath_sysctl_softled, "I", "enable/disable software LED support");
6108 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6109 "ledpin", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6110 ath_sysctl_ledpin, "I", "GPIO pin connected to LED");
6111 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6112 "ledon", CTLFLAG_RW, &sc->sc_ledon, 0,
6113 "setting to turn LED on");
6114 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6115 "ledidle", CTLFLAG_RW, &sc->sc_ledidle, 0,
6116 "idle time for inactivity LED (ticks)");
6117 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6118 "txantenna", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6119 ath_sysctl_txantenna, "I", "antenna switch");
6120 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6121 "rxantenna", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6122 ath_sysctl_rxantenna, "I", "default/rx antenna");
6123 if (ath_hal_hasdiversity(ah))
6124 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6125 "diversity", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6126 ath_sysctl_diversity, "I", "antenna diversity");
6127 sc->sc_txintrperiod = ATH_TXINTR_PERIOD;
6128 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6129 "txintrperiod", CTLFLAG_RW, &sc->sc_txintrperiod, 0,
6130 "tx descriptor batching");
6131 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6132 "diag", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6133 ath_sysctl_diag, "I", "h/w diagnostic control");
6134 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6135 "tpscale", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6136 ath_sysctl_tpscale, "I", "tx power scaling");
6137 if (ath_hal_hastpc(ah)) {
6138 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6139 "tpc", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6140 ath_sysctl_tpc, "I", "enable/disable per-packet TPC");
6141 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6142 "tpack", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6143 ath_sysctl_tpack, "I", "tx power for ack frames");
6144 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6145 "tpcts", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6146 ath_sysctl_tpcts, "I", "tx power for cts frames");
6148 if (ath_hal_hasfastframes(sc->sc_ah)) {
6149 sc->sc_fftxqmin = ATH_FF_TXQMIN;
6150 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6151 "fftxqmin", CTLFLAG_RW, &sc->sc_fftxqmin, 0,
6152 "min frames before fast-frame staging");
6153 sc->sc_fftxqmax = ATH_FF_TXQMAX;
6154 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6155 "fftxqmax", CTLFLAG_RW, &sc->sc_fftxqmax, 0,
6156 "max queued frames before tail drop");
6158 if (ath_hal_hasrfsilent(ah)) {
6159 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6160 "rfsilent", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6161 ath_sysctl_rfsilent, "I", "h/w RF silent config");
6162 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6163 "rfkill", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6164 ath_sysctl_rfkill, "I", "enable/disable RF kill switch");
6166 sc->sc_monpass = HAL_RXERR_DECRYPT | HAL_RXERR_MIC;
6167 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6168 "monpass", CTLFLAG_RW, &sc->sc_monpass, 0,
6169 "mask of error frames to pass when monitoring");
6173 ath_bpfattach(struct ath_softc *sc)
6175 struct ifnet *ifp = sc->sc_ifp;
6177 bpfattach2(ifp, DLT_IEEE802_11_RADIO,
6178 sizeof(struct ieee80211_frame) + sizeof(sc->sc_tx_th),
6181 * Initialize constant fields.
6182 * XXX make header lengths a multiple of 32-bits so subsequent
6183 * headers are properly aligned; this is a kludge to keep
6184 * certain applications happy.
6186 * NB: the channel is setup each time we transition to the
6187 * RUN state to avoid filling it in for each frame.
6189 sc->sc_tx_th_len = roundup(sizeof(sc->sc_tx_th), sizeof(u_int32_t));
6190 sc->sc_tx_th.wt_ihdr.it_len = htole16(sc->sc_tx_th_len);
6191 sc->sc_tx_th.wt_ihdr.it_present = htole32(ATH_TX_RADIOTAP_PRESENT);
6193 sc->sc_rx_th_len = roundup(sizeof(sc->sc_rx_th), sizeof(u_int32_t));
6194 sc->sc_rx_th.wr_ihdr.it_len = htole16(sc->sc_rx_th_len);
6195 sc->sc_rx_th.wr_ihdr.it_present = htole32(ATH_RX_RADIOTAP_PRESENT);
6199 ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
6200 struct ath_buf *bf, struct mbuf *m0,
6201 const struct ieee80211_bpf_params *params)
6203 struct ieee80211com *ic = &sc->sc_ic;
6204 struct ath_hal *ah = sc->sc_ah;
6205 int error, ismcast, ismrr;
6206 int hdrlen, pktlen, try0, txantenna;
6207 u_int8_t rix, cix, txrate, ctsrate, rate1, rate2, rate3;
6208 struct ath_txq *txq;
6209 struct ieee80211_frame *wh;
6210 u_int flags, ctsduration;
6212 const HAL_RATE_TABLE *rt;
6213 struct ath_desc *ds;
6216 wh = mtod(m0, struct ieee80211_frame *);
6217 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
6218 hdrlen = ieee80211_anyhdrsize(wh);
6220 * Packet length must not include any
6221 * pad bytes; deduct them here.
6223 /* XXX honor IEEE80211_BPF_DATAPAD */
6224 pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN;
6226 error = ath_tx_dmasetup(sc, bf, m0);
6229 m0 = bf->bf_m; /* NB: may have changed */
6230 wh = mtod(m0, struct ieee80211_frame *);
6231 bf->bf_node = ni; /* NB: held reference */
6233 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */
6234 flags |= HAL_TXDESC_INTREQ; /* force interrupt */
6235 if (params->ibp_flags & IEEE80211_BPF_RTS)
6236 flags |= HAL_TXDESC_RTSENA;
6237 else if (params->ibp_flags & IEEE80211_BPF_CTS)
6238 flags |= HAL_TXDESC_CTSENA;
6239 /* XXX leave ismcast to injector? */
6240 if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast)
6241 flags |= HAL_TXDESC_NOACK;
6243 rt = sc->sc_currates;
6244 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
6245 rix = ath_tx_findrix(rt, params->ibp_rate0);
6246 txrate = rt->info[rix].rateCode;
6247 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
6248 txrate |= rt->info[rix].shortPreamble;
6249 sc->sc_txrate = txrate;
6250 try0 = params->ibp_try0;
6251 ismrr = (params->ibp_try1 != 0);
6252 txantenna = params->ibp_pri >> 2;
6253 if (txantenna == 0) /* XXX? */
6254 txantenna = sc->sc_txantenna;
6256 if (flags & (HAL_TXDESC_CTSENA | HAL_TXDESC_RTSENA)) {
6257 cix = ath_tx_findrix(rt, params->ibp_ctsrate);
6258 ctsrate = rt->info[cix].rateCode;
6259 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) {
6260 ctsrate |= rt->info[cix].shortPreamble;
6261 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */
6262 ctsduration += rt->info[cix].spAckDuration;
6263 ctsduration += ath_hal_computetxtime(ah,
6264 rt, pktlen, rix, AH_TRUE);
6265 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */
6266 ctsduration += rt->info[rix].spAckDuration;
6268 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */
6269 ctsduration += rt->info[cix].lpAckDuration;
6270 ctsduration += ath_hal_computetxtime(ah,
6271 rt, pktlen, rix, AH_FALSE);
6272 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */
6273 ctsduration += rt->info[rix].lpAckDuration;
6275 ismrr = 0; /* XXX */
6278 pri = params->ibp_pri & 3;
6280 * NB: we mark all packets as type PSPOLL so the h/w won't
6281 * set the sequence number, duration, etc.
6283 atype = HAL_PKT_TYPE_PSPOLL;
6285 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
6286 ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len,
6287 sc->sc_hwmap[txrate].ieeerate, -1);
6289 if (bpf_peers_present(ic->ic_rawbpf))
6290 bpf_mtap(ic->ic_rawbpf, m0);
6291 if (bpf_peers_present(sc->sc_drvbpf)) {
6292 u_int64_t tsf = ath_hal_gettsf64(ah);
6294 sc->sc_tx_th.wt_tsf = htole64(tsf);
6295 sc->sc_tx_th.wt_flags = sc->sc_hwmap[txrate].txflags;
6296 if (wh->i_fc[1] & IEEE80211_FC1_WEP)
6297 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
6298 sc->sc_tx_th.wt_rate = sc->sc_hwmap[txrate].ieeerate;
6299 sc->sc_tx_th.wt_txpower = ni->ni_txpower;
6300 sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
6302 bpf_mtap2(sc->sc_drvbpf,
6303 &sc->sc_tx_th, sc->sc_tx_th_len, m0);
6307 * Formulate first tx descriptor with tx controls.
6310 /* XXX check return value? */
6311 ath_hal_setuptxdesc(ah, ds
6312 , pktlen /* packet length */
6313 , hdrlen /* header length */
6314 , atype /* Atheros packet type */
6315 , params->ibp_power /* txpower */
6316 , txrate, try0 /* series 0 rate/tries */
6317 , HAL_TXKEYIX_INVALID /* key cache index */
6318 , txantenna /* antenna mode */
6320 , ctsrate /* rts/cts rate */
6321 , ctsduration /* rts/cts duration */
6323 bf->bf_flags = flags;
6326 rix = ath_tx_findrix(rt, params->ibp_rate1);
6327 rate1 = rt->info[rix].rateCode;
6328 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
6329 rate1 |= rt->info[rix].shortPreamble;
6330 if (params->ibp_try2) {
6331 rix = ath_tx_findrix(rt, params->ibp_rate2);
6332 rate2 = rt->info[rix].rateCode;
6333 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
6334 rate2 |= rt->info[rix].shortPreamble;
6337 if (params->ibp_try3) {
6338 rix = ath_tx_findrix(rt, params->ibp_rate3);
6339 rate3 = rt->info[rix].rateCode;
6340 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
6341 rate3 |= rt->info[rix].shortPreamble;
6344 ath_hal_setupxtxdesc(ah, ds
6345 , rate1, params->ibp_try1 /* series 1 */
6346 , rate2, params->ibp_try2 /* series 2 */
6347 , rate3, params->ibp_try3 /* series 3 */
6352 * When servicing one or more stations in power-save mode
6353 * (or) if there is some mcast data waiting on the mcast
6354 * queue (to prevent out of order delivery) multicast
6355 * frames must be buffered until after the beacon.
6357 txq = sc->sc_ac2q[pri];
6358 if (ismcast && (ic->ic_ps_sta || sc->sc_mcastq.axq_depth))
6359 txq = &sc->sc_mcastq;
6360 ath_tx_handoff(sc, txq, bf);
6365 ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
6366 const struct ieee80211_bpf_params *params)
6368 struct ieee80211com *ic = ni->ni_ic;
6369 struct ifnet *ifp = ic->ic_ifp;
6370 struct ath_softc *sc = ifp->if_softc;
6373 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) {
6374 ieee80211_free_node(ni);
6379 * Grab a TX buffer and associated resources.
6382 bf = STAILQ_FIRST(&sc->sc_txbuf);
6384 STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
6385 ATH_TXBUF_UNLOCK(sc);
6387 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: out of xmit buffers\n",
6389 sc->sc_stats.ast_tx_qstop++;
6390 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
6391 ieee80211_free_node(ni);
6397 sc->sc_stats.ast_tx_raw++;
6399 if (params == NULL) {
6401 * Legacy path; interpret frame contents to decide
6402 * precisely how to send the frame.
6404 if (ath_tx_start(sc, ni, bf, m))
6408 * Caller supplied explicit parameters to use in
6409 * sending the frame.
6411 if (ath_tx_raw_start(sc, ni, bf, m, params))
6420 STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
6421 ATH_TXBUF_UNLOCK(sc);
6422 ieee80211_free_node(ni);
6423 return EIO; /* XXX */
6427 * Announce various information on device/driver attach.
6430 ath_announce(struct ath_softc *sc)
6432 #define HAL_MODE_DUALBAND (HAL_MODE_11A|HAL_MODE_11B)
6433 struct ifnet *ifp = sc->sc_ifp;
6434 struct ath_hal *ah = sc->sc_ah;
6437 if_printf(ifp, "mac %d.%d phy %d.%d",
6438 ah->ah_macVersion, ah->ah_macRev,
6439 ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf);
6441 * Print radio revision(s). We check the wireless modes
6442 * to avoid falsely printing revs for inoperable parts.
6443 * Dual-band radio revs are returned in the 5Ghz rev number.
6445 ath_hal_getcountrycode(ah, &cc);
6446 modes = ath_hal_getwirelessmodes(ah, cc);
6447 if ((modes & HAL_MODE_DUALBAND) == HAL_MODE_DUALBAND) {
6448 if (ah->ah_analog5GhzRev && ah->ah_analog2GhzRev)
6449 printf(" 5ghz radio %d.%d 2ghz radio %d.%d",
6450 ah->ah_analog5GhzRev >> 4,
6451 ah->ah_analog5GhzRev & 0xf,
6452 ah->ah_analog2GhzRev >> 4,
6453 ah->ah_analog2GhzRev & 0xf);
6455 printf(" radio %d.%d", ah->ah_analog5GhzRev >> 4,
6456 ah->ah_analog5GhzRev & 0xf);
6458 printf(" radio %d.%d", ah->ah_analog5GhzRev >> 4,
6459 ah->ah_analog5GhzRev & 0xf);
6463 for (i = 0; i <= WME_AC_VO; i++) {
6464 struct ath_txq *txq = sc->sc_ac2q[i];
6465 if_printf(ifp, "Use hw queue %u for %s traffic\n",
6466 txq->axq_qnum, ieee80211_wme_acnames[i]);
6468 if_printf(ifp, "Use hw queue %u for CAB traffic\n",
6469 sc->sc_cabq->axq_qnum);
6470 if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq);
6472 if (ath_rxbuf != ATH_RXBUF)
6473 if_printf(ifp, "using %u rx buffers\n", ath_rxbuf);
6474 if (ath_txbuf != ATH_TXBUF)
6475 if_printf(ifp, "using %u tx buffers\n", ath_txbuf);
6476 #undef HAL_MODE_DUALBAND