2 * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
3 * Copyright (c) 2007-2008 Marvell Semiconductor, Inc.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer,
11 * without modification.
12 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
13 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
14 * redistribution must be conditioned upon including a substantially
15 * similar Disclaimer requirement for further binary redistribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
21 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
22 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
23 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
26 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGES.
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
35 * Driver for the Marvell 88W8363 Wireless LAN controller.
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/sysctl.h>
46 #include <sys/malloc.h>
48 #include <sys/mutex.h>
49 #include <sys/kernel.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/errno.h>
53 #include <sys/callout.h>
55 #include <sys/endian.h>
56 #include <sys/kthread.h>
57 #include <sys/taskqueue.h>
59 #include <machine/bus.h>
62 #include <net/if_dl.h>
63 #include <net/if_media.h>
64 #include <net/if_types.h>
65 #include <net/if_arp.h>
66 #include <net/ethernet.h>
67 #include <net/if_llc.h>
71 #include <net80211/ieee80211_var.h>
72 #include <net80211/ieee80211_regdomain.h>
75 #include <netinet/in.h>
76 #include <netinet/if_ether.h>
79 #include <dev/mwl/if_mwlvar.h>
80 #include <dev/mwl/mwldiag.h>
82 /* idiomatic shorthands: MS = mask+shift, SM = shift+mask */
/*
 * MS extracts a register field: mask with x, shift right by the
 * companion shift constant x_S.  SM builds a field: shift left by
 * x_S, then mask with x.  Both rely on a paired x / x_S definition.
 * NOTE(review): 'x' is deliberately unparenthesized so the x##_S
 * token paste works; callers must pass a plain macro name.
 */
83 #define MS(v,x) (((v) & x) >> x##_S)
84 #define SM(v,x) (((v) << x##_S) & x)
/*
 * Forward declarations for the driver-private methods plugged into
 * the net80211 ieee80211com/ieee80211vap method tables (vap create/
 * delete, state machine, key management, scan, A-MPDU handling) and
 * the internal attach/DMA/tx/rx helpers they rely on.
 */
86 static struct ieee80211vap *mwl_vap_create(struct ieee80211com *,
87 const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
88 const uint8_t [IEEE80211_ADDR_LEN],
89 const uint8_t [IEEE80211_ADDR_LEN]);
90 static void mwl_vap_delete(struct ieee80211vap *);
91 static int mwl_setupdma(struct mwl_softc *);
92 static int mwl_hal_reset(struct mwl_softc *sc);
93 static int mwl_init_locked(struct mwl_softc *);
94 static void mwl_init(void *);
95 static void mwl_stop_locked(struct ifnet *, int);
96 static int mwl_reset(struct ieee80211vap *, u_long);
97 static void mwl_stop(struct ifnet *, int);
98 static void mwl_start(struct ifnet *);
99 static int mwl_raw_xmit(struct ieee80211_node *, struct mbuf *,
100 const struct ieee80211_bpf_params *);
101 static int mwl_media_change(struct ifnet *);
102 static void mwl_watchdog(void *);
103 static int mwl_ioctl(struct ifnet *, u_long, caddr_t);
104 static void mwl_radar_proc(void *, int);
105 static void mwl_chanswitch_proc(void *, int);
106 static void mwl_bawatchdog_proc(void *, int);
107 static int mwl_key_alloc(struct ieee80211vap *,
108 struct ieee80211_key *,
109 ieee80211_keyix *, ieee80211_keyix *);
110 static int mwl_key_delete(struct ieee80211vap *,
111 const struct ieee80211_key *);
112 static int mwl_key_set(struct ieee80211vap *, const struct ieee80211_key *,
113 const uint8_t mac[IEEE80211_ADDR_LEN]);
114 static int mwl_mode_init(struct mwl_softc *);
115 static void mwl_update_mcast(struct ifnet *);
116 static void mwl_update_promisc(struct ifnet *);
117 static void mwl_updateslot(struct ifnet *);
118 static int mwl_beacon_setup(struct ieee80211vap *);
119 static void mwl_beacon_update(struct ieee80211vap *, int);
/* Host-driven power-save support is compile-time optional.
 * NOTE(review): the matching #endif is not visible in this chunk. */
120 #ifdef MWL_HOST_PS_SUPPORT
121 static void mwl_update_ps(struct ieee80211vap *, int);
122 static int mwl_set_tim(struct ieee80211_node *, int);
124 static int mwl_dma_setup(struct mwl_softc *);
125 static void mwl_dma_cleanup(struct mwl_softc *);
126 static struct ieee80211_node *mwl_node_alloc(struct ieee80211vap *,
127 const uint8_t [IEEE80211_ADDR_LEN]);
128 static void mwl_node_cleanup(struct ieee80211_node *);
129 static void mwl_node_drain(struct ieee80211_node *);
130 static void mwl_node_getsignal(const struct ieee80211_node *,
132 static void mwl_node_getmimoinfo(const struct ieee80211_node *,
133 struct ieee80211_mimo_info *);
134 static int mwl_rxbuf_init(struct mwl_softc *, struct mwl_rxbuf *);
135 static void mwl_rx_proc(void *, int);
136 static void mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *, int);
137 static int mwl_tx_setup(struct mwl_softc *, int, int);
138 static int mwl_wme_update(struct ieee80211com *);
139 static void mwl_tx_cleanupq(struct mwl_softc *, struct mwl_txq *);
140 static void mwl_tx_cleanup(struct mwl_softc *);
141 static uint16_t mwl_calcformat(uint8_t rate, const struct ieee80211_node *);
142 static int mwl_tx_start(struct mwl_softc *, struct ieee80211_node *,
143 struct mwl_txbuf *, struct mbuf *);
144 static void mwl_tx_proc(void *, int);
145 static int mwl_chan_set(struct mwl_softc *, struct ieee80211_channel *);
146 static void mwl_draintxq(struct mwl_softc *);
147 static void mwl_cleartxq(struct mwl_softc *, struct ieee80211vap *);
148 static int mwl_recv_action(struct ieee80211_node *,
149 const struct ieee80211_frame *,
150 const uint8_t *, const uint8_t *);
151 static int mwl_addba_request(struct ieee80211_node *,
152 struct ieee80211_tx_ampdu *, int dialogtoken,
153 int baparamset, int batimeout);
154 static int mwl_addba_response(struct ieee80211_node *,
155 struct ieee80211_tx_ampdu *, int status,
156 int baparamset, int batimeout);
157 static void mwl_addba_stop(struct ieee80211_node *,
158 struct ieee80211_tx_ampdu *);
159 static int mwl_startrecv(struct mwl_softc *);
160 static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *,
161 struct ieee80211_channel *);
162 static int mwl_setapmode(struct ieee80211vap *, struct ieee80211_channel*);
163 static void mwl_scan_start(struct ieee80211com *);
164 static void mwl_scan_end(struct ieee80211com *);
165 static void mwl_set_channel(struct ieee80211com *);
166 static int mwl_peerstadb(struct ieee80211_node *,
167 int aid, int staid, MWL_HAL_PEERINFO *pi);
168 static int mwl_localstadb(struct ieee80211vap *);
169 static int mwl_newstate(struct ieee80211vap *, enum ieee80211_state, int);
170 static int allocstaid(struct mwl_softc *sc, int aid);
171 static void delstaid(struct mwl_softc *sc, int staid);
172 static void mwl_newassoc(struct ieee80211_node *, int);
173 static void mwl_agestations(void *);
174 static int mwl_setregdomain(struct ieee80211com *,
175 struct ieee80211_regdomain *, int,
176 struct ieee80211_channel []);
177 static void mwl_getradiocaps(struct ieee80211com *, int, int *,
178 struct ieee80211_channel []);
179 static int mwl_getchannels(struct mwl_softc *);
181 static void mwl_sysctlattach(struct mwl_softc *);
182 static void mwl_announce(struct mwl_softc *);
/*
 * Run-time knobs exported under hw.mwl.*: rx/tx descriptor and
 * buffer pool sizes plus per-interrupt processing limits.  The
 * TUNABLE_INT entries additionally make the values settable from
 * loader.conf at boot.
 */
184 SYSCTL_NODE(_hw, OID_AUTO, mwl, CTLFLAG_RD, 0, "Marvell driver parameters");
186 static int mwl_rxdesc = MWL_RXDESC; /* # rx desc's to allocate */
187 SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc,
188 0, "rx descriptors allocated");
189 static int mwl_rxbuf = MWL_RXBUF; /* # rx buffers to allocate */
190 SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RW, &mwl_rxbuf,
191 0, "rx buffers allocated");
192 TUNABLE_INT("hw.mwl.rxbuf", &mwl_rxbuf);
193 static int mwl_txbuf = MWL_TXBUF; /* # tx buffers to allocate */
194 SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RW, &mwl_txbuf,
195 0, "tx buffers allocated");
196 TUNABLE_INT("hw.mwl.txbuf", &mwl_txbuf);
197 static int mwl_txcoalesce = 8; /* # tx packets to q before poking f/w*/
198 SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RW, &mwl_txcoalesce,
199 0, "tx buffers to send at once");
200 TUNABLE_INT("hw.mwl.txcoalesce", &mwl_txcoalesce);
201 static int mwl_rxquota = MWL_RXBUF; /* # max buffers to process */
202 SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RW, &mwl_rxquota,
203 0, "max rx buffers to process per interrupt");
204 TUNABLE_INT("hw.mwl.rxquota", &mwl_rxquota);
205 static int mwl_rxdmalow = 3; /* # min buffers for wakeup */
206 SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RW, &mwl_rxdmalow,
207 0, "min free rx buffers before restarting traffic");
208 TUNABLE_INT("hw.mwl.rxdmalow", &mwl_rxdmalow);
/*
 * hw.mwl.debug control (mwl_debug) and the sc_debug bit values that
 * select DPRINTF trace classes.  Each bit enables one category;
 * MWL_DEBUG_ANY enables everything.  NOTE(review): the opening
 * "enum {" for these constants is not visible in this chunk.
 */
211 static int mwl_debug = 0;
212 SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RW, &mwl_debug,
213 0, "control debugging printfs");
214 TUNABLE_INT("hw.mwl.debug", &mwl_debug);
216 MWL_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
217 MWL_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */
218 MWL_DEBUG_RECV = 0x00000004, /* basic recv operation */
219 MWL_DEBUG_RECV_DESC = 0x00000008, /* recv descriptors */
220 MWL_DEBUG_RESET = 0x00000010, /* reset processing */
221 MWL_DEBUG_BEACON = 0x00000020, /* beacon handling */
222 MWL_DEBUG_INTR = 0x00000040, /* ISR */
223 MWL_DEBUG_TX_PROC = 0x00000080, /* tx ISR proc */
224 MWL_DEBUG_RX_PROC = 0x00000100, /* rx ISR proc */
225 MWL_DEBUG_KEYCACHE = 0x00000200, /* key cache management */
226 MWL_DEBUG_STATE = 0x00000400, /* 802.11 state transitions */
227 MWL_DEBUG_NODE = 0x00000800, /* node management */
228 MWL_DEBUG_RECV_ALL = 0x00001000, /* trace all frames (beacons) */
229 MWL_DEBUG_TSO = 0x00002000, /* TSO processing */
230 MWL_DEBUG_AMPDU = 0x00004000, /* BA stream handling */
231 MWL_DEBUG_ANY = 0xffffffff
/*
 * Debug trace helpers.  The first IFF_DUMPPKTS_* / DPRINTF /
 * KEYPRINTF set is the MWL_DEBUG variant keyed off sc->sc_debug;
 * the second set further below is the stubbed non-debug variant.
 * NOTE(review): the #ifdef MWL_DEBUG / #else / #endif lines that
 * select between the two sets are not visible in this chunk.
 */
233 #define IS_BEACON(wh) \
234 ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK|IEEE80211_FC0_SUBTYPE_MASK)) == \
235 (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON))
236 #define IFF_DUMPPKTS_RECV(sc, wh) \
237 (((sc->sc_debug & MWL_DEBUG_RECV) && \
238 ((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IS_BEACON(wh))) || \
239 (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
240 #define IFF_DUMPPKTS_XMIT(sc) \
241 ((sc->sc_debug & MWL_DEBUG_XMIT) || \
242 (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
243 #define DPRINTF(sc, m, fmt, ...) do { \
244 if (sc->sc_debug & (m)) \
245 printf(fmt, __VA_ARGS__); \
247 #define KEYPRINTF(sc, hk, mac) do { \
248 if (sc->sc_debug & MWL_DEBUG_KEYCACHE) \
249 mwl_keyprint(sc, __func__, hk, mac); \
251 static void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix);
252 static void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix);
/* Non-debug variants: packet dumps only via IFF_DEBUG|IFF_LINK2. */
254 #define IFF_DUMPPKTS_RECV(sc, wh) \
255 ((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
256 #define IFF_DUMPPKTS_XMIT(sc) \
257 ((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
258 #define DPRINTF(sc, m, fmt, ...) do { \
261 #define KEYPRINTF(sc, k, mac) do { \
266 static MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers");
/*
 * On-air framing handed to the firmware: a fixed front matter of a
 * 2-byte payload length plus a 4-address 802.11 header, then the
 * payload.  NOTE(review): the enclosing "struct mwltxrec {" line and
 * any trailing members are not visible in this chunk.
 */
269 * Each packet has fixed front matter: a 2-byte length
270 * of the payload, followed by a 4-address 802.11 header
271 * (regardless of the actual header and always w/o any
272 * QoS header). The payload then follows.
276 struct ieee80211_frame_addr4 wh;
280 * Read/Write shorthands for accesses to BAR 0. Note
281 * that all BAR 1 operations are done in the "hal" and
282 * there should be no reference to them here.
284 static __inline uint32_t
285 RD4(struct mwl_softc *sc, bus_size_t off)
287 return bus_space_read_4(sc->sc_io0t, sc->sc_io0h, off);
291 WR4(struct mwl_softc *sc, bus_size_t off, uint32_t val)
293 bus_space_write_4(sc->sc_io0t, sc->sc_io0h, off, val);
/*
 * Device attach: allocate the ifnet, attach the hal and load
 * firmware, fetch h/w specs and channels, set up DMA, taskqueue and
 * tx queues, then register capabilities and method overrides with
 * net80211.  Returns 0 on success or an errno on failure (error
 * unwinding paths are only partially visible in this chunk).
 */
297 mwl_attach(uint16_t devid, struct mwl_softc *sc)
300 struct ieee80211com *ic;
304 DPRINTF(sc, MWL_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);
306 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
308 device_printf(sc->sc_dev, "cannot if_alloc()\n");
314 * Setup the RX free list lock early, so it can be consistently
319 /* set these up early for if_printf use */
320 if_initname(ifp, device_get_name(sc->sc_dev),
321 device_get_unit(sc->sc_dev));
323 mh = mwl_hal_attach(sc->sc_dev, devid,
324 sc->sc_io1h, sc->sc_io1t, sc->sc_dmat);
326 if_printf(ifp, "unable to attach HAL\n");
332 * Load firmware so we can get setup. We arbitrarily
333 * pick station firmware; we'll re-load firmware as
334 * needed so setting up the wrong mode isn't a big deal.
336 if (mwl_hal_fwload(mh, NULL) != 0) {
337 if_printf(ifp, "unable to setup builtin firmware\n");
341 if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
342 if_printf(ifp, "unable to fetch h/w specs\n");
346 error = mwl_getchannels(sc);
350 sc->sc_txantenna = 0; /* h/w default */
351 sc->sc_rxantenna = 0; /* h/w default */
352 sc->sc_invalid = 0; /* ready to go, enable int handling */
353 sc->sc_ageinterval = MWL_AGEINTERVAL;
356 * Allocate tx+rx descriptors and populate the lists.
357 * We immediately push the information to the firmware
358 * as otherwise it gets upset.
360 error = mwl_dma_setup(sc);
362 if_printf(ifp, "failed to setup descriptors: %d\n", error);
365 error = mwl_setupdma(sc); /* push to firmware */
366 if (error != 0) /* NB: mwl_setupdma prints msg */
369 callout_init(&sc->sc_timer, CALLOUT_MPSAFE);
370 callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);
/* Private taskqueue for deferred interrupt work (rx/tx/radar/BA). */
372 sc->sc_tq = taskqueue_create("mwl_taskq", M_NOWAIT,
373 taskqueue_thread_enqueue, &sc->sc_tq);
374 taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
375 "%s taskq", ifp->if_xname);
377 TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
378 TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc);
379 TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc);
380 TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc);
382 /* NB: insure BK queue is the lowest priority h/w queue */
383 if (!mwl_tx_setup(sc, WME_AC_BK, MWL_WME_AC_BK)) {
384 if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
385 ieee80211_wme_acnames[WME_AC_BK]);
389 if (!mwl_tx_setup(sc, WME_AC_BE, MWL_WME_AC_BE) ||
390 !mwl_tx_setup(sc, WME_AC_VI, MWL_WME_AC_VI) ||
391 !mwl_tx_setup(sc, WME_AC_VO, MWL_WME_AC_VO)) {
393 * Not enough hardware tx queues to properly do WME;
394 * just punt and assign them all to the same h/w queue.
395 * We could do a better job of this if, for example,
396 * we allocate queues when we switch from station to
/* Fall back: collapse all ACs onto the BK queue. */
399 if (sc->sc_ac2q[WME_AC_VI] != NULL)
400 mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
401 if (sc->sc_ac2q[WME_AC_BE] != NULL)
402 mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
403 sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
404 sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
405 sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
407 TASK_INIT(&sc->sc_txtask, 0, mwl_tx_proc, sc);
410 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
411 ifp->if_start = mwl_start;
412 ifp->if_ioctl = mwl_ioctl;
413 ifp->if_init = mwl_init;
414 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
415 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
416 IFQ_SET_READY(&ifp->if_snd);
419 /* XXX not right but it's not used anywhere important */
420 ic->ic_phytype = IEEE80211_T_OFDM;
421 ic->ic_opmode = IEEE80211_M_STA;
/* Capability flags advertised to net80211 (ic_caps). */
423 IEEE80211_C_STA /* station mode supported */
424 | IEEE80211_C_HOSTAP /* hostap mode */
425 | IEEE80211_C_MONITOR /* monitor mode */
427 | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */
428 | IEEE80211_C_AHDEMO /* adhoc demo mode */
430 | IEEE80211_C_MBSS /* mesh point link mode */
431 | IEEE80211_C_WDS /* WDS supported */
432 | IEEE80211_C_SHPREAMBLE /* short preamble supported */
433 | IEEE80211_C_SHSLOT /* short slot time supported */
434 | IEEE80211_C_WME /* WME/WMM supported */
435 | IEEE80211_C_BURST /* xmit bursting supported */
436 | IEEE80211_C_WPA /* capable of WPA1+WPA2 */
437 | IEEE80211_C_BGSCAN /* capable of bg scanning */
438 | IEEE80211_C_TXFRAG /* handle tx frags */
439 | IEEE80211_C_TXPMGT /* capable of txpow mgt */
440 | IEEE80211_C_DFS /* DFS supported */
/* HT (802.11n) capability flags (ic_htcaps). */
444 IEEE80211_HTCAP_SMPS_ENA /* SM PS mode enabled */
445 | IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width */
446 | IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */
447 | IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */
448 | IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
449 #if MWL_AGGR_SIZE == 7935
450 | IEEE80211_HTCAP_MAXAMSDU_7935 /* max A-MSDU length */
452 | IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */
455 | IEEE80211_HTCAP_PSMP /* PSMP supported */
456 | IEEE80211_HTCAP_40INTOLERANT /* 40MHz intolerant */
458 /* s/w capabilities */
459 | IEEE80211_HTC_HT /* HT operation */
460 | IEEE80211_HTC_AMPDU /* tx A-MPDU */
461 | IEEE80211_HTC_AMSDU /* tx A-MSDU */
462 | IEEE80211_HTC_SMPS /* SMPS available */
466 * Mark h/w crypto support.
467 * XXX no way to query h/w support.
469 ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
470 | IEEE80211_CRYPTO_AES_CCM
471 | IEEE80211_CRYPTO_TKIP
472 | IEEE80211_CRYPTO_TKIPMIC
475 * Transmit requires space in the packet for a special
476 * format transmit record and optional padding between
477 * this record and the payload. Ask the net80211 layer
478 * to arrange this when encapsulating packets so we can
479 * add it efficiently.
481 ic->ic_headroom = sizeof(struct mwltxrec) -
482 sizeof(struct ieee80211_frame);
484 /* call MI attach routine. */
485 ieee80211_ifattach(ic, sc->sc_hwspecs.macAddr);
486 ic->ic_setregdomain = mwl_setregdomain;
487 ic->ic_getradiocaps = mwl_getradiocaps;
488 /* override default methods */
489 ic->ic_raw_xmit = mwl_raw_xmit;
490 ic->ic_newassoc = mwl_newassoc;
491 ic->ic_updateslot = mwl_updateslot;
492 ic->ic_update_mcast = mwl_update_mcast;
493 ic->ic_update_promisc = mwl_update_promisc;
494 ic->ic_wme.wme_update = mwl_wme_update;
/* Save the default node methods so ours can chain to them. */
496 ic->ic_node_alloc = mwl_node_alloc;
497 sc->sc_node_cleanup = ic->ic_node_cleanup;
498 ic->ic_node_cleanup = mwl_node_cleanup;
499 sc->sc_node_drain = ic->ic_node_drain;
500 ic->ic_node_drain = mwl_node_drain;
501 ic->ic_node_getsignal = mwl_node_getsignal;
502 ic->ic_node_getmimoinfo = mwl_node_getmimoinfo;
504 ic->ic_scan_start = mwl_scan_start;
505 ic->ic_scan_end = mwl_scan_end;
506 ic->ic_set_channel = mwl_set_channel;
/* Interpose on A-MPDU action handling, chaining to the defaults. */
508 sc->sc_recv_action = ic->ic_recv_action;
509 ic->ic_recv_action = mwl_recv_action;
510 sc->sc_addba_request = ic->ic_addba_request;
511 ic->ic_addba_request = mwl_addba_request;
512 sc->sc_addba_response = ic->ic_addba_response;
513 ic->ic_addba_response = mwl_addba_response;
514 sc->sc_addba_stop = ic->ic_addba_stop;
515 ic->ic_addba_stop = mwl_addba_stop;
517 ic->ic_vap_create = mwl_vap_create;
518 ic->ic_vap_delete = mwl_vap_delete;
520 ieee80211_radiotap_attach(ic,
521 &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
522 MWL_TX_RADIOTAP_PRESENT,
523 &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
524 MWL_RX_RADIOTAP_PRESENT);
526 * Setup dynamic sysctl's now that country code and
527 * regdomain are available from the hal.
529 mwl_sysctlattach(sc);
532 ieee80211_announce(ic);
/* Error unwinding (labels not visible in this chunk). */
540 MWL_RXFREE_DESTROY(sc);
/*
 * Device detach: tear down net80211 state first, then reclaim
 * driver resources and finally detach the hal (see ordering notes
 * below).  Several cleanup calls are not visible in this chunk.
 */
547 mwl_detach(struct mwl_softc *sc)
549 struct ifnet *ifp = sc->sc_ifp;
550 struct ieee80211com *ic = ifp->if_l2com;
552 DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
553 __func__, ifp->if_flags);
557 * NB: the order of these is important:
558 * o call the 802.11 layer before detaching the hal to
559 * insure callbacks into the driver to delete global
560 * key cache entries can be handled
561 * o reclaim the tx queue data structures after calling
562 * the 802.11 layer as we'll get called back to reclaim
563 * node state and potentially want to use them
564 * o to cleanup the tx queues the hal is called, so detach
566 * Other than that, it's straightforward...
568 ieee80211_ifdetach(ic);
569 callout_drain(&sc->sc_watchdog);
571 MWL_RXFREE_DESTROY(sc);
573 mwl_hal_detach(sc->sc_mh);
580 * MAC address handling for multiple BSS on the same radio.
581 * The first vap uses the MAC address from the EEPROM. For
582 * subsequent vap's we set the U/L bit (bit 1) in the MAC
583 * address and use the next six bits as an index.
/*
 * Pick a (possibly cloned) MAC address for a new vap and mark its
 * slot in sc_bssidmask.  NOTE(review): the else-branch for the
 * non-mbss case and the loop-exit handling are not visible in this
 * chunk.
 */
586 assign_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
590 if (clone && mwl_hal_ismbsscapable(sc->sc_mh)) {
591 /* NB: we only do this if h/w supports multiple bssid */
/* find the first free bssid slot */
592 for (i = 0; i < 32; i++)
593 if ((sc->sc_bssidmask & (1<<i)) == 0)
/* fold the slot index into the locally-administered address */
596 mac[0] |= (i << 2)|0x2;
599 sc->sc_bssidmask |= 1<<i;
/*
 * Return a vap's MAC address slot to the pool.  NOTE(review): the
 * computation of the slot index 'i' from the mac bytes is not
 * visible in this chunk.
 */
605 reclaim_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN])
608 if (i != 0 || --sc->sc_nbssid0 == 0)
609 sc->sc_bssidmask &= ~(1<<i);
/*
 * Create a vap: allocate a hal vap for modes that need one
 * (AP/MBSS/STA), assign a MAC address, set up driver method
 * overrides, and fix the overall ic_opmode.  WDS vaps piggyback on
 * an existing AP vap's hal handle.  Returns the new vap or NULL on
 * failure (several error/cleanup lines are not visible in this
 * chunk).
 */
612 static struct ieee80211vap *
613 mwl_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
614 enum ieee80211_opmode opmode, int flags,
615 const uint8_t bssid[IEEE80211_ADDR_LEN],
616 const uint8_t mac0[IEEE80211_ADDR_LEN])
618 struct ifnet *ifp = ic->ic_ifp;
619 struct mwl_softc *sc = ifp->if_softc;
620 struct mwl_hal *mh = sc->sc_mh;
621 struct ieee80211vap *vap, *apvap;
622 struct mwl_hal_vap *hvap;
624 uint8_t mac[IEEE80211_ADDR_LEN];
626 IEEE80211_ADDR_COPY(mac, mac0);
/* Per-opmode hal vap setup (enclosing switch not fully visible). */
628 case IEEE80211_M_HOSTAP:
629 case IEEE80211_M_MBSS:
630 if ((flags & IEEE80211_CLONE_MACADDR) == 0)
631 assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
632 hvap = mwl_hal_newvap(mh, MWL_HAL_AP, mac);
/* on failure give back the address we claimed */
634 if ((flags & IEEE80211_CLONE_MACADDR) == 0)
635 reclaim_address(sc, mac);
639 case IEEE80211_M_STA:
640 if ((flags & IEEE80211_CLONE_MACADDR) == 0)
641 assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
642 hvap = mwl_hal_newvap(mh, MWL_HAL_STA, mac);
644 if ((flags & IEEE80211_CLONE_MACADDR) == 0)
645 reclaim_address(sc, mac);
648 /* no h/w beacon miss support; always use s/w */
649 flags |= IEEE80211_CLONE_NOBEACONS;
651 case IEEE80211_M_WDS:
652 hvap = NULL; /* NB: we use associated AP vap */
653 if (sc->sc_napvaps == 0)
654 return NULL; /* no existing AP vap */
656 case IEEE80211_M_MONITOR:
659 case IEEE80211_M_IBSS:
660 case IEEE80211_M_AHDEMO:
665 mvp = (struct mwl_vap *) malloc(sizeof(struct mwl_vap),
666 M_80211_VAP, M_NOWAIT | M_ZERO);
/* allocation failed: release the hal vap and address */
669 mwl_hal_delvap(hvap);
670 if ((flags & IEEE80211_CLONE_MACADDR) == 0)
671 reclaim_address(sc, mac);
677 if (opmode == IEEE80211_M_WDS) {
679 * WDS vaps must have an associated AP vap; find one.
682 TAILQ_FOREACH(apvap, &ic->ic_vaps, iv_next)
683 if (apvap->iv_opmode == IEEE80211_M_HOSTAP) {
684 mvp->mv_ap_hvap = MWL_VAP(apvap)->mv_hvap;
687 KASSERT(mvp->mv_ap_hvap != NULL, ("no ap vap"));
690 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
692 IEEE80211_ADDR_COPY(vap->iv_myaddr, mac);
693 /* override with driver methods */
694 mvp->mv_newstate = vap->iv_newstate;
695 vap->iv_newstate = mwl_newstate;
696 vap->iv_max_keyix = 0; /* XXX */
697 vap->iv_key_alloc = mwl_key_alloc;
698 vap->iv_key_delete = mwl_key_delete;
699 vap->iv_key_set = mwl_key_set;
700 #ifdef MWL_HOST_PS_SUPPORT
701 if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
702 vap->iv_update_ps = mwl_update_ps;
703 mvp->mv_set_tim = vap->iv_set_tim;
704 vap->iv_set_tim = mwl_set_tim;
707 vap->iv_reset = mwl_reset;
708 vap->iv_update_beacon = mwl_beacon_update;
710 /* override max aid so sta's cannot assoc when we're out of sta id's */
711 vap->iv_max_aid = MWL_MAXSTAID;
712 /* override default A-MPDU rx parameters */
713 vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
714 vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;
717 ieee80211_vap_attach(vap, mwl_media_change, ieee80211_media_status);
/* Track vap counts per mode (increments not visible in chunk). */
719 switch (vap->iv_opmode) {
720 case IEEE80211_M_HOSTAP:
721 case IEEE80211_M_MBSS:
722 case IEEE80211_M_STA:
724 * Setup sta db entry for local address.
727 if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
728 vap->iv_opmode == IEEE80211_M_MBSS)
733 case IEEE80211_M_WDS:
740 * Setup overall operating mode.
/* AP vaps dominate, then sta, else whatever was requested. */
743 ic->ic_opmode = IEEE80211_M_HOSTAP;
744 else if (sc->sc_nstavaps)
745 ic->ic_opmode = IEEE80211_M_STA;
747 ic->ic_opmode = opmode;
/*
 * Destroy a vap: quiesce the h/w, detach from net80211, release the
 * hal vap and MAC address for modes that own one, flush any queued
 * tx frames for the vap, then re-enable interrupts if still running.
 */
753 mwl_vap_delete(struct ieee80211vap *vap)
755 struct mwl_vap *mvp = MWL_VAP(vap);
756 struct ifnet *parent = vap->iv_ic->ic_ifp;
757 struct mwl_softc *sc = parent->if_softc;
758 struct mwl_hal *mh = sc->sc_mh;
759 struct mwl_hal_vap *hvap = mvp->mv_hvap;
760 enum ieee80211_opmode opmode = vap->iv_opmode;
762 /* XXX disallow ap vap delete if WDS still present */
763 if (parent->if_drv_flags & IFF_DRV_RUNNING) {
764 /* quiesce h/w while we remove the vap */
765 mwl_hal_intrset(mh, 0); /* disable interrupts */
767 ieee80211_vap_detach(vap);
/* Per-opmode teardown (enclosing switch not fully visible). */
769 case IEEE80211_M_HOSTAP:
770 case IEEE80211_M_MBSS:
771 case IEEE80211_M_STA:
772 KASSERT(hvap != NULL, ("no hal vap handle"));
773 (void) mwl_hal_delstation(hvap, vap->iv_myaddr);
774 mwl_hal_delvap(hvap);
775 if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS)
779 /* XXX don't do it for IEEE80211_CLONE_MACADDR */
780 reclaim_address(sc, vap->iv_myaddr);
782 case IEEE80211_M_WDS:
788 mwl_cleartxq(sc, vap);
789 free(mvp, M_80211_VAP);
790 if (parent->if_drv_flags & IFF_DRV_RUNNING)
791 mwl_hal_intrset(mh, sc->sc_imask);
/*
 * Power-management suspend hook.  NOTE(review): the actual stop
 * call is not visible in this chunk.
 */
795 mwl_suspend(struct mwl_softc *sc)
797 struct ifnet *ifp = sc->sc_ifp;
799 DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
800 __func__, ifp->if_flags);
/*
 * Power-management resume hook: re-init the interface if it was
 * administratively up.  NOTE(review): the mwl_init call under the
 * IFF_UP test is not visible in this chunk.
 */
806 mwl_resume(struct mwl_softc *sc)
808 struct ifnet *ifp = sc->sc_ifp;
810 DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
811 __func__, ifp->if_flags);
813 if (ifp->if_flags & IFF_UP)
818 mwl_shutdown(void *arg)
820 struct mwl_softc *sc = arg;
822 mwl_stop(sc->sc_ifp, 1);
826 * Interrupt handler. Most of the actual processing is deferred.
/*
 * Read-and-clear the ISR via the hal and fan each cause bit out to
 * the matching taskqueue task.  NOTE(review): the function signature
 * line and some branch bodies are not visible in this chunk.
 */
831 struct mwl_softc *sc = arg;
832 struct mwl_hal *mh = sc->sc_mh;
835 if (sc->sc_invalid) {
837 * The hardware is not ready/present, don't touch anything.
838 * Note this can happen early on if the IRQ is shared.
840 DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
844 * Figure out the reason(s) for the interrupt.
846 mwl_hal_getisr(mh, &status); /* NB: clears ISR too */
847 if (status == 0) /* must be a shared irq */
850 DPRINTF(sc, MWL_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n",
851 __func__, status, sc->sc_imask);
852 if (status & MACREG_A2HRIC_BIT_RX_RDY)
853 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
854 if (status & MACREG_A2HRIC_BIT_TX_DONE)
855 taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
856 if (status & MACREG_A2HRIC_BIT_BA_WATCHDOG)
857 taskqueue_enqueue(sc->sc_tq, &sc->sc_bawatchdogtask);
858 if (status & MACREG_A2HRIC_BIT_OPC_DONE)
860 if (status & MACREG_A2HRIC_BIT_MAC_EVENT) {
863 if (status & MACREG_A2HRIC_BIT_ICV_ERROR) {
/* TKIP ICV error: count it (countermeasures handled elsewhere) */
865 sc->sc_stats.mst_rx_badtkipicv++;
867 if (status & MACREG_A2HRIC_BIT_QUEUE_EMPTY) {
868 /* 11n aggregation queue is empty, re-fill */
871 if (status & MACREG_A2HRIC_BIT_QUEUE_FULL) {
874 if (status & MACREG_A2HRIC_BIT_RADAR_DETECT) {
875 /* radar detected, process event */
876 taskqueue_enqueue(sc->sc_tq, &sc->sc_radartask);
878 if (status & MACREG_A2HRIC_BIT_CHAN_SWITCH) {
879 /* DFS channel switch */
880 taskqueue_enqueue(sc->sc_tq, &sc->sc_chanswitchtask);
/*
 * Deferred radar-detect handler: count the event and notify the
 * 802.11 DFS machinery for the current channel.  NOTE(review): the
 * IEEE80211_LOCK acquisition paired with the UNLOCK below is not
 * visible in this chunk.
 */
885 mwl_radar_proc(void *arg, int pending)
887 struct mwl_softc *sc = arg;
888 struct ifnet *ifp = sc->sc_ifp;
889 struct ieee80211com *ic = ifp->if_l2com;
891 DPRINTF(sc, MWL_DEBUG_ANY, "%s: radar detected, pending %u\n",
894 sc->sc_stats.mst_radardetect++;
895 /* XXX stop h/w BA streams? */
898 ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
899 IEEE80211_UNLOCK(ic);
/*
 * Deferred DFS channel-switch handler: clear the pending flag and
 * complete the CSA in net80211.  NOTE(review): the IEEE80211_LOCK
 * paired with the UNLOCK below is not visible in this chunk.
 */
903 mwl_chanswitch_proc(void *arg, int pending)
905 struct mwl_softc *sc = arg;
906 struct ifnet *ifp = sc->sc_ifp;
907 struct ieee80211com *ic = ifp->if_l2com;
909 DPRINTF(sc, MWL_DEBUG_ANY, "%s: channel switch notice, pending %u\n",
913 sc->sc_csapending = 0;
914 ieee80211_csa_completeswitch(ic);
915 IEEE80211_UNLOCK(ic);
919 mwl_bawatchdog(const MWL_HAL_BASTREAM *sp)
921 struct ieee80211_node *ni = sp->data[0];
923 /* send DELBA and drop the stream */
924 ieee80211_ampdu_stop(ni, sp->data[1], IEEE80211_REASON_UNSPECIFIED);
/*
 * Deferred BA watchdog handler: ask the firmware which BA stream(s)
 * misbehaved and tear them down.  Bitmap 0xff means "all streams";
 * otherwise the value names a single stream.  NOTE(review): several
 * guard/loop-body lines are not visible in this chunk.
 */
928 mwl_bawatchdog_proc(void *arg, int pending)
930 struct mwl_softc *sc = arg;
931 struct mwl_hal *mh = sc->sc_mh;
932 const MWL_HAL_BASTREAM *sp;
935 sc->sc_stats.mst_bawatchdog++;
937 if (mwl_hal_getwatchdogbitmap(mh, &bitmap) != 0) {
938 DPRINTF(sc, MWL_DEBUG_AMPDU,
939 "%s: could not get bitmap\n", __func__);
940 sc->sc_stats.mst_bawatchdog_failed++;
943 DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bitmap 0x%x\n", __func__, bitmap);
944 if (bitmap == 0xff) {
946 /* disable all ba streams */
947 for (bitmap = 0; bitmap < 8; bitmap++) {
948 sp = mwl_hal_bastream_lookup(mh, bitmap);
955 DPRINTF(sc, MWL_DEBUG_AMPDU,
956 "%s: no BA streams found\n", __func__);
957 sc->sc_stats.mst_bawatchdog_empty++;
959 } else if (bitmap != 0xaa) {
960 /* disable a single ba stream */
961 sp = mwl_hal_bastream_lookup(mh, bitmap);
965 DPRINTF(sc, MWL_DEBUG_AMPDU,
966 "%s: no BA stream %d\n", __func__, bitmap);
967 sc->sc_stats.mst_bawatchdog_notfound++;
973 * Convert net80211 channel to a HAL channel.
976 mwl_mapchan(MWL_HAL_CHANNEL *hc, const struct ieee80211_channel *chan)
978 hc->channel = chan->ic_ieee;
980 *(uint32_t *)&hc->channelFlags = 0;
981 if (IEEE80211_IS_CHAN_2GHZ(chan))
982 hc->channelFlags.FreqBand = MWL_FREQ_BAND_2DOT4GHZ;
983 else if (IEEE80211_IS_CHAN_5GHZ(chan))
984 hc->channelFlags.FreqBand = MWL_FREQ_BAND_5GHZ;
985 if (IEEE80211_IS_CHAN_HT40(chan)) {
986 hc->channelFlags.ChnlWidth = MWL_CH_40_MHz_WIDTH;
987 if (IEEE80211_IS_CHAN_HT40U(chan))
988 hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_ABOVE_CTRL_CH;
990 hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_BELOW_CTRL_CH;
992 hc->channelFlags.ChnlWidth = MWL_CH_20_MHz_WIDTH;
993 /* XXX 10MHz channels */
997 * Inform firmware of our tx/rx dma setup. The BAR 0
998 * writes below are for compatibility with older firmware.
999 * For current firmware we send this information with a
1000 * cmd block via mwl_hal_sethwdma.
1003 mwl_setupdma(struct mwl_softc *sc)
1007 sc->sc_hwdma.rxDescRead = sc->sc_rxdma.dd_desc_paddr;
1008 WR4(sc, sc->sc_hwspecs.rxDescRead, sc->sc_hwdma.rxDescRead);
1009 WR4(sc, sc->sc_hwspecs.rxDescWrite, sc->sc_hwdma.rxDescRead);
1011 for (i = 0; i < MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; i++) {
1012 struct mwl_txq *txq = &sc->sc_txq[i];
1013 sc->sc_hwdma.wcbBase[i] = txq->dma.dd_desc_paddr;
1014 WR4(sc, sc->sc_hwspecs.wcbBase[i], sc->sc_hwdma.wcbBase[i]);
1016 sc->sc_hwdma.maxNumTxWcb = mwl_txbuf;
1017 sc->sc_hwdma.maxNumWCB = MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES;
1019 error = mwl_hal_sethwdma(sc->sc_mh, &sc->sc_hwdma);
1021 device_printf(sc->sc_dev,
1022 "unable to setup tx/rx dma; hal status %u\n", error);
1029 * Inform firmware of tx rate parameters.
1030 * Called after a channel change.
1033 mwl_setcurchanrates(struct mwl_softc *sc)
1035 struct ifnet *ifp = sc->sc_ifp;
1036 struct ieee80211com *ic = ifp->if_l2com;
1037 const struct ieee80211_rateset *rs;
1038 MWL_HAL_TXRATE rates;
1040 memset(&rates, 0, sizeof(rates));
1041 rs = ieee80211_get_suprates(ic, ic->ic_curchan);
1042 /* rate used to send management frames */
1043 rates.MgtRate = rs->rs_rates[0] & IEEE80211_RATE_VAL;
1044 /* rate used to send multicast frames */
1045 rates.McastRate = rates.MgtRate;
1047 return mwl_hal_settxrate_auto(sc->sc_mh, &rates);
1051 * Inform firmware of tx rate parameters. Called whenever
1052 * user-settable params change and after a channel change.
1055 mwl_setrates(struct ieee80211vap *vap)
1057 struct mwl_vap *mvp = MWL_VAP(vap);
1058 struct ieee80211_node *ni = vap->iv_bss;
1059 const struct ieee80211_txparam *tp = ni->ni_txparms;
1060 MWL_HAL_TXRATE rates;
1062 KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1065 * Update the h/w rate map.
1066 * NB: 0x80 for MCS is passed through unchanged
1068 memset(&rates, 0, sizeof(rates));
1069 /* rate used to send management frames */
1070 rates.MgtRate = tp->mgmtrate;
1071 /* rate used to send multicast frames */
1072 rates.McastRate = tp->mcastrate;
1074 /* while here calculate EAPOL fixed rate cookie */
1075 mvp->mv_eapolformat = htole16(mwl_calcformat(rates.MgtRate, ni));
1077 return mwl_hal_settxrate(mvp->mv_hvap,
1078 tp->ucastrate != IEEE80211_FIXED_RATE_NONE ?
1079 RATE_FIXED : RATE_AUTO, &rates);
1083 * Setup a fixed xmit rate cookie for EAPOL frames.
1086 mwl_seteapolformat(struct ieee80211vap *vap)
1088 struct mwl_vap *mvp = MWL_VAP(vap);
1089 struct ieee80211_node *ni = vap->iv_bss;
1090 enum ieee80211_phymode mode;
1093 KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1095 mode = ieee80211_chan2mode(ni->ni_chan);
1097 * Use legacy rates when operating a mixed HT+non-HT bss.
1098 * NB: this may violate POLA for sta and wds vap's.
1100 if (mode == IEEE80211_MODE_11NA &&
1101 (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1102 rate = vap->iv_txparms[IEEE80211_MODE_11A].mgmtrate;
1103 else if (mode == IEEE80211_MODE_11NG &&
1104 (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1105 rate = vap->iv_txparms[IEEE80211_MODE_11G].mgmtrate;
1107 rate = vap->iv_txparms[mode].mgmtrate;
1109 mvp->mv_eapolformat = htole16(mwl_calcformat(rate, ni));
1113 * Map SKU+country code to region code for radar bin'ing.
1116 mwl_map2regioncode(const struct ieee80211_regdomain *rd)
1118 switch (rd->regdomain) {
1121 return DOMAIN_CODE_FCC;
1123 return DOMAIN_CODE_IC;
1127 if (rd->country == CTRY_SPAIN)
1128 return DOMAIN_CODE_SPAIN;
1129 if (rd->country == CTRY_FRANCE || rd->country == CTRY_FRANCE2)
1130 return DOMAIN_CODE_FRANCE;
1131 /* XXX force 1.3.1 radar type */
1132 return DOMAIN_CODE_ETSI_131;
1134 return DOMAIN_CODE_MKK;
1136 return DOMAIN_CODE_DGT; /* Taiwan */
1140 return DOMAIN_CODE_AUS; /* Australia */
1143 return DOMAIN_CODE_FCC; /* XXX? */
1147 mwl_hal_reset(struct mwl_softc *sc)
1149 struct ifnet *ifp = sc->sc_ifp;
1150 struct ieee80211com *ic = ifp->if_l2com;
1151 struct mwl_hal *mh = sc->sc_mh;
1153 mwl_hal_setantenna(mh, WL_ANTENNATYPE_RX, sc->sc_rxantenna);
1154 mwl_hal_setantenna(mh, WL_ANTENNATYPE_TX, sc->sc_txantenna);
1155 mwl_hal_setradio(mh, 1, WL_AUTO_PREAMBLE);
1156 mwl_hal_setwmm(sc->sc_mh, (ic->ic_flags & IEEE80211_F_WME) != 0);
1157 mwl_chan_set(sc, ic->ic_curchan);
1158 /* NB: RF/RA performance tuned for indoor mode */
1159 mwl_hal_setrateadaptmode(mh, 0);
1160 mwl_hal_setoptimizationlevel(mh,
1161 (ic->ic_flags & IEEE80211_F_BURST) != 0);
1163 mwl_hal_setregioncode(mh, mwl_map2regioncode(&ic->ic_regdomain));
1165 mwl_hal_setaggampduratemode(mh, 1, 80); /* XXX */
1166 mwl_hal_setcfend(mh, 0); /* XXX */
1172 mwl_init_locked(struct mwl_softc *sc)
1174 struct ifnet *ifp = sc->sc_ifp;
1175 struct mwl_hal *mh = sc->sc_mh;
1178 DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
1179 __func__, ifp->if_flags);
1181 MWL_LOCK_ASSERT(sc);
1184 * Stop anything previously setup. This is safe
1185 * whether this is the first time through or not.
1187 mwl_stop_locked(ifp, 0);
1190 * Push vap-independent state to the firmware.
1192 if (!mwl_hal_reset(sc)) {
1193 if_printf(ifp, "unable to reset hardware\n");
1198 * Setup recv (once); transmit is already good to go.
1200 error = mwl_startrecv(sc);
1202 if_printf(ifp, "unable to start recv logic\n");
1207 * Enable interrupts.
1209 sc->sc_imask = MACREG_A2HRIC_BIT_RX_RDY
1210 | MACREG_A2HRIC_BIT_TX_DONE
1211 | MACREG_A2HRIC_BIT_OPC_DONE
1213 | MACREG_A2HRIC_BIT_MAC_EVENT
1215 | MACREG_A2HRIC_BIT_ICV_ERROR
1216 | MACREG_A2HRIC_BIT_RADAR_DETECT
1217 | MACREG_A2HRIC_BIT_CHAN_SWITCH
1219 | MACREG_A2HRIC_BIT_QUEUE_EMPTY
1221 | MACREG_A2HRIC_BIT_BA_WATCHDOG
1222 | MACREQ_A2HRIC_BIT_TX_ACK
1225 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1226 mwl_hal_intrset(mh, sc->sc_imask);
1227 callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);
1235 struct mwl_softc *sc = arg;
1236 struct ifnet *ifp = sc->sc_ifp;
1237 struct ieee80211com *ic = ifp->if_l2com;
1240 DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
1241 __func__, ifp->if_flags);
1244 error = mwl_init_locked(sc);
1248 ieee80211_start_all(ic); /* start all vap's */
1252 mwl_stop_locked(struct ifnet *ifp, int disable)
1254 struct mwl_softc *sc = ifp->if_softc;
1256 DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
1257 __func__, sc->sc_invalid, ifp->if_flags);
1259 MWL_LOCK_ASSERT(sc);
1260 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1262 * Shutdown the hardware and driver.
1264 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1265 callout_stop(&sc->sc_watchdog);
1266 sc->sc_tx_timer = 0;
1272 mwl_stop(struct ifnet *ifp, int disable)
1274 struct mwl_softc *sc = ifp->if_softc;
1277 mwl_stop_locked(ifp, disable);
1282 mwl_reset_vap(struct ieee80211vap *vap, int state)
1284 struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1285 struct ieee80211com *ic = vap->iv_ic;
1287 if (state == IEEE80211_S_RUN)
1290 mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
1291 /* XXX auto? 20/40 split? */
1292 mwl_hal_sethtgi(hvap, (vap->iv_flags_ht &
1293 (IEEE80211_FHT_SHORTGI20|IEEE80211_FHT_SHORTGI40)) ? 1 : 0);
1294 mwl_hal_setnprot(hvap, ic->ic_htprotmode == IEEE80211_PROT_NONE ?
1295 HTPROTECT_NONE : HTPROTECT_AUTO);
1296 /* XXX txpower cap */
1298 /* re-setup beacons */
1299 if (state == IEEE80211_S_RUN &&
1300 (vap->iv_opmode == IEEE80211_M_HOSTAP ||
1301 vap->iv_opmode == IEEE80211_M_MBSS ||
1302 vap->iv_opmode == IEEE80211_M_IBSS)) {
1303 mwl_setapmode(vap, vap->iv_bss->ni_chan);
1304 mwl_hal_setnprotmode(hvap,
1305 MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
1306 return mwl_beacon_setup(vap);
1312 * Reset the hardware w/o losing operational state.
1313 * Used to to reset or reload hardware state for a vap.
1316 mwl_reset(struct ieee80211vap *vap, u_long cmd)
1318 struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1321 if (hvap != NULL) { /* WDS, MONITOR, etc. */
1322 struct ieee80211com *ic = vap->iv_ic;
1323 struct ifnet *ifp = ic->ic_ifp;
1324 struct mwl_softc *sc = ifp->if_softc;
1325 struct mwl_hal *mh = sc->sc_mh;
1327 /* XXX handle DWDS sta vap change */
1328 /* XXX do we need to disable interrupts? */
1329 mwl_hal_intrset(mh, 0); /* disable interrupts */
1330 error = mwl_reset_vap(vap, vap->iv_state);
1331 mwl_hal_intrset(mh, sc->sc_imask);
1337 * Allocate a tx buffer for sending a frame. The
1338 * packet is assumed to have the WME AC stored so
1339 * we can use it to select the appropriate h/w queue.
1341 static struct mwl_txbuf *
1342 mwl_gettxbuf(struct mwl_softc *sc, struct mwl_txq *txq)
1344 struct mwl_txbuf *bf;
1347 * Grab a TX buffer and associated resources.
1350 bf = STAILQ_FIRST(&txq->free);
1352 STAILQ_REMOVE_HEAD(&txq->free, bf_list);
1355 MWL_TXQ_UNLOCK(txq);
1357 DPRINTF(sc, MWL_DEBUG_XMIT,
1358 "%s: out of xmit buffers on q %d\n", __func__, txq->qnum);
1363 * Return a tx buffer to the queue it came from. Note there
1364 * are two cases because we must preserve the order of buffers
1365 * as it reflects the fixed order of descriptors in memory
1366 * (the firmware pre-fetches descriptors so we cannot reorder).
1369 mwl_puttxbuf_head(struct mwl_txq *txq, struct mwl_txbuf *bf)
1374 STAILQ_INSERT_HEAD(&txq->free, bf, bf_list);
1376 MWL_TXQ_UNLOCK(txq);
1380 mwl_puttxbuf_tail(struct mwl_txq *txq, struct mwl_txbuf *bf)
1385 STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
1387 MWL_TXQ_UNLOCK(txq);
1391 mwl_start(struct ifnet *ifp)
1393 struct mwl_softc *sc = ifp->if_softc;
1394 struct ieee80211_node *ni;
1395 struct mwl_txbuf *bf;
1397 struct mwl_txq *txq = NULL; /* XXX silence gcc */
1400 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
1405 IFQ_DEQUEUE(&ifp->if_snd, m);
1409 * Grab the node for the destination.
1411 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
1412 KASSERT(ni != NULL, ("no node"));
1413 m->m_pkthdr.rcvif = NULL; /* committed, clear ref */
1415 * Grab a TX buffer and associated resources.
1416 * We honor the classification by the 802.11 layer.
1418 txq = sc->sc_ac2q[M_WME_GETAC(m)];
1419 bf = mwl_gettxbuf(sc, txq);
1422 ieee80211_free_node(ni);
1423 #ifdef MWL_TX_NODROP
1424 sc->sc_stats.mst_tx_qstop++;
1425 /* XXX blocks other traffic */
1426 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1429 DPRINTF(sc, MWL_DEBUG_XMIT,
1430 "%s: tail drop on q %d\n", __func__, txq->qnum);
1431 sc->sc_stats.mst_tx_qdrop++;
1433 #endif /* MWL_TX_NODROP */
1437 * Pass the frame to the h/w for transmission.
1439 if (mwl_tx_start(sc, ni, bf, m)) {
1441 mwl_puttxbuf_head(txq, bf);
1442 ieee80211_free_node(ni);
1446 if (nqueued >= mwl_txcoalesce) {
1448 * Poke the firmware to process queued frames;
1449 * see below about (lack of) locking.
1452 mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
1457 * NB: We don't need to lock against tx done because
1458 * this just prods the firmware to check the transmit
1459 * descriptors. The firmware will also start fetching
1460 * descriptors by itself if it notices new ones are
1461 * present when it goes to deliver a tx done interrupt
1462 * to the host. So if we race with tx done processing
1463 * it's ok. Delivering the kick here rather than in
1464 * mwl_tx_start is an optimization to avoid poking the
1465 * firmware for each packet.
1467 * NB: the queue id isn't used so 0 is ok.
1469 mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
1474 mwl_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
1475 const struct ieee80211_bpf_params *params)
1477 struct ieee80211com *ic = ni->ni_ic;
1478 struct ifnet *ifp = ic->ic_ifp;
1479 struct mwl_softc *sc = ifp->if_softc;
1480 struct mwl_txbuf *bf;
1481 struct mwl_txq *txq;
1483 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) {
1484 ieee80211_free_node(ni);
1489 * Grab a TX buffer and associated resources.
1490 * Note that we depend on the classification
1491 * by the 802.11 layer to get to the right h/w
1492 * queue. Management frames must ALWAYS go on
1493 * queue 1 but we cannot just force that here
1494 * because we may receive non-mgt frames.
1496 txq = sc->sc_ac2q[M_WME_GETAC(m)];
1497 bf = mwl_gettxbuf(sc, txq);
1499 sc->sc_stats.mst_tx_qstop++;
1500 /* XXX blocks other traffic */
1501 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1502 ieee80211_free_node(ni);
1507 * Pass the frame to the h/w for transmission.
1509 if (mwl_tx_start(sc, ni, bf, m)) {
1511 mwl_puttxbuf_head(txq, bf);
1513 ieee80211_free_node(ni);
1514 return EIO; /* XXX */
1517 * NB: We don't need to lock against tx done because
1518 * this just prods the firmware to check the transmit
1519 * descriptors. The firmware will also start fetching
1520 * descriptors by itself if it notices new ones are
1521 * present when it goes to deliver a tx done interrupt
1522 * to the host. So if we race with tx done processing
1523 * it's ok. Delivering the kick here rather than in
1524 * mwl_tx_start is an optimization to avoid poking the
1525 * firmware for each packet.
1527 * NB: the queue id isn't used so 0 is ok.
1529 mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
1534 mwl_media_change(struct ifnet *ifp)
1536 struct ieee80211vap *vap = ifp->if_softc;
1539 error = ieee80211_media_change(ifp);
1540 /* NB: only the fixed rate can change and that doesn't need a reset */
1541 if (error == ENETRESET) {
1550 mwl_keyprint(struct mwl_softc *sc, const char *tag,
1551 const MWL_HAL_KEYVAL *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
1553 static const char *ciphers[] = {
1560 printf("%s: [%u] %-7s", tag, hk->keyIndex, ciphers[hk->keyTypeId]);
1561 for (i = 0, n = hk->keyLen; i < n; i++)
1562 printf(" %02x", hk->key.aes[i]);
1563 printf(" mac %s", ether_sprintf(mac));
1564 if (hk->keyTypeId == KEY_TYPE_ID_TKIP) {
1565 printf(" %s", "rxmic");
1566 for (i = 0; i < sizeof(hk->key.tkip.rxMic); i++)
1567 printf(" %02x", hk->key.tkip.rxMic[i]);
1569 for (i = 0; i < sizeof(hk->key.tkip.txMic); i++)
1570 printf(" %02x", hk->key.tkip.txMic[i]);
1572 printf(" flags 0x%x\n", hk->keyFlags);
1577 * Allocate a key cache slot for a unicast key. The
1578 * firmware handles key allocation and every station is
1579 * guaranteed key space so we are always successful.
1582 mwl_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
1583 ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
1585 struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
1587 if (k->wk_keyix != IEEE80211_KEYIX_NONE ||
1588 (k->wk_flags & IEEE80211_KEY_GROUP)) {
1589 if (!(&vap->iv_nw_keys[0] <= k &&
1590 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
1591 /* should not happen */
1592 DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1593 "%s: bogus group key\n", __func__);
1596 /* give the caller what they requested */
1597 *keyix = *rxkeyix = k - vap->iv_nw_keys;
1600 * Firmware handles key allocation.
1602 *keyix = *rxkeyix = 0;
1608 * Delete a key entry allocated by mwl_key_alloc.
1611 mwl_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
1613 struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
1614 struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1616 const uint8_t bcastaddr[IEEE80211_ADDR_LEN] =
1617 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1620 if (vap->iv_opmode != IEEE80211_M_WDS) {
1621 /* XXX monitor mode? */
1622 DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1623 "%s: no hvap for opmode %d\n", __func__,
1627 hvap = MWL_VAP(vap)->mv_ap_hvap;
1630 DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: delete key %u\n",
1631 __func__, k->wk_keyix);
1633 memset(&hk, 0, sizeof(hk));
1634 hk.keyIndex = k->wk_keyix;
1635 switch (k->wk_cipher->ic_cipher) {
1636 case IEEE80211_CIPHER_WEP:
1637 hk.keyTypeId = KEY_TYPE_ID_WEP;
1639 case IEEE80211_CIPHER_TKIP:
1640 hk.keyTypeId = KEY_TYPE_ID_TKIP;
1642 case IEEE80211_CIPHER_AES_CCM:
1643 hk.keyTypeId = KEY_TYPE_ID_AES;
1646 /* XXX should not happen */
1647 DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
1648 __func__, k->wk_cipher->ic_cipher);
1651 return (mwl_hal_keyreset(hvap, &hk, bcastaddr) == 0); /*XXX*/
1655 addgroupflags(MWL_HAL_KEYVAL *hk, const struct ieee80211_key *k)
1657 if (k->wk_flags & IEEE80211_KEY_GROUP) {
1658 if (k->wk_flags & IEEE80211_KEY_XMIT)
1659 hk->keyFlags |= KEY_FLAG_TXGROUPKEY;
1660 if (k->wk_flags & IEEE80211_KEY_RECV)
1661 hk->keyFlags |= KEY_FLAG_RXGROUPKEY;
1668 * Set the key cache contents for the specified key. Key cache
1669 * slot(s) must already have been allocated by mwl_key_alloc.
1672 mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
1673 const uint8_t mac[IEEE80211_ADDR_LEN])
1675 #define GRPXMIT (IEEE80211_KEY_XMIT | IEEE80211_KEY_GROUP)
1676 /* NB: static wep keys are marked GROUP+tx/rx; GTK will be tx or rx */
1677 #define IEEE80211_IS_STATICKEY(k) \
1678 (((k)->wk_flags & (GRPXMIT|IEEE80211_KEY_RECV)) == \
1679 (GRPXMIT|IEEE80211_KEY_RECV))
1680 struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
1681 struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1682 const struct ieee80211_cipher *cip = k->wk_cipher;
1683 const uint8_t *macaddr;
1686 KASSERT((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0,
1687 ("s/w crypto set?"));
1690 if (vap->iv_opmode != IEEE80211_M_WDS) {
1691 /* XXX monitor mode? */
1692 DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1693 "%s: no hvap for opmode %d\n", __func__,
1697 hvap = MWL_VAP(vap)->mv_ap_hvap;
1699 memset(&hk, 0, sizeof(hk));
1700 hk.keyIndex = k->wk_keyix;
1701 switch (cip->ic_cipher) {
1702 case IEEE80211_CIPHER_WEP:
1703 hk.keyTypeId = KEY_TYPE_ID_WEP;
1704 hk.keyLen = k->wk_keylen;
1705 if (k->wk_keyix == vap->iv_def_txkey)
1706 hk.keyFlags = KEY_FLAG_WEP_TXKEY;
1707 if (!IEEE80211_IS_STATICKEY(k)) {
1708 /* NB: WEP is never used for the PTK */
1709 (void) addgroupflags(&hk, k);
1712 case IEEE80211_CIPHER_TKIP:
1713 hk.keyTypeId = KEY_TYPE_ID_TKIP;
1714 hk.key.tkip.tsc.high = (uint32_t)(k->wk_keytsc >> 16);
1715 hk.key.tkip.tsc.low = (uint16_t)k->wk_keytsc;
1716 hk.keyFlags = KEY_FLAG_TSC_VALID | KEY_FLAG_MICKEY_VALID;
1717 hk.keyLen = k->wk_keylen + IEEE80211_MICBUF_SIZE;
1718 if (!addgroupflags(&hk, k))
1719 hk.keyFlags |= KEY_FLAG_PAIRWISE;
1721 case IEEE80211_CIPHER_AES_CCM:
1722 hk.keyTypeId = KEY_TYPE_ID_AES;
1723 hk.keyLen = k->wk_keylen;
1724 if (!addgroupflags(&hk, k))
1725 hk.keyFlags |= KEY_FLAG_PAIRWISE;
1728 /* XXX should not happen */
1729 DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
1730 __func__, k->wk_cipher->ic_cipher);
1734 * NB: tkip mic keys get copied here too; the layout
1735 * just happens to match that in ieee80211_key.
1737 memcpy(hk.key.aes, k->wk_key, hk.keyLen);
1740 * Locate address of sta db entry for writing key;
1741 * the convention unfortunately is somewhat different
1742 * than how net80211, hostapd, and wpa_supplicant think.
1744 if (vap->iv_opmode == IEEE80211_M_STA) {
1746 * NB: keys plumbed before the sta reaches AUTH state
1747 * will be discarded or written to the wrong sta db
1748 * entry because iv_bss is meaningless. This is ok
1749 * (right now) because we handle deferred plumbing of
1750 * WEP keys when the sta reaches AUTH state.
1752 macaddr = vap->iv_bss->ni_bssid;
1753 if ((k->wk_flags & IEEE80211_KEY_GROUP) == 0) {
1754 /* XXX plumb to local sta db too for static key wep */
1755 mwl_hal_keyset(hvap, &hk, vap->iv_myaddr);
1757 } else if (vap->iv_opmode == IEEE80211_M_WDS &&
1758 vap->iv_state != IEEE80211_S_RUN) {
1760 * Prior to RUN state a WDS vap will not it's BSS node
1761 * setup so we will plumb the key to the wrong mac
1762 * address (it'll be our local address). Workaround
1763 * this for the moment by grabbing the correct address.
1765 macaddr = vap->iv_des_bssid;
1766 } else if ((k->wk_flags & GRPXMIT) == GRPXMIT)
1767 macaddr = vap->iv_myaddr;
1770 KEYPRINTF(sc, &hk, macaddr);
1771 return (mwl_hal_keyset(hvap, &hk, macaddr) == 0);
1772 #undef IEEE80211_IS_STATICKEY
1776 /* unaligned little endian access */
1777 #define LE_READ_2(p) \
1779 ((((const uint8_t *)(p))[0] ) | \
1780 (((const uint8_t *)(p))[1] << 8)))
1781 #define LE_READ_4(p) \
1783 ((((const uint8_t *)(p))[0] ) | \
1784 (((const uint8_t *)(p))[1] << 8) | \
1785 (((const uint8_t *)(p))[2] << 16) | \
1786 (((const uint8_t *)(p))[3] << 24)))
1789 * Set the multicast filter contents into the hardware.
1790 * XXX f/w has no support; just defer to the os.
1793 mwl_setmcastfilter(struct mwl_softc *sc)
1795 struct ifnet *ifp = sc->sc_ifp;
1797 struct ether_multi *enm;
1798 struct ether_multistep estep;
1799 uint8_t macs[IEEE80211_ADDR_LEN*MWL_HAL_MCAST_MAX];/* XXX stack use */
1805 ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm);
1806 while (enm != NULL) {
1807 /* XXX Punt on ranges. */
1808 if (nmc == MWL_HAL_MCAST_MAX ||
1809 !IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) {
1810 ifp->if_flags |= IFF_ALLMULTI;
1813 IEEE80211_ADDR_COPY(mp, enm->enm_addrlo);
1814 mp += IEEE80211_ADDR_LEN, nmc++;
1815 ETHER_NEXT_MULTI(estep, enm);
1817 ifp->if_flags &= ~IFF_ALLMULTI;
1818 mwl_hal_setmcast(sc->sc_mh, nmc, macs);
1820 /* XXX no mcast filter support; we get everything */
1821 ifp->if_flags |= IFF_ALLMULTI;
1826 mwl_mode_init(struct mwl_softc *sc)
1828 struct ifnet *ifp = sc->sc_ifp;
1829 struct ieee80211com *ic = ifp->if_l2com;
1830 struct mwl_hal *mh = sc->sc_mh;
1833 * NB: Ignore promisc in hostap mode; it's set by the
1834 * bridge. This is wrong but we have no way to
1835 * identify internal requests (from the bridge)
1836 * versus external requests such as for tcpdump.
1838 mwl_hal_setpromisc(mh, (ifp->if_flags & IFF_PROMISC) &&
1839 ic->ic_opmode != IEEE80211_M_HOSTAP);
1840 mwl_setmcastfilter(sc);
1846 * Callback from the 802.11 layer after a multicast state change.
1849 mwl_update_mcast(struct ifnet *ifp)
1851 struct mwl_softc *sc = ifp->if_softc;
1853 mwl_setmcastfilter(sc);
1857 * Callback from the 802.11 layer after a promiscuous mode change.
1858 * Note this interface does not check the operating mode as this
1859 * is an internal callback and we are expected to honor the current
1860 * state (e.g. this is used for setting the interface in promiscuous
1861 * mode when operating in hostap mode to do ACS).
1864 mwl_update_promisc(struct ifnet *ifp)
1866 struct mwl_softc *sc = ifp->if_softc;
1868 mwl_hal_setpromisc(sc->sc_mh, (ifp->if_flags & IFF_PROMISC) != 0);
1872 * Callback from the 802.11 layer to update the slot time
1873 * based on the current setting. We use it to notify the
1874 * firmware of ERP changes and the f/w takes care of things
1875 * like slot time and preamble.
1878 mwl_updateslot(struct ifnet *ifp)
1880 struct mwl_softc *sc = ifp->if_softc;
1881 struct ieee80211com *ic = ifp->if_l2com;
1882 struct mwl_hal *mh = sc->sc_mh;
1885 /* NB: can be called early; suppress needless cmds */
1886 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1890 * Calculate the ERP flags. The firwmare will use
1891 * this to carry out the appropriate measures.
1894 if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
1895 if ((ic->ic_flags & IEEE80211_F_SHSLOT) == 0)
1896 prot |= IEEE80211_ERP_NON_ERP_PRESENT;
1897 if (ic->ic_flags & IEEE80211_F_USEPROT)
1898 prot |= IEEE80211_ERP_USE_PROTECTION;
1899 if (ic->ic_flags & IEEE80211_F_USEBARKER)
1900 prot |= IEEE80211_ERP_LONG_PREAMBLE;
1903 DPRINTF(sc, MWL_DEBUG_RESET,
1904 "%s: chan %u MHz/flags 0x%x %s slot, (prot 0x%x ic_flags 0x%x)\n",
1905 __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
1906 ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", prot,
1909 mwl_hal_setgprot(mh, prot);
1913 * Setup the beacon frame.
1916 mwl_beacon_setup(struct ieee80211vap *vap)
1918 struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1919 struct ieee80211_node *ni = vap->iv_bss;
1920 struct ieee80211_beacon_offsets bo;
1923 m = ieee80211_beacon_alloc(ni, &bo);
1926 mwl_hal_setbeacon(hvap, mtod(m, const void *), m->m_len);
1933 * Update the beacon frame in response to a change.
1936 mwl_beacon_update(struct ieee80211vap *vap, int item)
1938 struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1939 struct ieee80211com *ic = vap->iv_ic;
1941 KASSERT(hvap != NULL, ("no beacon"));
1943 case IEEE80211_BEACON_ERP:
1944 mwl_updateslot(ic->ic_ifp);
1946 case IEEE80211_BEACON_HTINFO:
1947 mwl_hal_setnprotmode(hvap,
1948 MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
1950 case IEEE80211_BEACON_CAPS:
1951 case IEEE80211_BEACON_WME:
1952 case IEEE80211_BEACON_APPIE:
1953 case IEEE80211_BEACON_CSA:
1955 case IEEE80211_BEACON_TIM:
1956 /* NB: firmware always forms TIM */
1959 /* XXX retain beacon frame and update */
1960 mwl_beacon_setup(vap);
1964 mwl_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1966 bus_addr_t *paddr = (bus_addr_t*) arg;
1967 KASSERT(error == 0, ("error %u on bus_dma callback", error));
1968 *paddr = segs->ds_addr;
1971 #ifdef MWL_HOST_PS_SUPPORT
1973 * Handle power save station occupancy changes.
1976 mwl_update_ps(struct ieee80211vap *vap, int nsta)
1978 struct mwl_vap *mvp = MWL_VAP(vap);
1980 if (nsta == 0 || mvp->mv_last_ps_sta == 0)
1981 mwl_hal_setpowersave_bss(mvp->mv_hvap, nsta);
1982 mvp->mv_last_ps_sta = nsta;
1986 * Handle associated station power save state changes.
1989 mwl_set_tim(struct ieee80211_node *ni, int set)
1991 struct ieee80211vap *vap = ni->ni_vap;
1992 struct mwl_vap *mvp = MWL_VAP(vap);
1994 if (mvp->mv_set_tim(ni, set)) { /* NB: state change */
1995 mwl_hal_setpowersave_sta(mvp->mv_hvap,
1996 IEEE80211_AID(ni->ni_associd), set);
2001 #endif /* MWL_HOST_PS_SUPPORT */
2004 mwl_desc_setup(struct mwl_softc *sc, const char *name,
2005 struct mwl_descdma *dd,
2006 int nbuf, size_t bufsize, int ndesc, size_t descsize)
2008 struct ifnet *ifp = sc->sc_ifp;
2012 DPRINTF(sc, MWL_DEBUG_RESET,
2013 "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n",
2014 __func__, name, nbuf, (uintmax_t) bufsize,
2015 ndesc, (uintmax_t) descsize);
2018 dd->dd_desc_len = nbuf * ndesc * descsize;
2021 * Setup DMA descriptor area.
2023 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
2024 PAGE_SIZE, 0, /* alignment, bounds */
2025 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
2026 BUS_SPACE_MAXADDR, /* highaddr */
2027 NULL, NULL, /* filter, filterarg */
2028 dd->dd_desc_len, /* maxsize */
2030 dd->dd_desc_len, /* maxsegsize */
2031 BUS_DMA_ALLOCNOW, /* flags */
2032 NULL, /* lockfunc */
2036 if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
2040 /* allocate descriptors */
2041 error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap);
2043 if_printf(ifp, "unable to create dmamap for %s descriptors, "
2044 "error %u\n", dd->dd_name, error);
2048 error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
2049 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2052 if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
2053 "error %u\n", nbuf * ndesc, dd->dd_name, error);
2057 error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
2058 dd->dd_desc, dd->dd_desc_len,
2059 mwl_load_cb, &dd->dd_desc_paddr,
2062 if_printf(ifp, "unable to map %s descriptors, error %u\n",
2063 dd->dd_name, error);
2068 memset(ds, 0, dd->dd_desc_len);
2069 DPRINTF(sc, MWL_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
2070 __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
2071 (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
2075 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
2077 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
2079 bus_dma_tag_destroy(dd->dd_dmat);
2080 memset(dd, 0, sizeof(*dd));
2086 mwl_desc_cleanup(struct mwl_softc *sc, struct mwl_descdma *dd)
2088 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
2089 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
2090 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
2091 bus_dma_tag_destroy(dd->dd_dmat);
2093 memset(dd, 0, sizeof(*dd));
2097 * Construct a tx q's free list. The order of entries on
2098 * the list must reflect the physical layout of tx descriptors
2099 * because the firmware pre-fetches descriptors.
2101 * XXX might be better to use indices into the buffer array.
2104 mwl_txq_reset(struct mwl_softc *sc, struct mwl_txq *txq)
2106 struct mwl_txbuf *bf;
2109 bf = txq->dma.dd_bufptr;
2110 STAILQ_INIT(&txq->free);
2111 for (i = 0; i < mwl_txbuf; i++, bf++)
2112 STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
2116 #define DS2PHYS(_dd, _ds) \
2117 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
2120 mwl_txdma_setup(struct mwl_softc *sc, struct mwl_txq *txq)
2122 struct ifnet *ifp = sc->sc_ifp;
2123 int error, bsize, i;
2124 struct mwl_txbuf *bf;
2125 struct mwl_txdesc *ds;
2127 error = mwl_desc_setup(sc, "tx", &txq->dma,
2128 mwl_txbuf, sizeof(struct mwl_txbuf),
2129 MWL_TXDESC, sizeof(struct mwl_txdesc));
2133 /* allocate and setup tx buffers */
2134 bsize = mwl_txbuf * sizeof(struct mwl_txbuf);
2135 bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2137 if_printf(ifp, "malloc of %u tx buffers failed\n",
2141 txq->dma.dd_bufptr = bf;
2143 ds = txq->dma.dd_desc;
2144 for (i = 0; i < mwl_txbuf; i++, bf++, ds += MWL_TXDESC) {
2146 bf->bf_daddr = DS2PHYS(&txq->dma, ds);
2147 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
2150 if_printf(ifp, "unable to create dmamap for tx "
2151 "buffer %u, error %u\n", i, error);
2155 mwl_txq_reset(sc, txq);
2160 mwl_txdma_cleanup(struct mwl_softc *sc, struct mwl_txq *txq)
2162 struct mwl_txbuf *bf;
2165 bf = txq->dma.dd_bufptr;
2166 for (i = 0; i < mwl_txbuf; i++, bf++) {
2167 KASSERT(bf->bf_m == NULL, ("mbuf on free list"));
2168 KASSERT(bf->bf_node == NULL, ("node on free list"));
2169 if (bf->bf_dmamap != NULL)
2170 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
2172 STAILQ_INIT(&txq->free);
2174 if (txq->dma.dd_bufptr != NULL) {
2175 free(txq->dma.dd_bufptr, M_MWLDEV);
2176 txq->dma.dd_bufptr = NULL;
2178 if (txq->dma.dd_desc_len != 0)
2179 mwl_desc_cleanup(sc, &txq->dma);
2183 mwl_rxdma_setup(struct mwl_softc *sc)
2185 struct ifnet *ifp = sc->sc_ifp;
2186 int error, jumbosize, bsize, i;
2187 struct mwl_rxbuf *bf;
2188 struct mwl_jumbo *rbuf;
2189 struct mwl_rxdesc *ds;
2192 error = mwl_desc_setup(sc, "rx", &sc->sc_rxdma,
2193 mwl_rxdesc, sizeof(struct mwl_rxbuf),
2194 1, sizeof(struct mwl_rxdesc));
2199 * Receive is done to a private pool of jumbo buffers.
2200 * This allows us to attach to mbuf's and avoid re-mapping
2201 * memory on each rx we post. We allocate a large chunk
2202 * of memory and manage it in the driver. The mbuf free
2203 * callback method is used to reclaim frames after sending
2204 * them up the stack. By default we allocate 2x the number of
2205 * rx descriptors configured so we have some slop to hold
2206 * us while frames are processed.
2208 if (mwl_rxbuf < 2*mwl_rxdesc) {
2210 "too few rx dma buffers (%d); increasing to %d\n",
2211 mwl_rxbuf, 2*mwl_rxdesc);
2212 mwl_rxbuf = 2*mwl_rxdesc;
2214 jumbosize = roundup(MWL_AGGR_SIZE, PAGE_SIZE);
2215 sc->sc_rxmemsize = mwl_rxbuf*jumbosize;
2217 error = bus_dma_tag_create(sc->sc_dmat, /* parent */
2218 PAGE_SIZE, 0, /* alignment, bounds */
2219 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
2220 BUS_SPACE_MAXADDR, /* highaddr */
2221 NULL, NULL, /* filter, filterarg */
2222 sc->sc_rxmemsize, /* maxsize */
2224 sc->sc_rxmemsize, /* maxsegsize */
2225 BUS_DMA_ALLOCNOW, /* flags */
2226 NULL, /* lockfunc */
2229 error = bus_dmamap_create(sc->sc_rxdmat, BUS_DMA_NOWAIT, &sc->sc_rxmap);
2231 if_printf(ifp, "could not create rx DMA map\n");
2235 error = bus_dmamem_alloc(sc->sc_rxdmat, (void**) &sc->sc_rxmem,
2236 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2239 if_printf(ifp, "could not alloc %ju bytes of rx DMA memory\n",
2240 (uintmax_t) sc->sc_rxmemsize);
2244 error = bus_dmamap_load(sc->sc_rxdmat, sc->sc_rxmap,
2245 sc->sc_rxmem, sc->sc_rxmemsize,
2246 mwl_load_cb, &sc->sc_rxmem_paddr,
2249 if_printf(ifp, "could not load rx DMA map\n");
2254 * Allocate rx buffers and set them up.
2256 bsize = mwl_rxdesc * sizeof(struct mwl_rxbuf);
2257 bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2259 if_printf(ifp, "malloc of %u rx buffers failed\n", bsize);
2262 sc->sc_rxdma.dd_bufptr = bf;
2264 STAILQ_INIT(&sc->sc_rxbuf);
2265 ds = sc->sc_rxdma.dd_desc;
2266 for (i = 0; i < mwl_rxdesc; i++, bf++, ds++) {
2268 bf->bf_daddr = DS2PHYS(&sc->sc_rxdma, ds);
2269 /* pre-assign dma buffer */
2270 bf->bf_data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2271 /* NB: tail is intentional to preserve descriptor order */
2272 STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
2276 * Place remainder of dma memory buffers on the free list.
2278 SLIST_INIT(&sc->sc_rxfree);
2279 for (; i < mwl_rxbuf; i++) {
2280 data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2281 rbuf = MWL_JUMBO_DATA2BUF(data);
2282 SLIST_INSERT_HEAD(&sc->sc_rxfree, rbuf, next);
2290 mwl_rxdma_cleanup(struct mwl_softc *sc)
2292 if (sc->sc_rxmap != NULL)
2293 bus_dmamap_unload(sc->sc_rxdmat, sc->sc_rxmap);
2294 if (sc->sc_rxmem != NULL) {
2295 bus_dmamem_free(sc->sc_rxdmat, sc->sc_rxmem, sc->sc_rxmap);
2296 sc->sc_rxmem = NULL;
2298 if (sc->sc_rxmap != NULL) {
2299 bus_dmamap_destroy(sc->sc_rxdmat, sc->sc_rxmap);
2300 sc->sc_rxmap = NULL;
2302 if (sc->sc_rxdma.dd_bufptr != NULL) {
2303 free(sc->sc_rxdma.dd_bufptr, M_MWLDEV);
2304 sc->sc_rxdma.dd_bufptr = NULL;
2306 if (sc->sc_rxdma.dd_desc_len != 0)
2307 mwl_desc_cleanup(sc, &sc->sc_rxdma);
2311 mwl_dma_setup(struct mwl_softc *sc)
2315 error = mwl_rxdma_setup(sc);
2317 mwl_rxdma_cleanup(sc);
2321 for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
2322 error = mwl_txdma_setup(sc, &sc->sc_txq[i]);
2324 mwl_dma_cleanup(sc);
2332 mwl_dma_cleanup(struct mwl_softc *sc)
2336 for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2337 mwl_txdma_cleanup(sc, &sc->sc_txq[i]);
2338 mwl_rxdma_cleanup(sc);
2341 static struct ieee80211_node *
2342 mwl_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2344 struct ieee80211com *ic = vap->iv_ic;
2345 struct mwl_softc *sc = ic->ic_ifp->if_softc;
2346 const size_t space = sizeof(struct mwl_node);
2347 struct mwl_node *mn;
2349 mn = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
2354 DPRINTF(sc, MWL_DEBUG_NODE, "%s: mn %p\n", __func__, mn);
2355 return &mn->mn_node;
/*
 * net80211 node-cleanup hook: if the node holds a firmware station
 * id, delete the corresponding station db entry in the hal (the
 * address used depends on operating mode) and release the staid,
 * then chain to the saved net80211 cleanup method.
 */
2359 mwl_node_cleanup(struct ieee80211_node *ni)
2361 struct ieee80211com *ic = ni->ni_ic;
2362 struct mwl_softc *sc = ic->ic_ifp->if_softc;
2363 struct mwl_node *mn = MWL_NODE(ni);
2365 DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p ic %p staid %d\n",
2366 __func__, ni, ni->ni_ic, mn->mn_staid);
/* staid 0 means no firmware station entry was ever installed */
2368 if (mn->mn_staid != 0) {
2369 struct ieee80211vap *vap = ni->ni_vap;
2371 if (mn->mn_hvap != NULL) {
/* sta mode: entry was keyed by our own address, not the peer's */
2372 if (vap->iv_opmode == IEEE80211_M_STA)
2373 mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr)#
2375 mwl_hal_delstation(mn->mn_hvap, ni->ni_macaddr);
2378 * NB: legacy WDS peer sta db entry is installed using
2379 * the associate ap's hvap; use it again to delete it.
2380 * XXX can vap be NULL?
2382 else if (vap->iv_opmode == IEEE80211_M_WDS &&
2383 MWL_VAP(vap)->mv_ap_hvap != NULL)
2384 mwl_hal_delstation(MWL_VAP(vap)->mv_ap_hvap,
/* return the station id to the allocator pool */
2386 delstaid(sc, mn->mn_staid);
/* chain to the previously saved net80211 cleanup method */
2389 sc->sc_node_cleanup(ni);
2393 * Reclaim rx dma buffers from packets sitting on the ampdu
2394 * reorder queue for a station. We replace buffers with a
2395 * system cluster (if available).
/*
 * NOTE(review): this path uses pool_cache_get_paddr/M_CLUSTER
 * idioms that look NetBSD-derived; presumably compiled only on
 * platforms providing them — confirm against the full file.
 */
2398 mwl_ampdu_rxdma_reclaim(struct ieee80211_rx_ampdu *rap)
2405 n = rap->rxa_qframes;
/* walk at most the reorder window, stop once all queued frames seen */
2406 for (i = 0; i < rap->rxa_wnd && n > 0; i++) {
2411 /* our dma buffers have a well-known free routine */
2412 if ((m->m_flags & M_EXT) == 0 ||
2413 m->m_ext.ext_free != mwl_ext_free)
2416 * Try to allocate a cluster and move the data.
2418 off = m->m_data - m->m_ext.ext_buf;
2419 if (off + m->m_pkthdr.len > MCLBYTES) {
2420 /* XXX no AMSDU for now */
2423 cl = pool_cache_get_paddr(&mclpool_cache, 0,
2424 &m->m_ext.ext_paddr);
2427 * Copy the existing data to the cluster, remove
2428 * the rx dma buffer, and attach the cluster in
2429 * its place. Note we preserve the offset to the
2430 * data so frames being bridged can still prepend
2431 * their headers without adding another mbuf.
2433 memcpy((caddr_t) cl + off, m->m_data, m->m_pkthdr.len);
2435 MEXTADD(m, cl, MCLBYTES, 0, NULL, &mclpool_cache);
2436 /* setup mbuf like _MCLGET does */
2437 m->m_flags |= M_CLUSTER | M_EXT_RW;
2438 _MOWNERREF(m, M_EXT | M_CLUSTER);
2439 /* NB: m_data is clobbered by MEXTADDR, adjust */
2447 * Callback to reclaim resources. We first let the
2448 * net80211 layer do it's thing, then if we are still
2449 * blocked by a lack of rx dma buffers we walk the ampdu
2450 * reorder q's to reclaim buffers by copying to a system
2454 mwl_node_drain(struct ieee80211_node *ni)
2456 struct ieee80211com *ic = ni->ni_ic;
2457 struct mwl_softc *sc = ic->ic_ifp->if_softc;
2458 struct mwl_node *mn = MWL_NODE(ni);
2460 DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p vap %p staid %d\n",
2461 __func__, ni, ni->ni_vap, mn->mn_staid);
2463 /* NB: call up first to age out ampdu q's */
2464 sc->sc_node_drain(ni);
2466 /* XXX better to not check low water mark? */
/* only HT stations with a firmware staid can hold rx dma buffers */
2467 if (sc->sc_rxblocked && mn->mn_staid != 0 &&
2468 (ni->ni_flags & IEEE80211_NODE_HT)) {
2471 * Walk the reorder q and reclaim rx dma buffers by copying
2472 * the packet contents into clusters.
2474 for (tid = 0; tid < WME_NUM_TID; tid++) {
2475 struct ieee80211_rx_ampdu *rap;
2477 rap = &ni->ni_rx_ampdu[tid];
/* skip tids with no block-ack exchange pending */
2478 if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0)
2480 if (rap->rxa_qframes)
2481 mwl_ampdu_rxdma_reclaim(rap);
/*
 * net80211 getsignal hook: rssi comes from the standard
 * per-node average; noise comes from hw antenna info when
 * MWL_ANT_INFO_SUPPORT is compiled in, else a fixed -95 dBm.
 */
2487 mwl_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
2489 *rssi = ni->ni_ic->ic_node_getrssi(ni);
2490 #ifdef MWL_ANT_INFO_SUPPORT
2492 /* XXX need to smooth data */
2493 *noise = -MWL_NODE_CONST(ni)->mn_ai.nf;
2495 *noise = -95; /* XXX */
2498 *noise = -95; /* XXX */
2503 * Convert Hardware per-antenna rssi info to common format:
2504 * Let a1, a2, a3 represent the amplitudes per chain
2505 * Let amax represent max[a1, a2, a3]
2506 * Rssi1_dBm = RSSI_dBm + 20*log10(a1/amax)
2507 * Rssi1_dBm = RSSI_dBm + 20*log10(a1) - 20*log10(amax)
2508 * We store a table that is 4*20*log10(idx) - the extra 4 is to store or
2509 * maintain some extra precision.
2511 * Values are stored in .5 db format capped at 127.
2514 mwl_node_getmimoinfo(const struct ieee80211_node *ni,
2515 struct ieee80211_mimo_info *mi)
/* per-chain rssi = base rssi + (logdb[chain] - logdb[max]) / 4, then
 * converted to .5 dB units and capped at 127 */
2517 #define CVT(_dst, _src) do { \
2518 (_dst) = rssi + ((logdbtbl[_src] - logdbtbl[rssi_max]) >> 2); \
2519 (_dst) = (_dst) > 64 ? 127 : ((_dst) << 1); \
/* 4*20*log10(idx) lookup table (idx 0..31); entry 1 is 0 by definition */
2521 static const int8_t logdbtbl[32] = {
2522 0, 0, 24, 38, 48, 56, 62, 68,
2523 72, 76, 80, 83, 86, 89, 92, 94,
2524 96, 98, 100, 102, 104, 106, 107, 109,
2525 110, 112, 113, 115, 116, 117, 118, 119
2527 const struct mwl_node *mn = MWL_NODE_CONST(ni);
2528 uint8_t rssi = mn->mn_ai.rsvd1/2; /* XXX */
/* find the strongest chain to normalize against */
2531 rssi_max = mn->mn_ai.rssi_a;
2532 if (mn->mn_ai.rssi_b > rssi_max)
2533 rssi_max = mn->mn_ai.rssi_b;
2534 if (mn->mn_ai.rssi_c > rssi_max)
2535 rssi_max = mn->mn_ai.rssi_c;
2537 CVT(mi->rssi[0], mn->mn_ai.rssi_a);
2538 CVT(mi->rssi[1], mn->mn_ai.rssi_b);
2539 CVT(mi->rssi[2], mn->mn_ai.rssi_c);
/* noise floor is reported per-chain directly from the hw ant info */
2541 mi->noise[0] = mn->mn_ai.nf_a;
2542 mi->noise[1] = mn->mn_ai.nf_b;
2543 mi->noise[2] = mn->mn_ai.nf_c;
/*
 * Take an rx dma buffer off the jumbo free list (under the
 * RXFREE lock) and return a pointer to its data area, or
 * record the shortage in the stats when the list is empty.
 * The elided lines presumably return NULL in that case — confirm.
 */
2547 static __inline void *
2548 mwl_getrxdma(struct mwl_softc *sc)
2550 struct mwl_jumbo *buf;
2554 * Allocate from jumbo pool.
2556 MWL_RXFREE_LOCK(sc);
2557 buf = SLIST_FIRST(&sc->sc_rxfree);
2559 DPRINTF(sc, MWL_DEBUG_ANY,
2560 "%s: out of rx dma buffers\n", __func__);
2561 sc->sc_stats.mst_rx_nodmabuf++;
2564 SLIST_REMOVE_HEAD(&sc->sc_rxfree, next);
/* translate the list node to its associated data area */
2566 data = MWL_JUMBO_BUF2DATA(buf);
2568 MWL_RXFREE_UNLOCK(sc);
/*
 * Return an rx dma buffer to the jumbo free list; inverse of
 * mwl_getrxdma. Caller passes the data pointer, which is mapped
 * back to its containing mwl_jumbo before insertion.
 */
2572 static __inline void
2573 mwl_putrxdma(struct mwl_softc *sc, void *data)
2575 struct mwl_jumbo *buf;
2577 /* XXX bounds check data */
2578 MWL_RXFREE_LOCK(sc);
2579 buf = MWL_JUMBO_DATA2BUF(data);
2580 SLIST_INSERT_HEAD(&sc->sc_rxfree, buf, next);
2582 MWL_RXFREE_UNLOCK(sc);
/*
 * (Re)initialize an rx descriptor for its buffer. If the buffer
 * was lost earlier (allocation failure) try to obtain a new one;
 * on failure the descriptor is marked OS-owned so firmware skips
 * it. Otherwise fill in length/address and give ownership to the
 * driver/firmware, syncing the descriptor for DMA.
 */
2586 mwl_rxbuf_init(struct mwl_softc *sc, struct mwl_rxbuf *bf)
2588 struct mwl_rxdesc *ds;
2591 if (bf->bf_data == NULL) {
2592 bf->bf_data = mwl_getrxdma(sc);
2593 if (bf->bf_data == NULL) {
2594 /* mark descriptor to be skipped */
2595 ds->RxControl = EAGLE_RXD_CTRL_OS_OWN;
2596 /* NB: don't need PREREAD */
2597 MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE);
2598 sc->sc_stats.mst_rxbuf_failed++;
2603 * NB: DMA buffer contents is known to be unmodified
2604 * so there's no need to flush the data cache.
2612 ds->Status = EAGLE_RXD_STATUS_IDLE;
2614 ds->PktLen = htole16(MWL_AGGR_SIZE);
2616 ds->pPhysBuffData = htole32(MWL_JUMBO_DMA_ADDR(sc, bf->bf_data));
2617 /* NB: don't touch pPhysNext, set once */
2618 ds->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
2619 MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/*
 * External-storage free routine attached to rx mbufs: return the
 * dma buffer to the free pool, and if rx was blocked for lack of
 * buffers and the pool has refilled past the low-water mark,
 * re-enable rx interrupts.
 */
2625 mwl_ext_free(struct mbuf *m, void *data, void *arg)
2627 struct mwl_softc *sc = arg;
2629 /* XXX bounds check data */
2630 mwl_putrxdma(sc, data);
2632 * If we were previously blocked by a lack of rx dma buffers
2633 * check if we now have enough to restart rx interrupt handling.
2634 * NB: we know we are called at splvm which is above splnet.
2636 if (sc->sc_rxblocked && sc->sc_nrxfree > mwl_rxdmalow) {
2637 sc->sc_rxblocked = 0;
/* restore the full interrupt mask (rx-ready bit included) */
2638 mwl_hal_intrset(sc->sc_mh, sc->sc_imask);
2640 return (EXT_FREE_OK);
/*
 * 802.11 Block Ack Request (BAR) control frame layout; used by
 * mwl_anyhdrsize for sizing. Leading fields (fc/dur) and trailing
 * fields are elided from this listing — see the full header.
 */
2643 struct mwl_frame_bar {
2646 u_int8_t i_ra[IEEE80211_ADDR_LEN];
2647 u_int8_t i_ta[IEEE80211_ADDR_LEN];
2652 * Like ieee80211_anyhdrsize, but handles BAR frames
2653 * specially so the logic below to piece the 802.11
2654 * header together works.
2657 mwl_anyhdrsize(const void *data)
2659 const struct ieee80211_frame *wh = data;
/* control frames have abbreviated headers; size by subtype */
2661 if ((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) {
2662 switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
2663 case IEEE80211_FC0_SUBTYPE_CTS:
2664 case IEEE80211_FC0_SUBTYPE_ACK:
2665 return sizeof(struct ieee80211_frame_ack);
2666 case IEEE80211_FC0_SUBTYPE_BAR:
2667 return sizeof(struct mwl_frame_bar);
/* other control subtypes: minimal 2-address header */
2669 return sizeof(struct ieee80211_frame_min);
/* data/mgmt frames: defer to the generic net80211 calculation */
2671 return ieee80211_hdrsize(data);
/*
 * Handle a firmware-reported TKIP MIC failure: locate the 802.11
 * header (past the 2-byte fw length), look up the sending node,
 * and notify net80211 so countermeasures can run.
 */
2675 mwl_handlemicerror(struct ieee80211com *ic, const uint8_t *data)
2677 const struct ieee80211_frame *wh;
2678 struct ieee80211_node *ni;
/* skip the 16-bit fw length that precedes the 802.11 header */
2680 wh = (const struct ieee80211_frame *)(data + sizeof(uint16_t));
2681 ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
2683 ieee80211_notify_michael_failure(ni->ni_vap, wh, 0);
2684 ieee80211_free_node(ni);
2689 * Convert hardware signal strength to rssi. The value
2690 * provided by the device has the noise floor added in;
2691 * we need to compensate for this but we don't have that
2692 * so we use a fixed value.
2694 * The offset of 8 is good for both 2.4 and 5GHz. The LNA
2695 * offset is already set as part of the initial gain. This
2696 * will give at least +/- 3dB for 2.4GHz and +/- 5dB for 5GHz.
2699 cvtrssi(uint8_t ssi)
2701 int rssi = (int) ssi + 8;
2702 /* XXX hack guess until we have a real noise floor */
2703 rssi = 2*(87 - rssi); /* NB: .5 dBm units */
/* clamp to the valid radiotap range [0, 127] */
2704 return (rssi < 0 ? 0 : rssi > 127 ? 127 : rssi);
2708 mwl_rx_proc(void *arg, int npending)
2710 #define IEEE80211_DIR_DSTODS(wh) \
2711 ((((const struct ieee80211_frame *)wh)->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
2712 struct mwl_softc *sc = arg;
2713 struct ifnet *ifp = sc->sc_ifp;
2714 struct ieee80211com *ic = ifp->if_l2com;
2715 struct mwl_rxbuf *bf;
2716 struct mwl_rxdesc *ds;
2718 struct ieee80211_qosframe *wh;
2719 struct ieee80211_qosframe_addr4 *wh4;
2720 struct ieee80211_node *ni;
2721 struct mwl_node *mn;
2722 int off, len, hdrlen, pktlen, rssi, ntodo;
2723 uint8_t *data, status;
2727 DPRINTF(sc, MWL_DEBUG_RX_PROC, "%s: pending %u rdptr 0x%x wrptr 0x%x\n",
2728 __func__, npending, RD4(sc, sc->sc_hwspecs.rxDescRead),
2729 RD4(sc, sc->sc_hwspecs.rxDescWrite));
2732 for (ntodo = mwl_rxquota; ntodo > 0; ntodo--) {
2734 bf = STAILQ_FIRST(&sc->sc_rxbuf);
2739 * If data allocation failed previously there
2740 * will be no buffer; try again to re-populate it.
2741 * Note the firmware will not advance to the next
2742 * descriptor with a dma buffer so we must mimic
2743 * this or we'll get out of sync.
2745 DPRINTF(sc, MWL_DEBUG_ANY,
2746 "%s: rx buf w/o dma memory\n", __func__);
2747 (void) mwl_rxbuf_init(sc, bf);
2748 sc->sc_stats.mst_rx_dmabufmissing++;
2751 MWL_RXDESC_SYNC(sc, ds,
2752 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2753 if (ds->RxControl != EAGLE_RXD_CTRL_DMA_OWN)
2756 if (sc->sc_debug & MWL_DEBUG_RECV_DESC)
2757 mwl_printrxbuf(bf, 0);
2759 status = ds->Status;
2760 if (status & EAGLE_RXD_STATUS_DECRYPT_ERR_MASK) {
2762 sc->sc_stats.mst_rx_crypto++;
2764 * NB: Check EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR
2765 * for backwards compatibility.
2767 if (status != EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR &&
2768 (status & EAGLE_RXD_STATUS_TKIP_MIC_DECRYPT_ERR)) {
2770 * MIC error, notify upper layers.
2772 bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap,
2773 BUS_DMASYNC_POSTREAD);
2774 mwl_handlemicerror(ic, data);
2775 sc->sc_stats.mst_rx_tkipmic++;
2777 /* XXX too painful to tap packets */
2781 * Sync the data buffer.
2783 len = le16toh(ds->PktLen);
2784 bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD);
2786 * The 802.11 header is provided all or in part at the front;
2787 * use it to calculate the true size of the header that we'll
2788 * construct below. We use this to figure out where to copy
2789 * payload prior to constructing the header.
2791 hdrlen = mwl_anyhdrsize(data + sizeof(uint16_t));
2792 off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4);
2794 /* calculate rssi early so we can re-use for each aggregate */
2795 rssi = cvtrssi(ds->RSSI);
2797 pktlen = hdrlen + (len - off);
2799 * NB: we know our frame is at least as large as
2800 * IEEE80211_MIN_LEN because there is a 4-address
2801 * frame at the front. Hence there's no need to
2802 * vet the packet length. If the frame in fact
2803 * is too small it should be discarded at the
2808 * Attach dma buffer to an mbuf. We tried
2809 * doing this based on the packet size (i.e.
2810 * copying small packets) but it turns out to
2811 * be a net loss. The tradeoff might be system
2812 * dependent (cache architecture is important).
2814 MGETHDR(m, M_NOWAIT, MT_DATA);
2816 DPRINTF(sc, MWL_DEBUG_ANY,
2817 "%s: no rx mbuf\n", __func__);
2818 sc->sc_stats.mst_rx_nombuf++;
2822 * Acquire the replacement dma buffer before
2823 * processing the frame. If we're out of dma
2824 * buffers we disable rx interrupts and wait
2825 * for the free pool to reach mlw_rxdmalow buffers
2826 * before starting to do work again. If the firmware
2827 * runs out of descriptors then it will toss frames
2828 * which is better than our doing it as that can
2829 * starve our processing. It is also important that
2830 * we always process rx'd frames in case they are
2831 * A-MPDU as otherwise the host's view of the BA
2832 * window may get out of sync with the firmware.
2834 newdata = mwl_getrxdma(sc);
2835 if (newdata == NULL) {
2836 /* NB: stat+msg in mwl_getrxdma */
2838 /* disable RX interrupt and mark state */
2839 mwl_hal_intrset(sc->sc_mh,
2840 sc->sc_imask &~ MACREG_A2HRIC_BIT_RX_RDY);
2841 sc->sc_rxblocked = 1;
2842 ieee80211_drain(ic);
2843 /* XXX check rxblocked and immediately start again? */
2846 bf->bf_data = newdata;
2848 * Attach the dma buffer to the mbuf;
2849 * mwl_rxbuf_init will re-setup the rx
2850 * descriptor using the replacement dma
2851 * buffer we just installed above.
2853 MEXTADD(m, data, MWL_AGGR_SIZE, mwl_ext_free,
2854 data, sc, 0, EXT_NET_DRV);
2855 m->m_data += off - hdrlen;
2856 m->m_pkthdr.len = m->m_len = pktlen;
2857 m->m_pkthdr.rcvif = ifp;
2858 /* NB: dma buffer assumed read-only */
2861 * Piece 802.11 header together.
2863 wh = mtod(m, struct ieee80211_qosframe *);
2864 /* NB: don't need to do this sometimes but ... */
2865 /* XXX special case so we can memcpy after m_devget? */
2866 ovbcopy(data + sizeof(uint16_t), wh, hdrlen);
2867 if (IEEE80211_QOS_HAS_SEQ(wh)) {
2868 if (IEEE80211_DIR_DSTODS(wh)) {
2870 struct ieee80211_qosframe_addr4*);
2871 *(uint16_t *)wh4->i_qos = ds->QosCtrl;
2873 *(uint16_t *)wh->i_qos = ds->QosCtrl;
2877 * The f/w strips WEP header but doesn't clear
2878 * the WEP bit; mark the packet with M_WEP so
2879 * net80211 will treat the data as decrypted.
2880 * While here also clear the PWR_MGT bit since
2881 * power save is handled by the firmware and
2882 * passing this up will potentially cause the
2883 * upper layer to put a station in power save
2884 * (except when configured with MWL_HOST_PS_SUPPORT).
2886 if (wh->i_fc[1] & IEEE80211_FC1_WEP)
2887 m->m_flags |= M_WEP;
2888 #ifdef MWL_HOST_PS_SUPPORT
2889 wh->i_fc[1] &= ~IEEE80211_FC1_WEP;
2891 wh->i_fc[1] &= ~(IEEE80211_FC1_WEP | IEEE80211_FC1_PWR_MGT);
2894 if (ieee80211_radiotap_active(ic)) {
2895 struct mwl_rx_radiotap_header *tap = &sc->sc_rx_th;
2898 tap->wr_rate = ds->Rate;
2899 tap->wr_antsignal = rssi + nf;
2900 tap->wr_antnoise = nf;
2902 if (IFF_DUMPPKTS_RECV(sc, wh)) {
2903 ieee80211_dump_pkt(ic, mtod(m, caddr_t),
2904 len, ds->Rate, rssi);
2909 ni = ieee80211_find_rxnode(ic,
2910 (const struct ieee80211_frame_min *) wh);
2913 #ifdef MWL_ANT_INFO_SUPPORT
2914 mn->mn_ai.rssi_a = ds->ai.rssi_a;
2915 mn->mn_ai.rssi_b = ds->ai.rssi_b;
2916 mn->mn_ai.rssi_c = ds->ai.rssi_c;
2917 mn->mn_ai.rsvd1 = rssi;
2919 /* tag AMPDU aggregates for reorder processing */
2920 if (ni->ni_flags & IEEE80211_NODE_HT)
2921 m->m_flags |= M_AMPDU;
2922 (void) ieee80211_input(ni, m, rssi, nf);
2923 ieee80211_free_node(ni);
2925 (void) ieee80211_input_all(ic, m, rssi, nf);
2927 /* NB: ignore ENOMEM so we process more descriptors */
2928 (void) mwl_rxbuf_init(sc, bf);
2929 bf = STAILQ_NEXT(bf, bf_list);
2934 if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 &&
2935 !IFQ_IS_EMPTY(&ifp->if_snd)) {
2936 /* NB: kick fw; the tx thread may have been preempted */
2937 mwl_hal_txstart(sc->sc_mh, 0);
2940 #undef IEEE80211_DIR_DSTODS
/*
 * Initialize a tx queue's lock and lists and link the tx
 * descriptors into a circular ring: each buffer's pPhysNext
 * points at the next buffer's descriptor, wrapping to the head.
 */
2944 mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *txq, int qnum)
2946 struct mwl_txbuf *bf, *bn;
2947 struct mwl_txdesc *ds;
2949 MWL_TXQ_LOCK_INIT(sc, txq);
2951 txq->txpri = 0; /* XXX */
2953 /* NB: q setup by mwl_txdma_setup XXX */
2954 STAILQ_INIT(&txq->free);
2956 STAILQ_FOREACH(bf, &txq->free, bf_list) {
2960 bn = STAILQ_NEXT(bf, bf_list);
/* last entry wraps back to the first to close the ring */
2962 bn = STAILQ_FIRST(&txq->free);
2963 ds->pPhysNext = htole32(bn->bf_daddr);
2965 STAILQ_INIT(&txq->active);
2969 * Setup a hardware data transmit queue for the specified
2970 * access control. We record the mapping from ac's
2971 * to h/w queues for use by mwl_tx_start.
2974 mwl_tx_setup(struct mwl_softc *sc, int ac, int mvtype)
2976 #define N(a) (sizeof(a)/sizeof(a[0]))
2977 struct mwl_txq *txq;
/* validate the WME access category index against the map size */
2979 if (ac >= N(sc->sc_ac2q)) {
2980 device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
2981 ac, N(sc->sc_ac2q));
/* validate the hardware queue number */
2984 if (mvtype >= MWL_NUM_TX_QUEUES) {
2985 device_printf(sc->sc_dev, "mvtype %u out of range, max %u!\n",
2986 mvtype, MWL_NUM_TX_QUEUES);
2989 txq = &sc->sc_txq[mvtype];
2990 mwl_txq_init(sc, txq, mvtype);
/* record AC -> hw queue mapping for mwl_tx_start */
2991 sc->sc_ac2q[ac] = txq;
2997 * Update WME parameters for a transmit queue.
3000 mwl_txq_update(struct mwl_softc *sc, int ac)
/* WME stores cwmin/cwmax as log2 exponents; hw wants linear values */
3002 #define MWL_EXPONENT_TO_VALUE(v) ((1<<v)-1)
3003 struct ifnet *ifp = sc->sc_ifp;
3004 struct ieee80211com *ic = ifp->if_l2com;
3005 struct mwl_txq *txq = sc->sc_ac2q[ac];
3006 struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
3007 struct mwl_hal *mh = sc->sc_mh;
3008 int aifs, cwmin, cwmax, txoplim;
3010 aifs = wmep->wmep_aifsn;
3011 /* XXX in sta mode need to pass log values for cwmin/max */
3012 cwmin = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
3013 cwmax = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
3014 txoplim = wmep->wmep_txopLimit; /* NB: units of 32us */
/* non-zero return from the hal indicates failure */
3016 if (mwl_hal_setedcaparams(mh, txq->qnum, cwmin, cwmax, aifs, txoplim)) {
3017 device_printf(sc->sc_dev, "unable to update hardware queue "
3018 "parameters for %s traffic!\n",
3019 ieee80211_wme_acnames[ac]);
3023 #undef MWL_EXPONENT_TO_VALUE
3027 * Callback from the 802.11 layer to update WME parameters.
3030 mwl_wme_update(struct ieee80211com *ic)
3032 struct mwl_softc *sc = ic->ic_ifp->if_softc;
/* update all four ACs; any single failure yields EIO */
3034 return !mwl_txq_update(sc, WME_AC_BE) ||
3035 !mwl_txq_update(sc, WME_AC_BK) ||
3036 !mwl_txq_update(sc, WME_AC_VI) ||
3037 !mwl_txq_update(sc, WME_AC_VO) ? EIO : 0;
3041 * Reclaim resources for a setup queue.
3044 mwl_tx_cleanupq(struct mwl_softc *sc, struct mwl_txq *txq)
/* only the queue lock needs explicit destruction here */
3047 MWL_TXQ_LOCK_DESTROY(txq);
3051 * Reclaim all tx queue resources.
3054 mwl_tx_cleanup(struct mwl_softc *sc)
3058 for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3059 mwl_tx_cleanupq(sc, &sc->sc_txq[i]);
/*
 * DMA-map an outbound mbuf chain for the given tx buffer. If the
 * chain needs more than MWL_TXDESC segments, collapse/defrag it
 * and retry once. On success the map is synced PREWRITE; on any
 * failure the relevant stat is bumped (the elided lines presumably
 * free the mbuf and return an error — confirm against full file).
 */
3063 mwl_tx_dmasetup(struct mwl_softc *sc, struct mwl_txbuf *bf, struct mbuf *m0)
3069 * Load the DMA map so any coalescing is done. This
3070 * also calculates the number of descriptors we need.
3072 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
3073 bf->bf_segs, &bf->bf_nseg,
3075 if (error == EFBIG) {
3076 /* XXX packet requires too many descriptors */
/* sentinel: force the linearize path below */
3077 bf->bf_nseg = MWL_TXDESC+1;
3078 } else if (error != 0) {
3079 sc->sc_stats.mst_tx_busdma++;
3084 * Discard null packets and check for packets that
3085 * require too many TX descriptors. We try to convert
3086 * the latter to a cluster.
3088 if (error == EFBIG) { /* too many desc's, linearize */
3089 sc->sc_stats.mst_tx_linear++;
3091 m = m_collapse(m0, M_NOWAIT, MWL_TXDESC);
3093 m = m_defrag(m0, M_NOWAIT);
3097 sc->sc_stats.mst_tx_nombuf++;
/* retry the load against the linearized chain */
3101 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
3102 bf->bf_segs, &bf->bf_nseg,
3105 sc->sc_stats.mst_tx_busdma++;
3109 KASSERT(bf->bf_nseg <= MWL_TXDESC,
3110 ("too many segments after defrag; nseg %u", bf->bf_nseg));
3111 } else if (bf->bf_nseg == 0) { /* null packet, discard */
3112 sc->sc_stats.mst_tx_nodata++;
3116 DPRINTF(sc, MWL_DEBUG_XMIT, "%s: m %p len %u\n",
3117 __func__, m0, m0->m_pkthdr.len);
3118 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
3125 mwl_cvtlegacyrate(int rate)
3146 * Calculate fixed tx rate information per client state;
3147 * this value is suitable for writing to the Format field
3148 * of a tx descriptor.
3151 mwl_calcformat(uint8_t rate, const struct ieee80211_node *ni)
/* antenna selection plus extension-channel position from the channel */
3155 fmt = SM(3, EAGLE_TXD_ANTENNA)
3156 | (IEEE80211_IS_CHAN_HT40D(ni->ni_chan) ?
3157 EAGLE_TXD_EXTCHAN_LO : EAGLE_TXD_EXTCHAN_HI);
3158 if (rate & IEEE80211_RATE_MCS) { /* HT MCS */
3159 fmt |= EAGLE_TXD_FORMAT_HT
3160 /* NB: 0x80 implicitly stripped from ucastrate */
3161 | SM(rate, EAGLE_TXD_RATE);
3162 /* XXX short/long GI may be wrong; re-check */
3163 if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
3164 fmt |= EAGLE_TXD_CHW_40
3165 | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40 ?
3166 EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
3168 fmt |= EAGLE_TXD_CHW_20
3169 | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20 ?
3170 EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
3172 } else { /* legacy rate */
3173 fmt |= EAGLE_TXD_FORMAT_LEGACY
/* legacy rates are converted from IEEE rate codes to hw indices */
3174 | SM(mwl_cvtlegacyrate(rate), EAGLE_TXD_RATE)
3176 /* XXX iv_flags & IEEE80211_F_SHPREAMBLE? */
3177 | (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE ?
3178 EAGLE_TXD_PREAMBLE_SHORT : EAGLE_TXD_PREAMBLE_LONG);
/*
 * Prepare and hand one frame to the hardware: crypto-encap if
 * needed, prepend the firmware's 2-byte-length + 4-address tx
 * record, DMA-map the chain, fill in the tx descriptor (rate
 * selection per frame type and any fixed-rate/EAPOL policy,
 * queue priority per block-ack stream), then pass ownership to
 * the firmware and arm the tx watchdog.
 */
3184 mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *bf,
3187 #define IEEE80211_DIR_DSTODS(wh) \
3188 ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
3189 struct ifnet *ifp = sc->sc_ifp;
3190 struct ieee80211com *ic = ifp->if_l2com;
3191 struct ieee80211vap *vap = ni->ni_vap;
3192 int error, iswep, ismcast;
3193 int hdrlen, copyhdrlen, pktlen;
3194 struct mwl_txdesc *ds;
3195 struct mwl_txq *txq;
3196 struct ieee80211_frame *wh;
3197 struct mwltxrec *tr;
3198 struct mwl_node *mn;
3204 wh = mtod(m0, struct ieee80211_frame *);
3205 iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
3206 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
3207 hdrlen = ieee80211_anyhdrsize(wh);
3208 copyhdrlen = hdrlen;
3209 pktlen = m0->m_pkthdr.len;
/* remember the QoS control word; it is carried in the descriptor */
3210 if (IEEE80211_QOS_HAS_SEQ(wh)) {
3211 if (IEEE80211_DIR_DSTODS(wh)) {
3213 (((struct ieee80211_qosframe_addr4 *) wh)->i_qos);
3214 copyhdrlen -= sizeof(qos);
3217 (((struct ieee80211_qosframe *) wh)->i_qos);
3222 const struct ieee80211_cipher *cip;
3223 struct ieee80211_key *k;
3226 * Construct the 802.11 header+trailer for an encrypted
3227 * frame. The only reason this can fail is because of an
3228 * unknown or unsupported cipher/key type.
3230 * NB: we do this even though the firmware will ignore
3231 * what we've done for WEP and TKIP as we need the
3232 * ExtIV filled in for CCMP and this also adjusts
3233 * the headers which simplifies our work below.
3235 k = ieee80211_crypto_encap(ni, m0);
3238 * This can happen when the key is yanked after the
3239 * frame was queued. Just discard the frame; the
3240 * 802.11 layer counts failures and provides
3241 * debugging/diagnostics.
3247 * Adjust the packet length for the crypto additions
3248 * done during encap and any other bits that the f/w
3249 * will add later on.
3252 pktlen += cip->ic_header + cip->ic_miclen + cip->ic_trailer;
3254 /* packet header may have moved, reset our local pointer */
3255 wh = mtod(m0, struct ieee80211_frame *);
3258 if (ieee80211_radiotap_active_vap(vap)) {
3259 sc->sc_tx_th.wt_flags = 0; /* XXX */
3261 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3263 sc->sc_tx_th.wt_rate = ds->DataRate;
3265 sc->sc_tx_th.wt_txpower = ni->ni_txpower;
3266 sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
3268 ieee80211_radiotap_tx(vap, m0);
3271 * Copy up/down the 802.11 header; the firmware requires
3272 * we present a 2-byte payload length followed by a
3273 * 4-address header (w/o QoS), followed (optionally) by
3274 * any WEP/ExtIV header (but only filled in for CCMP).
3275 * We are assured the mbuf has sufficient headroom to
3276 * prepend in-place by the setup of ic_headroom in
3279 if (hdrlen < sizeof(struct mwltxrec)) {
3280 const int space = sizeof(struct mwltxrec) - hdrlen;
3281 if (M_LEADINGSPACE(m0) < space) {
3282 /* NB: should never happen */
3283 device_printf(sc->sc_dev,
3284 "not enough headroom, need %d found %zd, "
3285 "m_flags 0x%x m_len %d\n",
3286 space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len);
3287 ieee80211_dump_pkt(ic,
3288 mtod(m0, const uint8_t *), m0->m_len, 0, -1);
3290 sc->sc_stats.mst_tx_noheadroom++;
3293 M_PREPEND(m0, space, M_NOWAIT);
3295 tr = mtod(m0, struct mwltxrec *);
/* slide the 802.11 header down only if it actually moved */
3296 if (wh != (struct ieee80211_frame *) &tr->wh)
3297 ovbcopy(wh, &tr->wh, hdrlen);
3299 * Note: the "firmware length" is actually the length
3300 * of the fully formed "802.11 payload". That is, it's
3301 * everything except for the 802.11 header. In particular
3302 * this includes all crypto material including the MIC!
3304 tr->fwlen = htole16(pktlen - hdrlen);
3307 * Load the DMA map so any coalescing is done. This
3308 * also calculates the number of descriptors we need.
3310 error = mwl_tx_dmasetup(sc, bf, m0);
3312 /* NB: stat collected in mwl_tx_dmasetup */
3313 DPRINTF(sc, MWL_DEBUG_XMIT,
3314 "%s: unable to setup dma\n", __func__);
3317 bf->bf_node = ni; /* NB: held reference */
3318 m0 = bf->bf_m; /* NB: may have changed */
3319 tr = mtod(m0, struct mwltxrec *);
3320 wh = (struct ieee80211_frame *)&tr->wh;
3323 * Formulate tx descriptor.
3328 ds->QosCtrl = qos; /* NB: already little-endian */
3331 * NB: multiframes should be zero because the descriptors
3332 * are initialized to zero. This should handle the case
3333 * where the driver is built with MWL_TXDESC=1 but we are
3334 * using firmware with multi-segment support.
3336 ds->PktPtr = htole32(bf->bf_segs[0].ds_addr);
3337 ds->PktLen = htole16(bf->bf_segs[0].ds_len);
/* multi-segment path: record every segment in the descriptor arrays */
3339 ds->multiframes = htole32(bf->bf_nseg);
3340 ds->PktLen = htole16(m0->m_pkthdr.len);
3341 for (i = 0; i < bf->bf_nseg; i++) {
3342 ds->PktPtrArray[i] = htole32(bf->bf_segs[i].ds_addr);
3343 ds->PktLenArray[i] = htole16(bf->bf_segs[i].ds_len);
3346 /* NB: pPhysNext, DataRate, and SapPktInfo setup once, don't touch */
3349 ds->ack_wcb_addr = 0;
3353 * Select transmit rate.
3355 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
3356 case IEEE80211_FC0_TYPE_MGT:
3357 sc->sc_stats.mst_tx_mgmt++;
3359 case IEEE80211_FC0_TYPE_CTL:
3360 /* NB: assign to BE q to avoid bursting */
3361 ds->TxPriority = MWL_WME_AC_BE;
3363 case IEEE80211_FC0_TYPE_DATA:
3365 const struct ieee80211_txparam *tp = ni->ni_txparms;
3367 * EAPOL frames get forced to a fixed rate and w/o
3368 * aggregation; otherwise check for any fixed rate
3369 * for the client (may depend on association state).
3371 if (m0->m_flags & M_EAPOL) {
3372 const struct mwl_vap *mvp = MWL_VAP_CONST(vap);
3373 ds->Format = mvp->mv_eapolformat;
3375 EAGLE_TXD_FIXED_RATE | EAGLE_TXD_DONT_AGGR);
3376 } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3377 /* XXX pre-calculate per node */
3378 ds->Format = htole16(
3379 mwl_calcformat(tp->ucastrate, ni));
3380 ds->pad = htole16(EAGLE_TXD_FIXED_RATE);
3382 /* NB: EAPOL frames will never have qos set */
3384 ds->TxPriority = txq->qnum;
/* match the frame's tid against active block-ack streams so
 * aggregated traffic goes out on the stream's tx queue */
3386 else if (mwl_bastream_match(&mn->mn_ba[3], qos))
3387 ds->TxPriority = mn->mn_ba[3].txq;
3390 else if (mwl_bastream_match(&mn->mn_ba[2], qos))
3391 ds->TxPriority = mn->mn_ba[2].txq;
3394 else if (mwl_bastream_match(&mn->mn_ba[1], qos))
3395 ds->TxPriority = mn->mn_ba[1].txq;
3398 else if (mwl_bastream_match(&mn->mn_ba[0], qos))
3399 ds->TxPriority = mn->mn_ba[0].txq;
3402 ds->TxPriority = txq->qnum;
3404 ds->TxPriority = txq->qnum;
3407 if_printf(ifp, "bogus frame type 0x%x (%s)\n",
3408 wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
3409 sc->sc_stats.mst_tx_badframetype++;
3414 if (IFF_DUMPPKTS_XMIT(sc))
3415 ieee80211_dump_pkt(ic,
3416 mtod(m0, const uint8_t *)+sizeof(uint16_t),
3417 m0->m_len - sizeof(uint16_t), ds->DataRate, -1);
/* hand the descriptor to firmware and queue on the active list */
3420 ds->Status = htole32(EAGLE_TXD_STATUS_FW_OWNED);
3421 STAILQ_INSERT_TAIL(&txq->active, bf, bf_list);
3422 MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* arm the tx watchdog (5 ticks of the watchdog callout) */
3425 sc->sc_tx_timer = 5;
3426 MWL_TXQ_UNLOCK(txq);
3429 #undef IEEE80211_DIR_DSTODS
/*
 * Convert a hardware legacy rate index into an IEEE rate code
 * (units of 500 kb/s); out-of-range indices map to 0.
 */
3433 mwl_cvtlegacyrix(int rix)
3435 #define N(x) (sizeof(x)/sizeof(x[0]))
/* table order mirrors the hw rate-index encoding: CCK then OFDM */
3436 static const int ieeerates[] =
3437 { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
3438 return (rix < N(ieeerates) ? ieeerates[rix] : 0);
3443 * Process completed xmit descriptors from the specified queue.
/*
 * Returns the number of buffers reaped. For each descriptor no
 * longer firmware-owned: record stats and last tx rate, invoke
 * any tx-complete callback, drop the node reference, unload the
 * DMA map, and return the buffer to the free list.
 */
3446 mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq)
3448 #define EAGLE_TXD_STATUS_MCAST \
3449 (EAGLE_TXD_STATUS_MULTICAST_TX | EAGLE_TXD_STATUS_BROADCAST_TX)
3450 struct ifnet *ifp = sc->sc_ifp;
3451 struct ieee80211com *ic = ifp->if_l2com;
3452 struct mwl_txbuf *bf;
3453 struct mwl_txdesc *ds;
3454 struct ieee80211_node *ni;
3455 struct mwl_node *an;
3459 DPRINTF(sc, MWL_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum);
3460 for (nreaped = 0;; nreaped++) {
3462 bf = STAILQ_FIRST(&txq->active);
3464 MWL_TXQ_UNLOCK(txq);
3468 MWL_TXDESC_SYNC(txq, ds,
3469 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
/* stop at the first descriptor firmware has not completed */
3470 if (ds->Status & htole32(EAGLE_TXD_STATUS_FW_OWNED)) {
3471 MWL_TXQ_UNLOCK(txq);
3474 STAILQ_REMOVE_HEAD(&txq->active, bf_list);
3475 MWL_TXQ_UNLOCK(txq);
3478 if (sc->sc_debug & MWL_DEBUG_XMIT_DESC)
3479 mwl_printtxbuf(bf, txq->qnum, nreaped);
3484 status = le32toh(ds->Status);
3485 if (status & EAGLE_TXD_STATUS_OK) {
3486 uint16_t Format = le16toh(ds->Format);
3487 uint8_t txant = MS(Format, EAGLE_TXD_ANTENNA);
3489 sc->sc_stats.mst_ant_tx[txant]++;
3490 if (status & EAGLE_TXD_STATUS_OK_RETRY)
3491 sc->sc_stats.mst_tx_retries++;
3492 if (status & EAGLE_TXD_STATUS_OK_MORE_RETRY)
3493 sc->sc_stats.mst_tx_mretries++;
3494 if (txq->qnum >= MWL_WME_AC_VO)
3495 ic->ic_wme.wme_hipri_traffic++;
/* record the rate actually used so userland can see it */
3496 ni->ni_txrate = MS(Format, EAGLE_TXD_RATE);
3497 if ((Format & EAGLE_TXD_FORMAT_HT) == 0) {
3498 ni->ni_txrate = mwl_cvtlegacyrix(
3501 ni->ni_txrate |= IEEE80211_RATE_MCS;
3502 sc->sc_stats.mst_tx_rate = ni->ni_txrate;
3504 if (status & EAGLE_TXD_STATUS_FAILED_LINK_ERROR)
3505 sc->sc_stats.mst_tx_linkerror++;
3506 if (status & EAGLE_TXD_STATUS_FAILED_XRETRY)
3507 sc->sc_stats.mst_tx_xretries++;
3508 if (status & EAGLE_TXD_STATUS_FAILED_AGING)
3509 sc->sc_stats.mst_tx_aging++;
3510 if (bf->bf_m->m_flags & M_FF)
3511 sc->sc_stats.mst_ff_txerr++;
3514 * Do any tx complete callback. Note this must
3515 * be done before releasing the node reference.
3516 * XXX no way to figure out if frame was ACK'd
3518 if (bf->bf_m->m_flags & M_TXCB) {
3519 /* XXX strip fw len in case header inspected */
3520 m_adj(bf->bf_m, sizeof(uint16_t));
3521 ieee80211_process_callback(ni, bf->bf_m,
3522 (status & EAGLE_TXD_STATUS_OK) == 0);
3525 * Reclaim reference to node.
3527 * NB: the node may be reclaimed here if, for example
3528 * this is a DEAUTH message that was sent and the
3529 * node was timed out due to inactivity.
3531 ieee80211_free_node(ni);
3533 ds->Status = htole32(EAGLE_TXD_STATUS_IDLE);
3535 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
3536 BUS_DMASYNC_POSTWRITE);
3537 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3540 mwl_puttxbuf_tail(txq, bf);
3543 #undef EAGLE_TXD_STATUS_MCAST
3547 * Deferred processing of transmit interrupt; special-cased
3548 * for four hardware queues, 0-3.
3551 mwl_tx_proc(void *arg, int npending)
3553 struct mwl_softc *sc = arg;
3554 struct ifnet *ifp = sc->sc_ifp;
3558 * Process each active queue.
/* skip queues with nothing outstanding to avoid lock traffic */
3561 if (!STAILQ_EMPTY(&sc->sc_txq[0].active))
3562 nreaped += mwl_tx_processq(sc, &sc->sc_txq[0]);
3563 if (!STAILQ_EMPTY(&sc->sc_txq[1].active))
3564 nreaped += mwl_tx_processq(sc, &sc->sc_txq[1]);
3565 if (!STAILQ_EMPTY(&sc->sc_txq[2].active))
3566 nreaped += mwl_tx_processq(sc, &sc->sc_txq[2]);
3567 if (!STAILQ_EMPTY(&sc->sc_txq[3].active))
3568 nreaped += mwl_tx_processq(sc, &sc->sc_txq[3]);
/* buffers freed: clear OACTIVE, disarm the watchdog, restart output */
3571 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3572 sc->sc_tx_timer = 0;
3573 if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
3574 /* NB: kick fw; the tx thread may have been preempted */
3575 mwl_hal_txstart(sc->sc_mh, 0);
/*
 * Drain every buffer off a tx queue's active list without waiting
 * for firmware completion: unload the DMA map, release the node
 * reference, and return the buffer to the free list. Optionally
 * dumps each frame when MWL_DEBUG_RESET is enabled.
 */
3582 mwl_tx_draintxq(struct mwl_softc *sc, struct mwl_txq *txq)
3584 struct ieee80211_node *ni;
3585 struct mwl_txbuf *bf;
3589 * NB: this assumes output has been stopped and
3590 * we do not need to block mwl_tx_tasklet
3592 for (ix = 0;; ix++) {
3594 bf = STAILQ_FIRST(&txq->active);
3596 MWL_TXQ_UNLOCK(txq);
3599 STAILQ_REMOVE_HEAD(&txq->active, bf_list);
3600 MWL_TXQ_UNLOCK(txq);
3602 if (sc->sc_debug & MWL_DEBUG_RESET) {
3603 struct ifnet *ifp = sc->sc_ifp;
3604 struct ieee80211com *ic = ifp->if_l2com;
3605 const struct mwltxrec *tr =
3606 mtod(bf->bf_m, const struct mwltxrec *);
3607 mwl_printtxbuf(bf, txq->qnum, ix);
/* dump past the fw length word to show the 802.11 header */
3608 ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh,
3609 bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1);
3611 #endif /* MWL_DEBUG */
3612 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3616 * Reclaim node reference.
3618 ieee80211_free_node(ni);
3622 mwl_puttxbuf_tail(txq, bf);
3627 * Drain the transmit queues and reclaim resources.
3630 mwl_draintxq(struct mwl_softc *sc)
3632 struct ifnet *ifp = sc->sc_ifp;
3635 for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3636 mwl_tx_draintxq(sc, &sc->sc_txq[i]);
/* all tx state flushed: allow output again and disarm the watchdog */
3637 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3638 sc->sc_tx_timer = 0;
3643 * Reset the transmit queues to a pristine state after a fw download.
/* Per-queue reset done by mwl_txq_reset; compiled only with MWL_DIAGAPI. */
3646 mwl_resettxq(struct mwl_softc *sc)
3650 for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3651 mwl_txq_reset(sc, &sc->sc_txq[i]);
3653 #endif /* MWL_DIAGAPI */
3656 * Clear the transmit queues of any frames submitted for the
3657 * specified vap. This is done when the vap is deleted so we
3658 * don't potentially reference the vap after it is gone.
3659 * Note we cannot remove the frames; we only reclaim the node
/*
 * Walk every tx queue under its lock and drop the node reference on any
 * queued frame whose node belongs to the dying vap; the frames themselves
 * remain queued (see the note above: only the node reference is reclaimed).
 */
3663 mwl_cleartxq(struct mwl_softc *sc, struct ieee80211vap *vap)
3665 struct mwl_txq *txq;
3666 struct mwl_txbuf *bf;
3669 for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
3670 txq = &sc->sc_txq[i];
3672 STAILQ_FOREACH(bf, &txq->active, bf_list) {
3673 struct ieee80211_node *ni = bf->bf_node;
3674 if (ni != NULL && ni->ni_vap == vap) {
3676 ieee80211_free_node(ni);
3679 MWL_TXQ_UNLOCK(txq);
/*
 * Intercept received 802.11 action frames: for HT MIMO power-save
 * announcements push the new state to the firmware sta db, then in all
 * cases hand the frame to net80211's saved action handler
 * (sc_recv_action) for normal processing.
 */
3684 mwl_recv_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh,
3685 const uint8_t *frm, const uint8_t *efrm)
3687 struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3688 const struct ieee80211_action *ia;
3690 ia = (const struct ieee80211_action *) frm;
3691 if (ia->ia_category == IEEE80211_ACTION_CAT_HT &&
3692 ia->ia_action == IEEE80211_ACTION_HT_MIMOPWRSAVE) {
3693 const struct ieee80211_action_ht_mimopowersave *mps =
3694 (const struct ieee80211_action_ht_mimopowersave *) ia;
/* enable flag + mode field are both derived from am_control */
3696 mwl_hal_setmimops(sc->sc_mh, ni->ni_macaddr,
3697 mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA,
3698 MS(mps->am_control, IEEE80211_A_HT_MIMOPWRSAVE_MODE));
3701 return sc->sc_recv_action(ni, wh, frm, efrm);
/*
 * A-MPDU tx setup (ADDBA request path).  Reserve one of the node's BA
 * stream slots (mn_ba[0..3], searched highest index first), then ask the
 * firmware to allocate a block-ack stream.  If no slot or no fw stream
 * is available the request is rejected so no aggregation is done.
 * On success the slot is stashed in tap->txa_private and the starting
 * sequence number is fetched from the firmware when available; finally
 * net80211's saved sc_addba_request completes the handshake.
 */
3705 mwl_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
3706 int dialogtoken, int baparamset, int batimeout)
3708 struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3709 struct ieee80211vap *vap = ni->ni_vap;
3710 struct mwl_node *mn = MWL_NODE(ni);
3711 struct mwl_bastate *bas;
3713 bas = tap->txa_private;
3715 const MWL_HAL_BASTREAM *sp;
3717 * Check for a free BA stream slot.
/* NB: slots are probed from mn_ba[3] down to mn_ba[0] */
3720 if (mn->mn_ba[3].bastream == NULL)
3721 bas = &mn->mn_ba[3];
3725 if (mn->mn_ba[2].bastream == NULL)
3726 bas = &mn->mn_ba[2];
3730 if (mn->mn_ba[1].bastream == NULL)
3731 bas = &mn->mn_ba[1];
3735 if (mn->mn_ba[0].bastream == NULL)
3736 bas = &mn->mn_ba[0];
3740 /* sta already has max BA streams */
3741 /* XXX assign BA stream to highest priority tid */
3742 DPRINTF(sc, MWL_DEBUG_AMPDU,
3743 "%s: already has max bastreams\n", __func__);
3744 sc->sc_stats.mst_ampdu_reject++;
3747 /* NB: no held reference to ni */
3748 sp = mwl_hal_bastream_alloc(MWL_VAP(vap)->mv_hvap,
3749 (baparamset & IEEE80211_BAPS_POLICY_IMMEDIATE) != 0,
3750 ni->ni_macaddr, tap->txa_tid, ni->ni_htparam,
3754 * No available stream, return 0 so no
3755 * a-mpdu aggregation will be done.
3757 DPRINTF(sc, MWL_DEBUG_AMPDU,
3758 "%s: no bastream available\n", __func__);
3759 sc->sc_stats.mst_ampdu_nostream++;
3762 DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: alloc bastream %p\n",
3764 /* NB: qos is left zero so we won't match in mwl_tx_start */
3766 tap->txa_private = bas;
3768 /* fetch current seq# from the firmware; if available */
/* STA mode uses our own address; otherwise the peer's MAC */
3769 if (mwl_hal_bastream_get_seqno(sc->sc_mh, bas->bastream,
3770 vap->iv_opmode == IEEE80211_M_STA ? vap->iv_myaddr : ni->ni_macaddr,
3771 &tap->txa_start) != 0)
3773 return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout);
/*
 * A-MPDU tx setup completion (ADDBA response path).  With the BA state
 * pre-allocated by mwl_addba_request: on IEEE80211_STATUS_SUCCESS create
 * the firmware BA stream sized by the negotiated BUFSIZ (falling back to
 * IEEE80211_AGGR_BAWMAX); if creation fails, or the peer NAK'd the
 * request, destroy the fw stream and release the slot so no aggregation
 * happens.  Always finishes via net80211's saved sc_addba_response.
 */
3777 mwl_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
3778 int code, int baparamset, int batimeout)
3780 struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3781 struct mwl_bastate *bas;
3783 bas = tap->txa_private;
3785 /* XXX should not happen */
3786 DPRINTF(sc, MWL_DEBUG_AMPDU,
3787 "%s: no BA stream allocated, TID %d\n",
3788 __func__, tap->txa_tid);
3789 sc->sc_stats.mst_addba_nostream++;
3792 if (code == IEEE80211_STATUS_SUCCESS) {
3793 struct ieee80211vap *vap = ni->ni_vap;
3797 * Tell the firmware to setup the BA stream;
3798 * we know resources are available because we
3799 * pre-allocated one before forming the request.
3801 bufsiz = MS(baparamset, IEEE80211_BAPS_BUFSIZ);
/* peer advertised no buffer size: use the max BA window */
3803 bufsiz = IEEE80211_AGGR_BAWMAX;
3804 error = mwl_hal_bastream_create(MWL_VAP(vap)->mv_hvap,
3805 bas->bastream, bufsiz, bufsiz, tap->txa_start);
3808 * Setup failed, return immediately so no a-mpdu
3809 * aggregation will be done.
3811 mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
3812 mwl_bastream_free(bas);
3813 tap->txa_private = NULL;
3815 DPRINTF(sc, MWL_DEBUG_AMPDU,
3816 "%s: create failed, error %d, bufsiz %d TID %d "
3817 "htparam 0x%x\n", __func__, error, bufsiz,
3818 tap->txa_tid, ni->ni_htparam);
3819 sc->sc_stats.mst_bacreate_failed++;
3822 /* NB: cache txq to avoid ptr indirect */
3823 mwl_bastream_setup(bas, tap->txa_tid, bas->bastream->txq);
3824 DPRINTF(sc, MWL_DEBUG_AMPDU,
3825 "%s: bastream %p assigned to txq %d TID %d bufsiz %d "
3826 "htparam 0x%x\n", __func__, bas->bastream,
3827 bas->txq, tap->txa_tid, bufsiz, ni->ni_htparam);
3830 * Other side NAK'd us; return the resources.
3832 DPRINTF(sc, MWL_DEBUG_AMPDU,
3833 "%s: request failed with code %d, destroy bastream %p\n",
3834 __func__, code, bas->bastream);
3835 mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
3836 mwl_bastream_free(bas);
3837 tap->txa_private = NULL;
3839 /* NB: firmware sends BAR so we don't need to */
3840 return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
/*
 * Tear down A-MPDU tx state for a TID: destroy the firmware BA stream,
 * release the driver's BA slot, then defer to net80211's saved
 * sc_addba_stop handler.
 */
3844 mwl_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
3846 struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3847 struct mwl_bastate *bas;
3849 bas = tap->txa_private;
3851 DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: destroy bastream %p\n",
3852 __func__, bas->bastream);
3853 mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
3854 mwl_bastream_free(bas);
3855 tap->txa_private = NULL;
3857 sc->sc_addba_stop(ni, tap);
3861 * Setup the rx data structures. This should only be
3862 * done once or we may get out of sync with the firmware.
/*
 * One-time receive setup: initialize every rx buffer, chain the rx
 * descriptors through pPhysNext (the last links back to the first,
 * forming a ring), mark setup done, then program filters via
 * mwl_mode_init.
 */
3865 mwl_startrecv(struct mwl_softc *sc)
3867 if (!sc->sc_recvsetup) {
3868 struct mwl_rxbuf *bf, *prev;
3869 struct mwl_rxdesc *ds;
3872 STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
3873 int error = mwl_rxbuf_init(sc, bf);
3875 DPRINTF(sc, MWL_DEBUG_RECV,
3876 "%s: mwl_rxbuf_init failed %d\n",
/* link previous descriptor to this buffer (little-endian for hw) */
3882 ds->pPhysNext = htole32(bf->bf_daddr);
/* close the ring: last descriptor points at the list head */
3889 htole32(STAILQ_FIRST(&sc->sc_rxbuf)->bf_daddr);
3891 sc->sc_recvsetup = 1;
3893 mwl_mode_init(sc); /* set filters, etc. */
/*
 * Map a channel plus vap configuration flags to the firmware AP
 * operating mode: HT channels yield N-only/A+N/G+N/B+G+N depending on
 * PUREN, band, and PUREG; non-HT 2GHz yields G-only or mixed; plain
 * B/A channels map directly; anything else falls back to mixed mode.
 */
3897 static MWL_HAL_APMODE
3898 mwl_getapmode(const struct ieee80211vap *vap, struct ieee80211_channel *chan)
3900 MWL_HAL_APMODE mode;
3902 if (IEEE80211_IS_CHAN_HT(chan)) {
3903 if (vap->iv_flags_ht & IEEE80211_FHT_PUREN)
3904 mode = AP_MODE_N_ONLY;
3905 else if (IEEE80211_IS_CHAN_5GHZ(chan))
3906 mode = AP_MODE_AandN;
3907 else if (vap->iv_flags & IEEE80211_F_PUREG)
3908 mode = AP_MODE_GandN;
3910 mode = AP_MODE_BandGandN;
3911 } else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3912 if (vap->iv_flags & IEEE80211_F_PUREG)
3913 mode = AP_MODE_G_ONLY;
3915 mode = AP_MODE_MIXED;
3916 } else if (IEEE80211_IS_CHAN_B(chan))
3917 mode = AP_MODE_B_ONLY;
3918 else if (IEEE80211_IS_CHAN_A(chan))
3919 mode = AP_MODE_A_ONLY;
3921 mode = AP_MODE_MIXED; /* XXX should not happen? */
/* Push the AP mode derived from the channel/vap flags to the firmware. */
3926 mwl_setapmode(struct ieee80211vap *vap, struct ieee80211_channel *chan)
3928 struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
3929 return mwl_hal_setapmode(hvap, mwl_getapmode(vap, chan));
3933 * Set/change channels.
/*
 * Switch the radio to a new channel: disable interrupts, drain pending
 * tx, program the channel, cap tx power at the lesser of twice the
 * regulatory max and ic_txpowlimit (values are carried in half-dBm, see
 * the 2* scaling and /2 on handoff), refresh mcast/mgt rates, update
 * radiotap channel state, and re-enable interrupts.
 */
3936 mwl_chan_set(struct mwl_softc *sc, struct ieee80211_channel *chan)
3938 struct mwl_hal *mh = sc->sc_mh;
3939 struct ifnet *ifp = sc->sc_ifp;
3940 struct ieee80211com *ic = ifp->if_l2com;
3941 MWL_HAL_CHANNEL hchan;
3944 DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n",
3945 __func__, chan->ic_freq, chan->ic_flags);
3948 * Convert to a HAL channel description with
3949 * the flags constrained to reflect the current
3952 mwl_mapchan(&hchan, chan);
3953 mwl_hal_intrset(mh, 0); /* disable interrupts */
3955 mwl_draintxq(sc); /* clear pending tx frames */
3957 mwl_hal_setchannel(mh, &hchan);
3959 * Tx power is cap'd by the regulatory setting and
3960 * possibly a user-set limit. We pass the min of
3961 * these to the hal to apply them to the cal data
/* ic_maxregpower is in dBm; internal limits are half-dBm units */
3965 maxtxpow = 2*chan->ic_maxregpower;
3966 if (maxtxpow > ic->ic_txpowlimit)
3967 maxtxpow = ic->ic_txpowlimit;
3968 mwl_hal_settxpower(mh, &hchan, maxtxpow / 2);
3969 /* NB: potentially change mcast/mgt rates */
3970 mwl_setcurchanrates(sc);
3973 * Update internal state.
/* radiotap headers carry the new channel freq/flags */
3975 sc->sc_tx_th.wt_chan_freq = htole16(chan->ic_freq);
3976 sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq);
3977 if (IEEE80211_IS_CHAN_A(chan)) {
3978 sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_A);
3979 sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_A);
3980 } else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3981 sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G);
3982 sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G);
3984 sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B);
3985 sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B);
3987 sc->sc_curchan = hchan;
3988 mwl_hal_intrset(mh, sc->sc_imask);
/* net80211 scan-start callback; currently only emits a debug trace. */
3994 mwl_scan_start(struct ieee80211com *ic)
3996 struct ifnet *ifp = ic->ic_ifp;
3997 struct mwl_softc *sc = ifp->if_softc;
3999 DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
/* net80211 scan-end callback; currently only emits a debug trace. */
4003 mwl_scan_end(struct ieee80211com *ic)
4005 struct ifnet *ifp = ic->ic_ifp;
4006 struct mwl_softc *sc = ifp->if_softc;
4008 DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
/* net80211 channel-change callback: program ic_curchan via mwl_chan_set. */
4012 mwl_set_channel(struct ieee80211com *ic)
4014 struct ifnet *ifp = ic->ic_ifp;
4015 struct mwl_softc *sc = ifp->if_softc;
4017 (void) mwl_chan_set(sc, ic->ic_curchan);
4021 * Handle a channel switch request. We inform the firmware
4022 * and mark the global state to suppress various actions.
4023 * NB: we issue only one request to the fw; we may be called
4024 * multiple times if there are multiple vap's.
4027 mwl_startcsa(struct ieee80211vap *vap)
4029 struct ieee80211com *ic = vap->iv_ic;
4030 struct mwl_softc *sc = ic->ic_ifp->if_softc;
4031 MWL_HAL_CHANNEL hchan;
/* a CSA is already in flight: nothing more to do (see note above) */
4033 if (sc->sc_csapending)
4036 mwl_mapchan(&hchan, ic->ic_csa_newchan);
4037 /* 1 =>'s quiet channel */
4038 mwl_hal_setchannelswitchie(sc->sc_mh, &hchan, 1, ic->ic_csa_count);
4039 sc->sc_csapending = 1;
4043 * Plumb any static WEP key for the station. This is
4044 * necessary as we must propagate the key from the
4045 * global key table of the vap to each sta db entry.
/*
 * Install the vap's default static WEP tx key for @mac when privacy is
 * on without WPA and a valid default key exists; no-op otherwise.
 */
4048 mwl_setanywepkey(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4050 if ((vap->iv_flags & (IEEE80211_F_PRIVACY|IEEE80211_F_WPA)) ==
4051 IEEE80211_F_PRIVACY &&
4052 vap->iv_def_txkey != IEEE80211_KEYIX_NONE &&
4053 vap->iv_nw_keys[vap->iv_def_txkey].wk_keyix != IEEE80211_KEYIX_NONE)
4054 (void) mwl_key_set(vap, &vap->iv_nw_keys[vap->iv_def_txkey], mac);
/*
 * Create/refresh the firmware sta db entry for a peer node.  WDS vaps
 * have no fw vap of their own, so their entries are installed through
 * the associated AP's handle (mv_ap_hvap).  After the entry is created
 * any static WEP key is re-plumbed since mwl_hal_newstation clobbers
 * per-station crypto state.
 */
4058 mwl_peerstadb(struct ieee80211_node *ni, int aid, int staid, MWL_HAL_PEERINFO *pi)
4060 #define WME(ie) ((const struct ieee80211_wme_info *) ie)
4061 struct ieee80211vap *vap = ni->ni_vap;
4062 struct mwl_hal_vap *hvap;
4065 if (vap->iv_opmode == IEEE80211_M_WDS) {
4067 * WDS vap's do not have a f/w vap; instead they piggyback
4068 * on an AP vap and we must install the sta db entry and
4069 * crypto state using that AP's handle (the WDS vap has none).
4071 hvap = MWL_VAP(vap)->mv_ap_hvap;
4073 hvap = MWL_VAP(vap)->mv_hvap;
4074 error = mwl_hal_newstation(hvap, ni->ni_macaddr,
4076 ni->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT),
4077 ni->ni_ies.wme_ie != NULL ? WME(ni->ni_ies.wme_ie)->wme_info : 0);
4080 * Setup security for this station. For sta mode this is
4081 * needed even though do the same thing on transition to
4082 * AUTH state because the call to mwl_hal_newstation
4083 * clobbers the crypto state we setup.
4085 mwl_setanywepkey(vap, ni->ni_macaddr);
/*
 * Re-plumb every valid key from the vap's global key table
 * (iv_nw_keys[0..IEEE80211_WEP_NKID-1]) into the firmware.
 */
4092 mwl_setglobalkeys(struct ieee80211vap *vap)
4094 struct ieee80211_key *wk;
4096 wk = &vap->iv_nw_keys[0];
4097 for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; wk++)
4098 if (wk->wk_keyix != IEEE80211_KEYIX_NONE)
4099 (void) mwl_key_set(vap, wk, vap->iv_myaddr);
4103 * Convert a legacy rate set to a firmware bitmask.
/*
 * Rates are in net80211 half-Mbps encoding (2=1Mb/s ... 108=54Mb/s);
 * each supported rate sets one bit in the firmware's legacy rate map.
 */
4106 get_rate_bitmap(const struct ieee80211_rateset *rs)
4112 for (i = 0; i < rs->rs_nrates; i++)
4113 switch (rs->rs_rates[i] & IEEE80211_RATE_VAL) {
4114 case 2: rates |= 0x001; break;
4115 case 4: rates |= 0x002; break;
4116 case 11: rates |= 0x004; break;
4117 case 22: rates |= 0x008; break;
4118 case 44: rates |= 0x010; break;
4119 case 12: rates |= 0x020; break;
4120 case 18: rates |= 0x040; break;
4121 case 24: rates |= 0x080; break;
4122 case 36: rates |= 0x100; break;
4123 case 48: rates |= 0x200; break;
4124 case 72: rates |= 0x400; break;
4125 case 96: rates |= 0x800; break;
4126 case 108: rates |= 0x1000; break;
4132 * Construct an HT firmware bitmask from an HT rate set.
/* Each MCS index below 16 sets the corresponding bit. */
4135 get_htrate_bitmap(const struct ieee80211_htrateset *rs)
4141 for (i = 0; i < rs->rs_nrates; i++) {
4142 if (rs->rs_rates[i] < 16)
4143 rates |= 1<<rs->rs_rates[i];
4149 * Craft station database entry for station.
4150 * NB: use host byte order here, the hal handles byte swapping.
/*
 * Fill @pi from the node's capabilities: legacy rate bitmap and capinfo
 * always; for HT nodes also the HT capability/param fields and the
 * MCS bitmap, then clamp SGI20/SGI40/CHWIDTH40 against the local vap
 * configuration and the node's current channel width.
 */
4152 static MWL_HAL_PEERINFO *
4153 mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni)
4155 const struct ieee80211vap *vap = ni->ni_vap;
4157 memset(pi, 0, sizeof(*pi));
4158 pi->LegacyRateBitMap = get_rate_bitmap(&ni->ni_rates);
4159 pi->CapInfo = ni->ni_capinfo;
4160 if (ni->ni_flags & IEEE80211_NODE_HT) {
4161 /* HT capabilities, etc */
4162 pi->HTCapabilitiesInfo = ni->ni_htcap;
4163 /* XXX pi.HTCapabilitiesInfo */
4164 pi->MacHTParamInfo = ni->ni_htparam;
4165 pi->HTRateBitMap = get_htrate_bitmap(&ni->ni_htrates);
4166 pi->AddHtInfo.ControlChan = ni->ni_htctlchan;
4167 pi->AddHtInfo.AddChan = ni->ni_ht2ndchan;
4168 pi->AddHtInfo.OpMode = ni->ni_htopmode;
4169 pi->AddHtInfo.stbc = ni->ni_htstbc;
4171 /* constrain according to local configuration */
4172 if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0)
4173 pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40;
4174 if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0)
4175 pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20;
4176 if (ni->ni_chw != 40)
4177 pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40;
4183 * Re-create the local sta db entry for a vap to ensure
4184 * up to date WME state is pushed to the firmware. Because
4185 * this resets crypto state this must be followed by a
4186 * reload of any keys in the global key table.
/*
 * STA vaps install an entry for themselves carrying the bss peer info
 * (only when RUN, since peer state is valid then); HOSTAP/MBSS vaps
 * install a bare entry with only the WME flag.  Both paths re-plumb the
 * global key table afterwards (see note above).
 */
4189 mwl_localstadb(struct ieee80211vap *vap)
4191 #define WME(ie) ((const struct ieee80211_wme_info *) ie)
4192 struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
4193 struct ieee80211_node *bss;
4194 MWL_HAL_PEERINFO pi;
4197 switch (vap->iv_opmode) {
4198 case IEEE80211_M_STA:
4200 error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0,
4201 vap->iv_state == IEEE80211_S_RUN ?
4202 mkpeerinfo(&pi, bss) : NULL,
4203 (bss->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)),
4204 bss->ni_ies.wme_ie != NULL ?
4205 WME(bss->ni_ies.wme_ie)->wme_info : 0);
4207 mwl_setglobalkeys(vap);
4209 case IEEE80211_M_HOSTAP:
4210 case IEEE80211_M_MBSS:
4211 error = mwl_hal_newstation(hvap, vap->iv_myaddr,
4212 0, 0, NULL, vap->iv_flags & IEEE80211_F_WME, 0);
4214 mwl_setglobalkeys(vap);
/*
 * Per-vap 802.11 state machine hook.  Sequence:
 *   1. stop the aging timer and clear any radar-detection mode left
 *      over from CAC or in-service monitoring;
 *   2. do pre-transition firmware work keyed on the target state
 *      (INIT/SCAN/AUTH/CSA/CAC);
 *   3. invoke net80211's saved mv_newstate to do generic work;
 *   4. on a successful transition to RUN, rebuild the local sta db,
 *      do opmode-specific setup (beacon + hal start for AP/MBSS,
 *      assoc id/RTS/DWDS for STA, EAPOL format for WDS), pick the CS
 *      mode from the bss channel band, and restart the aging timer.
 * Leaving RUN with DWDS enabled drops the shared DWDS refcount.
 */
4225 mwl_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4227 struct mwl_vap *mvp = MWL_VAP(vap);
4228 struct mwl_hal_vap *hvap = mvp->mv_hvap;
4229 struct ieee80211com *ic = vap->iv_ic;
4230 struct ieee80211_node *ni = NULL;
4231 struct ifnet *ifp = ic->ic_ifp;
4232 struct mwl_softc *sc = ifp->if_softc;
4233 struct mwl_hal *mh = sc->sc_mh;
4234 enum ieee80211_state ostate = vap->iv_state;
4237 DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: %s -> %s\n",
4238 vap->iv_ifp->if_xname, __func__,
4239 ieee80211_state_name[ostate], ieee80211_state_name[nstate]);
4241 callout_stop(&sc->sc_timer);
4243 * Clear current radar detection state.
4245 if (ostate == IEEE80211_S_CAC) {
4246 /* stop quiet mode radar detection */
4247 mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_STOP);
4248 } else if (sc->sc_radarena) {
4249 /* stop in-service radar detection */
4250 mwl_hal_setradardetection(mh, DR_DFS_DISABLE);
4251 sc->sc_radarena = 0;
4254 * Carry out per-state actions before doing net80211 work.
4256 if (nstate == IEEE80211_S_INIT) {
4257 /* NB: only ap+sta vap's have a fw entity */
4260 } else if (nstate == IEEE80211_S_SCAN) {
4261 mwl_hal_start(hvap);
4262 /* NB: this disables beacon frames */
4263 mwl_hal_setinframode(hvap);
4264 } else if (nstate == IEEE80211_S_AUTH) {
4266 * Must create a sta db entry in case a WEP key needs to
4267 * be plumbed. This entry will be overwritten if we
4268 * associate; otherwise it will be reclaimed on node free.
4271 MWL_NODE(ni)->mn_hvap = hvap;
4272 (void) mwl_peerstadb(ni, 0, 0, NULL);
4273 } else if (nstate == IEEE80211_S_CSA) {
4274 /* XXX move to below? */
4275 if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
4276 vap->iv_opmode == IEEE80211_M_MBSS)
4278 } else if (nstate == IEEE80211_S_CAC) {
4279 /* XXX move to below? */
4280 /* stop ap xmit and enable quiet mode radar detection */
4281 mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_START);
4285 * Invoke the parent method to do net80211 work.
4287 error = mvp->mv_newstate(vap, nstate, arg);
4290 * Carry out work that must be done after net80211 runs;
4291 * this work requires up to date state (e.g. iv_bss).
4293 if (error == 0 && nstate == IEEE80211_S_RUN) {
4294 /* NB: collect bss node again, it may have changed */
4297 DPRINTF(sc, MWL_DEBUG_STATE,
4298 "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
4299 "capinfo 0x%04x chan %d\n",
4300 vap->iv_ifp->if_xname, __func__, vap->iv_flags,
4301 ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo,
4302 ieee80211_chan2ieee(ic, ic->ic_curchan));
4305 * Recreate local sta db entry to update WME/HT state.
4307 mwl_localstadb(vap);
4308 switch (vap->iv_opmode) {
4309 case IEEE80211_M_HOSTAP:
4310 case IEEE80211_M_MBSS:
4311 if (ostate == IEEE80211_S_CAC) {
4312 /* enable in-service radar detection */
4313 mwl_hal_setradardetection(mh,
4314 DR_IN_SERVICE_MONITOR_START);
4315 sc->sc_radarena = 1;
4318 * Allocate and setup the beacon frame
4319 * (and related state).
4321 error = mwl_reset_vap(vap, IEEE80211_S_RUN);
4323 DPRINTF(sc, MWL_DEBUG_STATE,
4324 "%s: beacon setup failed, error %d\n",
4328 /* NB: must be after setting up beacon */
4329 mwl_hal_start(hvap);
4331 case IEEE80211_M_STA:
4332 DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: aid 0x%x\n",
4333 vap->iv_ifp->if_xname, __func__, ni->ni_associd);
4335 * Set state now that we're associated.
4337 mwl_hal_setassocid(hvap, ni->ni_bssid, ni->ni_associd);
4339 mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
/* first DWDS vap coming up enables fw DWDS support */
4340 if ((vap->iv_flags & IEEE80211_F_DWDS) &&
4341 sc->sc_ndwdsvaps++ == 0)
4342 mwl_hal_setdwds(mh, 1);
4344 case IEEE80211_M_WDS:
4345 DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: bssid %s\n",
4346 vap->iv_ifp->if_xname, __func__,
4347 ether_sprintf(ni->ni_bssid));
4348 mwl_seteapolformat(vap);
4354 * Set CS mode according to operating channel;
4355 * this mostly an optimization for 5GHz.
4357 * NB: must follow mwl_hal_start which resets csmode
4359 if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
4360 mwl_hal_setcsmode(mh, CSMODE_AGGRESSIVE);
4362 mwl_hal_setcsmode(mh, CSMODE_AUTO_ENA);
4364 * Start timer to prod firmware.
4366 if (sc->sc_ageinterval != 0)
4367 callout_reset(&sc->sc_timer, sc->sc_ageinterval*hz,
4368 mwl_agestations, sc);
4369 } else if (nstate == IEEE80211_S_SLEEP) {
4370 /* XXX set chip in power save */
4371 } else if ((vap->iv_flags & IEEE80211_F_DWDS) &&
4372 --sc->sc_ndwdsvaps == 0)
4373 mwl_hal_setdwds(mh, 0);
4379 * Manage station id's; these are separate from AID's
4380 * as AID's may have values out of the range of possible
4381 * station id's acceptable to the firmware.
/*
 * Allocate a station id: reuse @aid directly when it is in range and
 * free; otherwise linearly scan the sc_staid bitmap (id 0 is reserved).
 * The chosen id is marked in the bitmap before returning.
 */
4384 allocstaid(struct mwl_softc *sc, int aid)
4388 if (!(0 < aid && aid < MWL_MAXSTAID) || isset(sc->sc_staid, aid)) {
4389 /* NB: don't use 0 */
4390 for (staid = 1; staid < MWL_MAXSTAID; staid++)
4391 if (isclr(sc->sc_staid, staid))
4395 setbit(sc->sc_staid, staid);
/* Release a station id back to the sc_staid bitmap. */
4400 delstaid(struct mwl_softc *sc, int staid)
4402 clrbit(sc->sc_staid, staid);
4406 * Setup driver-specific state for a newly associated node.
4407 * Note that we're called also on a re-associate, the isnew
4408 * param tells us if this is the first time or not.
/*
 * Allocate a firmware station id keyed off the node's AID, record the
 * vap's fw handle in the node, and (re)create the firmware sta db entry
 * with full peer info.  Errors are only logged; there is no recovery
 * path here (see the XXX below).
 */
4411 mwl_newassoc(struct ieee80211_node *ni, int isnew)
4413 struct ieee80211vap *vap = ni->ni_vap;
4414 struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
4415 struct mwl_node *mn = MWL_NODE(ni);
4416 MWL_HAL_PEERINFO pi;
4420 aid = IEEE80211_AID(ni->ni_associd);
4422 mn->mn_staid = allocstaid(sc, aid);
4423 mn->mn_hvap = MWL_VAP(vap)->mv_hvap;
4426 /* XXX reset BA stream? */
4428 DPRINTF(sc, MWL_DEBUG_NODE, "%s: mac %s isnew %d aid %d staid %d\n",
4429 __func__, ether_sprintf(ni->ni_macaddr), isnew, aid, mn->mn_staid);
4430 error = mwl_peerstadb(ni, aid, mn->mn_staid, mkpeerinfo(&pi, ni));
4432 DPRINTF(sc, MWL_DEBUG_NODE,
4433 "%s: error %d creating sta db entry\n",
4435 /* XXX how to deal with error? */
4440 * Periodically poke the firmware to age out station state
4441 * (power save queues, pending tx aggregates).
/* Callout handler: issue a fw keepalive and re-arm while enabled. */
4444 mwl_agestations(void *arg)
4446 struct mwl_softc *sc = arg;
4448 mwl_hal_setkeepalive(sc->sc_mh);
4449 if (sc->sc_ageinterval != 0) /* NB: catch dynamic changes */
4450 callout_schedule(&sc->sc_timer, sc->sc_ageinterval*hz);
/*
 * Linear search of the hal channel table for the entry matching the
 * given IEEE channel number; NULL semantics for a miss are presumed —
 * the fall-through return is elided here (TODO confirm against full source).
 */
4453 static const struct mwl_hal_channel *
4454 findhalchannel(const MWL_HAL_CHANNELINFO *ci, int ieee)
4458 for (i = 0; i < ci->nchannels; i++) {
4459 const struct mwl_hal_channel *hc = &ci->channels[i];
4460 if (hc->ieee == ieee)
/*
 * net80211 regdomain hook: validate each proposed channel against the
 * hal's calibration tables for its band/width, capping ic_maxpower at
 * twice the hal's maxTxPow (half-dBm units).  HT40 channels also check
 * the extension channel.  Channels outside 2.4/5GHz or without cal
 * data are reported via if_printf.
 */
4467 mwl_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
4468 int nchan, struct ieee80211_channel chans[])
4470 struct mwl_softc *sc = ic->ic_ifp->if_softc;
4471 struct mwl_hal *mh = sc->sc_mh;
4472 const MWL_HAL_CHANNELINFO *ci;
4475 for (i = 0; i < nchan; i++) {
4476 struct ieee80211_channel *c = &chans[i];
4477 const struct mwl_hal_channel *hc;
4479 if (IEEE80211_IS_CHAN_2GHZ(c)) {
4480 mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_2DOT4GHZ,
4481 IEEE80211_IS_CHAN_HT40(c) ?
4482 MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4483 } else if (IEEE80211_IS_CHAN_5GHZ(c)) {
4484 mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_5GHZ,
4485 IEEE80211_IS_CHAN_HT40(c) ?
4486 MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4488 if_printf(ic->ic_ifp,
4489 "%s: channel %u freq %u/0x%x not 2.4/5GHz\n",
4490 __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
4494 * Verify channel has cal data and cap tx power.
4496 hc = findhalchannel(ci, c->ic_ieee);
4498 if (c->ic_maxpower > 2*hc->maxTxPow)
4499 c->ic_maxpower = 2*hc->maxTxPow;
4502 if (IEEE80211_IS_CHAN_HT40(c)) {
4504 * Look for the extension channel since the
4505 * hal table only has the primary channel.
4507 hc = findhalchannel(ci, c->ic_extieee);
4509 if (c->ic_maxpower > 2*hc->maxTxPow)
4510 c->ic_maxpower = 2*hc->maxTxPow;
4514 if_printf(ic->ic_ifp,
4515 "%s: no cal data for channel %u ext %u freq %u/0x%x\n",
4516 __func__, c->ic_ieee, c->ic_extieee,
4517 c->ic_freq, c->ic_flags);
/* Shorthand flag combos for HT channels in the 2GHz (G) and 5GHz (A) bands. */
4525 #define IEEE80211_CHAN_HTG (IEEE80211_CHAN_HT|IEEE80211_CHAN_G)
4526 #define IEEE80211_CHAN_HTA (IEEE80211_CHAN_HT|IEEE80211_CHAN_A)
/*
 * Fill one net80211 channel entry; txpow is in dBm, ic_maxpower in
 * half-dBm (hence the 2* scaling).
 */
4529 addchan(struct ieee80211_channel *c, int freq, int flags, int ieee, int txpow)
4532 c->ic_flags = flags;
4535 c->ic_maxpower = 2*txpow;
4536 c->ic_maxregpower = txpow;
/* Linear lookup of a channel entry by exact (freq, flags) match. */
4539 static const struct ieee80211_channel *
4540 findchannel(const struct ieee80211_channel chans[], int nchans,
4541 int freq, int flags)
4543 const struct ieee80211_channel *c;
4546 for (i = 0; i < nchans; i++) {
4548 if (c->ic_freq == freq && c->ic_flags == flags)
/*
 * Append HT40 channel pairs to the channel list.  For each hal entry
 * (the HT40 primary) the already-added HT20 extension channel 20MHz
 * above is located; then an HT40U entry for the primary and an HT40D
 * entry for the extension are appended, cross-referencing each other
 * via ic_extieee.  Stops when maxchans is reached.
 */
4555 addht40channels(struct ieee80211_channel chans[], int maxchans, int *nchans,
4556 const MWL_HAL_CHANNELINFO *ci, int flags)
4558 struct ieee80211_channel *c;
4559 const struct ieee80211_channel *extc;
4560 const struct mwl_hal_channel *hc;
4563 c = &chans[*nchans];
/* NB: HT is stripped so the HT20 lookup flag combo below is exact */
4565 flags &= ~IEEE80211_CHAN_HT;
4566 for (i = 0; i < ci->nchannels; i++) {
4568 * Each entry defines an HT40 channel pair; find the
4569 * extension channel above and the insert the pair.
4571 hc = &ci->channels[i];
4572 extc = findchannel(chans, *nchans, hc->freq+20,
4573 flags | IEEE80211_CHAN_HT20);
4575 if (*nchans >= maxchans)
4577 addchan(c, hc->freq, flags | IEEE80211_CHAN_HT40U,
4578 hc->ieee, hc->maxTxPow);
4579 c->ic_extieee = extc->ic_ieee;
4581 if (*nchans >= maxchans)
4583 addchan(c, extc->ic_freq, flags | IEEE80211_CHAN_HT40D,
4584 extc->ic_ieee, hc->maxTxPow);
4585 c->ic_extieee = hc->ieee;
/*
 * Append 20MHz channels from a hal table to the channel list.  For
 * 2GHz entries a separate B-only duplicate is emitted; for HTG a
 * G-only duplicate and for HTA an A-only duplicate are emitted, with
 * the HT entries themselves marked HT20.  Stops at maxchans.
 */
4592 addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans,
4593 const MWL_HAL_CHANNELINFO *ci, int flags)
4595 struct ieee80211_channel *c;
4598 c = &chans[*nchans];
4600 for (i = 0; i < ci->nchannels; i++) {
4601 const struct mwl_hal_channel *hc;
4603 hc = &ci->channels[i];
4604 if (*nchans >= maxchans)
4606 addchan(c, hc->freq, flags, hc->ieee, hc->maxTxPow);
4608 if (flags == IEEE80211_CHAN_G || flags == IEEE80211_CHAN_HTG) {
4609 /* g channel have a separate b-only entry */
4610 if (*nchans >= maxchans)
/* duplicate the just-added channel, retagged as B-only */
4613 c[-1].ic_flags = IEEE80211_CHAN_B;
4616 if (flags == IEEE80211_CHAN_HTG) {
4617 /* HT g channel have a separate g-only entry */
4618 if (*nchans >= maxchans)
4620 c[-1].ic_flags = IEEE80211_CHAN_G;
4622 c[0].ic_flags &= ~IEEE80211_CHAN_HT;
4623 c[0].ic_flags |= IEEE80211_CHAN_HT20; /* HT20 */
4626 if (flags == IEEE80211_CHAN_HTA) {
4627 /* HT a channel have a separate a-only entry */
4628 if (*nchans >= maxchans)
4630 c[-1].ic_flags = IEEE80211_CHAN_A;
4632 c[0].ic_flags &= ~IEEE80211_CHAN_HT;
4633 c[0].ic_flags |= IEEE80211_CHAN_HT20; /* HT20 */
/*
 * Build the full channel list from the hal's calibration data:
 * 20MHz 2.4GHz (HTG) and 5GHz (HTA) channels first, then the HT40
 * pairs for each band.  The resulting list is unsorted.
 */
4640 getchannels(struct mwl_softc *sc, int maxchans, int *nchans,
4641 struct ieee80211_channel chans[])
4643 const MWL_HAL_CHANNELINFO *ci;
4646 * Use the channel info from the hal to craft the
4647 * channel list. Note that we pass back an unsorted
4648 * list; the caller is required to sort it for us
4652 if (mwl_hal_getchannelinfo(sc->sc_mh,
4653 MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
4654 addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
4655 if (mwl_hal_getchannelinfo(sc->sc_mh,
4656 MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
4657 addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
4658 if (mwl_hal_getchannelinfo(sc->sc_mh,
4659 MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
4660 addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
4661 if (mwl_hal_getchannelinfo(sc->sc_mh,
4662 MWL_FREQ_BAND_5GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
4663 addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
/* net80211 radio-capabilities callback: report the hal channel list. */
4667 mwl_getradiocaps(struct ieee80211com *ic,
4668 int maxchans, int *nchans, struct ieee80211_channel chans[])
4670 struct mwl_softc *sc = ic->ic_ifp->if_softc;
4672 getchannels(sc, maxchans, nchans, chans);
/*
 * Populate ic_channels from the hal at attach time and install a
 * default debug regdomain; returns EIO when no channels were found.
 */
4676 mwl_getchannels(struct mwl_softc *sc)
4678 struct ifnet *ifp = sc->sc_ifp;
4679 struct ieee80211com *ic = ifp->if_l2com;
4682 * Use the channel info from the hal to craft the
4683 * channel list for net80211. Note that we pass up
4684 * an unsorted list; net80211 will sort it for us.
4686 memset(ic->ic_channels, 0, sizeof(ic->ic_channels));
4688 getchannels(sc, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels);
4690 ic->ic_regdomain.regdomain = SKU_DEBUG;
4691 ic->ic_regdomain.country = CTRY_DEFAULT;
4692 ic->ic_regdomain.location = 'I';
4693 ic->ic_regdomain.isocc[0] = ' '; /* XXX? */
4694 ic->ic_regdomain.isocc[1] = ' ';
4695 return (ic->ic_nchans == 0 ? EIO : 0);
4697 #undef IEEE80211_CHAN_HTA
4698 #undef IEEE80211_CHAN_HTG
/*
 * Debug helper: dump one rx descriptor (virtual/physical addresses,
 * link and data pointers, status, length, RSSI, channel, rate, QoS/HT
 * fields) with a marker showing hw ownership and OK/error status.
 */
4702 mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix)
4704 const struct mwl_rxdesc *ds = bf->bf_desc;
4705 uint32_t status = le32toh(ds->Status);
4707 printf("R[%2u] (DS.V:%p DS.P:%p) NEXT:%08x DATA:%08x RC:%02x%s\n"
4708 " STAT:%02x LEN:%04x RSSI:%02x CHAN:%02x RATE:%02x QOS:%04x HT:%04x\n",
4709 ix, ds, (const struct mwl_desc *)bf->bf_daddr,
4710 le32toh(ds->pPhysNext), le32toh(ds->pPhysBuffData),
4712 ds->RxControl != EAGLE_RXD_CTRL_DRIVER_OWN ?
4713 "" : (status & EAGLE_RXD_STATUS_OK) ? " *" : " !",
4714 ds->Status, le16toh(ds->PktLen), ds->RSSI, ds->Channel,
4715 ds->Rate, le16toh(ds->QosCtrl), le16toh(ds->HtSig2))
/*
 * Debug helper: dump one tx descriptor — addresses, link/data pointers,
 * length, status (with used/retry markers), rate/priority/QoS fields,
 * the multi-frame length/pointer arrays, and finally a raw hex dump of
 * the whole descriptor, 16 bytes per line.
 */
4719 mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix)
4721 const struct mwl_txdesc *ds = bf->bf_desc;
4722 uint32_t status = le32toh(ds->Status);
4724 printf("Q%u[%3u]", qnum, ix);
4725 printf(" (DS.V:%p DS.P:%p)\n",
4726 ds, (const struct mwl_txdesc *)bf->bf_daddr);
4727 printf(" NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n",
4728 le32toh(ds->pPhysNext),
4729 le32toh(ds->PktPtr), le16toh(ds->PktLen), status,
4730 status & EAGLE_TXD_STATUS_USED ?
4731 "" : (status & 3) != 0 ? " *" : " !");
4732 printf(" RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n",
4733 ds->DataRate, ds->TxPriority, le16toh(ds->QosCtrl),
4734 le32toh(ds->SapPktInfo), le16toh(ds->Format));
4736 printf(" MULTIFRAMES:%u LEN:%04x %04x %04x %04x %04x %04x\n"
4737 , le32toh(ds->multiframes)
4738 , le16toh(ds->PktLenArray[0]), le16toh(ds->PktLenArray[1])
4739 , le16toh(ds->PktLenArray[2]), le16toh(ds->PktLenArray[3])
4740 , le16toh(ds->PktLenArray[4]), le16toh(ds->PktLenArray[5])
4742 printf(" DATA:%08x %08x %08x %08x %08x %08x\n"
4743 , le32toh(ds->PktPtrArray[0]), le32toh(ds->PktPtrArray[1])
4744 , le32toh(ds->PktPtrArray[2]), le32toh(ds->PktPtrArray[3])
4745 , le32toh(ds->PktPtrArray[4]), le32toh(ds->PktPtrArray[5])
/* raw hex dump of the descriptor bytes, 16 per output line */
4749 { const uint8_t *cp = (const uint8_t *) ds;
4751 for (i = 0; i < sizeof(struct mwl_txdesc); i++) {
4752 printf("%02x ", cp[i]);
4753 if (((i+1) % 16) == 0)
4760 #endif /* MWL_DEBUG */
/*
 * Debug helper: walk a tx queue's active list under its lock, syncing
 * each descriptor for CPU access before printing it via mwl_printtxbuf.
 */
4764 mwl_txq_dump(struct mwl_txq *txq)
4766 struct mwl_txbuf *bf;
4770 STAILQ_FOREACH(bf, &txq->active, bf_list) {
4771 struct mwl_txdesc *ds = bf->bf_desc;
4772 MWL_TXDESC_SYNC(txq, ds,
4773 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4775 mwl_printtxbuf(bf, txq->qnum, i);
4779 MWL_TXQ_UNLOCK(txq);
/*
 * Per-second watchdog callout: re-arms itself, and when the tx timer
 * expires while the interface is running, probes the firmware with a
 * keepalive to distinguish a hung firmware from a plain tx timeout,
 * dumps queue 0 for diagnosis, and bumps the watchdog statistic.
 */
4784 mwl_watchdog(void *arg)
4786 struct mwl_softc *sc;
4790 callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);
/* tx timer idle or still counting down: nothing to do this tick */
4791 if (sc->sc_tx_timer == 0 || --sc->sc_tx_timer > 0)
4795 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && !sc->sc_invalid) {
4796 if (mwl_hal_setkeepalive(sc->sc_mh))
4797 if_printf(ifp, "transmit timeout (firmware hung?)\n");
4799 if_printf(ifp, "transmit timeout\n");
4802 mwl_txq_dump(&sc->sc_txq[0]);/*XXX*/
4805 sc->sc_stats.mst_watchdog++;
4811 * Diagnostic interface to the HAL. This is used by various
4812 * tools to do things like retrieve register contents for
4813 * debugging. The mechanism is intentionally opaque so that
4814 * it can change frequently w/o concern for compatiblity.
/*
 * Copy in the optional request payload (MWL_DIAG_IN), allocate an
 * output buffer for dynamic results (MWL_DIAG_DYN), run the opaque hal
 * diag op, copy results back out, and free whatever was allocated.
 * M_NOWAIT allocations may fail; both failure paths are handled.
 */
4817 mwl_ioctl_diag(struct mwl_softc *sc, struct mwl_diag *md)
4819 struct mwl_hal *mh = sc->sc_mh;
4820 u_int id = md->md_id & MWL_DIAG_ID;
4821 void *indata = NULL;
4822 void *outdata = NULL;
4823 u_int32_t insize = md->md_in_size;
4824 u_int32_t outsize = md->md_out_size;
4827 if (md->md_id & MWL_DIAG_IN) {
4831 indata = malloc(insize, M_TEMP, M_NOWAIT);
4832 if (indata == NULL) {
4836 error = copyin(md->md_in_data, indata, insize);
4840 if (md->md_id & MWL_DIAG_DYN) {
4842 * Allocate a buffer for the results (otherwise the HAL
4843 * returns a pointer to a buffer where we can read the
4844 * results). Note that we depend on the HAL leaving this
4845 * pointer for us to use below in reclaiming the buffer;
4846 * may want to be more defensive.
4848 outdata = malloc(outsize, M_TEMP, M_NOWAIT);
4849 if (outdata == NULL) {
4854 if (mwl_hal_getdiagstate(mh, id, indata, insize, &outdata, &outsize)) {
4855 if (outsize < md->md_out_size)
4856 md->md_out_size = outsize;
4857 if (outdata != NULL)
4858 error = copyout(outdata, md->md_out_data,
/* release only what this routine allocated */
4864 if ((md->md_id & MWL_DIAG_IN) && indata != NULL)
4865 free(indata, M_TEMP);
4866 if ((md->md_id & MWL_DIAG_DYN) && outdata != NULL)
4867 free(outdata, M_TEMP);
/*
 * Diagnostic reset (MWL_DIAGAPI): optionally reload firmware
 * (md_id == 0), refetch h/w specs, re-setup DMA, then drain and reset
 * the tx queues and restart rx at the list head so driver and firmware
 * agree on descriptor state.  Caller must hold the softc lock.
 */
4872 mwl_ioctl_reset(struct mwl_softc *sc, struct mwl_diag *md)
4874 struct mwl_hal *mh = sc->sc_mh;
4877 MWL_LOCK_ASSERT(sc);
4879 if (md->md_id == 0 && mwl_hal_fwload(mh, NULL) != 0) {
4880 device_printf(sc->sc_dev, "unable to load firmware\n");
4883 if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
4884 device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
4887 error = mwl_setupdma(sc);
4889 /* NB: mwl_setupdma prints a msg */
4893 * Reset tx/rx data structures; after reload we must
4894 * re-start the driver's notion of the next xmit/recv.
4896 mwl_draintxq(sc); /* clear pending frames */
4897 mwl_resettxq(sc); /* rebuild tx q lists */
4898 sc->sc_rxnext = NULL; /* force rx to start at the list head */
4901 #endif /* MWL_DIAGAPI */
/*
 * Network-interface ioctl handler.
 * NOTE(review): fragmentary extract — the switch statement, its
 * case labels (presumably SIOCSIFFLAGS, SIOCGMVSTATS, SIOCGMVDIAG,
 * SIOCGMVRESET, SIOCGIFMEDIA, default), braces, and several
 * statements are elided; verify branch boundaries against the full
 * source before editing.
 */
4904 mwl_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
/* Up AND running: the interface is fully operational. */
4906 #define IS_RUNNING(ifp) \
4907 ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
4908 struct mwl_softc *sc = ifp->if_softc;
4909 struct ieee80211com *ic = ifp->if_l2com;
4910 struct ifreq *ifr = (struct ifreq *)data;
4911 int error = 0, startall;
/* --- interface-flags change (elided case label) --- */
4917 if (IS_RUNNING(ifp)) {
4919 * To avoid rescanning another access point,
4920 * do not call mwl_init() here. Instead,
4921 * only reflect promisc mode settings.
4924 } else if (ifp->if_flags & IFF_UP) {
4926 * Beware of being called during attach/detach
4927 * to reset promiscuous mode. In that case we
4928 * will still be marked UP but not RUNNING.
4929 * However trying to re-init the interface
4930 * is the wrong thing to do as we've already
4931 * torn down much of our state. There's
4932 * probably a better way to deal with this.
4934 if (!sc->sc_invalid) {
4935 mwl_init_locked(sc); /* XXX lose error */
4939 mwl_stop_locked(ifp, 1);
4942 ieee80211_start_all(ic);
/* --- driver statistics request (elided case label) --- */
4945 mwl_hal_gethwstats(sc->sc_mh, &sc->sc_stats.hw_stats);
4946 /* NB: embed these numbers to get a consistent view */
4947 sc->sc_stats.mst_tx_packets = ifp->if_opackets;
4948 sc->sc_stats.mst_rx_packets = ifp->if_ipackets;
4950 * NB: Drop the softc lock in case of a page fault;
4951 * we'll accept any potential inconsistency in the
4952 * statistics. The alternative is to copy the data
4953 * to a local structure.
4955 return copyout(&sc->sc_stats,
4956 ifr->ifr_data, sizeof (sc->sc_stats));
/* --- diagnostic ioctls, MWL_DIAGAPI only --- */
4959 /* XXX check privs */
4960 return mwl_ioctl_diag(sc, (struct mwl_diag *) ifr);
4962 /* XXX check privs */
4964 error = mwl_ioctl_reset(sc,(struct mwl_diag *) ifr);
4967 #endif /* MWL_DIAGAPI */
/* --- media / fallthrough to generic ethernet handling --- */
4969 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
4972 error = ether_ioctl(ifp, cmd, data);
/*
 * Sysctl handler for the "debug" knob.  The value packs two masks:
 * the low 24 bits are the driver's debug mask (sc_debug) and the
 * high 8 bits are the HAL debug level; a write splits the value
 * back into the two consumers.
 * NOTE(review): fragmentary extract — braces and returns are elided.
 */
4984 mwl_sysctl_debug(SYSCTL_HANDLER_ARGS)
4986 struct mwl_softc *sc = arg1;
/* Compose the combined 32-bit view: HAL level in the top byte. */
4989 debug = sc->sc_debug | (mwl_hal_getdebug(sc->sc_mh) << 24);
4990 error = sysctl_handle_int(oidp, &debug, 0, req);
/* Read-only access (no newptr) or error: nothing to store. */
4991 if (error || !req->newptr)
4993 mwl_hal_setdebug(sc->sc_mh, debug >> 24);
4994 sc->sc_debug = debug & 0x00ffffff;
4997 #endif /* MWL_DEBUG */
/*
 * Register the per-device "debug" sysctl (read/write int) under the
 * device's sysctl tree, seeded from the global mwl_debug default.
 * (The surrounding MWL_DEBUG conditional is elided in this extract.)
 */
5000 mwl_sysctlattach(struct mwl_softc *sc)
5003 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
5004 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
5006 sc->sc_debug = mwl_debug;
5007 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
5008 "debug", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
5009 mwl_sysctl_debug, "I", "control debugging printfs");
5014 * Announce various information on device/driver attach.
5017 mwl_announce(struct mwl_softc *sc)
5019 struct ifnet *ifp = sc->sc_ifp;
5021 if_printf(ifp, "Rev A%d hardware, v%d.%d.%d.%d firmware (regioncode %d)\n",
5022 sc->sc_hwspecs.hwVersion,
5023 (sc->sc_hwspecs.fwReleaseNumber>>24) & 0xff,
5024 (sc->sc_hwspecs.fwReleaseNumber>>16) & 0xff,
5025 (sc->sc_hwspecs.fwReleaseNumber>>8) & 0xff,
5026 (sc->sc_hwspecs.fwReleaseNumber>>0) & 0xff,
5027 sc->sc_hwspecs.regionCode);
5028 sc->sc_fwrelease = sc->sc_hwspecs.fwReleaseNumber;
5032 for (i = 0; i <= WME_AC_VO; i++) {
5033 struct mwl_txq *txq = sc->sc_ac2q[i];
5034 if_printf(ifp, "Use hw queue %u for %s traffic\n",
5035 txq->qnum, ieee80211_wme_acnames[i]);
5038 if (bootverbose || mwl_rxdesc != MWL_RXDESC)
5039 if_printf(ifp, "using %u rx descriptors\n", mwl_rxdesc);
5040 if (bootverbose || mwl_rxbuf != MWL_RXBUF)
5041 if_printf(ifp, "using %u rx buffers\n", mwl_rxbuf);
5042 if (bootverbose || mwl_txbuf != MWL_TXBUF)
5043 if_printf(ifp, "using %u tx buffers\n", mwl_txbuf);
5044 if (bootverbose && mwl_hal_ismbsscapable(sc->sc_mh))
5045 if_printf(ifp, "multi-bss support\n");
5046 #ifdef MWL_TX_NODROP
5048 if_printf(ifp, "no tx drop\n");