2 * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
3 * Copyright (c) 2007-2008 Marvell Semiconductor, Inc.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer,
11 * without modification.
12 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
13 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
14 * redistribution must be conditioned upon including a substantially
15 * similar Disclaimer requirement for further binary redistribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
21 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
22 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
23 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
26 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGES.
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
35 * Driver for the Marvell 88W8363 Wireless LAN controller.
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/sysctl.h>
46 #include <sys/malloc.h>
48 #include <sys/mutex.h>
49 #include <sys/kernel.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
52 #include <sys/errno.h>
53 #include <sys/callout.h>
55 #include <sys/endian.h>
56 #include <sys/kthread.h>
57 #include <sys/taskqueue.h>
59 #include <machine/bus.h>
62 #include <net/if_dl.h>
63 #include <net/if_media.h>
64 #include <net/if_types.h>
65 #include <net/if_arp.h>
66 #include <net/ethernet.h>
67 #include <net/if_llc.h>
71 #include <net80211/ieee80211_var.h>
72 #include <net80211/ieee80211_regdomain.h>
75 #include <netinet/in.h>
76 #include <netinet/if_ether.h>
79 #include <dev/mwl/if_mwlvar.h>
80 #include <dev/mwl/mwldiag.h>
/* idiomatic shorthands: MS = mask+shift, SM = shift+mask */
/*
 * MS(v,x): extract the field selected by mask 'x' from 'v', shifting it
 * down by the companion shift constant x_S (derived by token pasting,
 * so 'x' itself must be a bare identifier).
 * SM(v,x): inverse -- position 'v' into the field described by mask 'x'.
 * The mask use of 'x' is parenthesized for macro hygiene (the ##-paste
 * operand cannot be).
 */
#define MS(v,x)	(((v) & (x)) >> x##_S)
#define SM(v,x)	(((v) << x##_S) & (x))
/*
 * Forward declarations for the driver's net80211/ifnet entry points and
 * internal helpers.  NOTE(review): this dump has elided lines (the
 * embedded original line numbers are non-contiguous), so a few
 * prototypes and the #endif matching the #ifdef below are not visible
 * here -- confirm against the full file.
 */
86 static struct ieee80211vap *mwl_vap_create(struct ieee80211com *,
87 const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
88 const uint8_t [IEEE80211_ADDR_LEN],
89 const uint8_t [IEEE80211_ADDR_LEN]);
90 static void mwl_vap_delete(struct ieee80211vap *);
91 static int mwl_setupdma(struct mwl_softc *);
92 static int mwl_hal_reset(struct mwl_softc *sc);
93 static int mwl_init_locked(struct mwl_softc *);
94 static void mwl_init(void *);
95 static void mwl_stop_locked(struct ifnet *, int);
96 static int mwl_reset(struct ieee80211vap *, u_long);
97 static void mwl_stop(struct ifnet *, int);
98 static void mwl_start(struct ifnet *);
99 static int mwl_raw_xmit(struct ieee80211_node *, struct mbuf *,
100 const struct ieee80211_bpf_params *);
101 static int mwl_media_change(struct ifnet *);
102 static void mwl_watchdog(void *);
103 static int mwl_ioctl(struct ifnet *, u_long, caddr_t);
/* Deferred (taskqueue) handlers for radar, CSA and BA watchdog events. */
104 static void mwl_radar_proc(void *, int);
105 static void mwl_chanswitch_proc(void *, int);
106 static void mwl_bawatchdog_proc(void *, int);
/* h/w key cache management hooks installed on each vap. */
107 static int mwl_key_alloc(struct ieee80211vap *,
108 struct ieee80211_key *,
109 ieee80211_keyix *, ieee80211_keyix *);
110 static int mwl_key_delete(struct ieee80211vap *,
111 const struct ieee80211_key *);
112 static int mwl_key_set(struct ieee80211vap *, const struct ieee80211_key *,
113 const uint8_t mac[IEEE80211_ADDR_LEN]);
114 static int mwl_mode_init(struct mwl_softc *);
115 static void mwl_update_mcast(struct ifnet *);
116 static void mwl_update_promisc(struct ifnet *);
117 static void mwl_updateslot(struct ifnet *);
118 static int mwl_beacon_setup(struct ieee80211vap *);
119 static void mwl_beacon_update(struct ieee80211vap *, int);
/* Host-driven power-save support is compile-time optional. */
120 #ifdef MWL_HOST_PS_SUPPORT
121 static void mwl_update_ps(struct ieee80211vap *, int);
122 static int mwl_set_tim(struct ieee80211_node *, int);
124 static int mwl_dma_setup(struct mwl_softc *);
125 static void mwl_dma_cleanup(struct mwl_softc *);
126 static struct ieee80211_node *mwl_node_alloc(struct ieee80211vap *,
127 const uint8_t [IEEE80211_ADDR_LEN]);
128 static void mwl_node_cleanup(struct ieee80211_node *);
129 static void mwl_node_drain(struct ieee80211_node *);
130 static void mwl_node_getsignal(const struct ieee80211_node *,
132 static void mwl_node_getmimoinfo(const struct ieee80211_node *,
133 struct ieee80211_mimo_info *);
134 static int mwl_rxbuf_init(struct mwl_softc *, struct mwl_rxbuf *);
135 static void mwl_rx_proc(void *, int);
136 static void mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *, int);
137 static int mwl_tx_setup(struct mwl_softc *, int, int);
138 static int mwl_wme_update(struct ieee80211com *);
139 static void mwl_tx_cleanupq(struct mwl_softc *, struct mwl_txq *);
140 static void mwl_tx_cleanup(struct mwl_softc *);
141 static uint16_t mwl_calcformat(uint8_t rate, const struct ieee80211_node *);
142 static int mwl_tx_start(struct mwl_softc *, struct ieee80211_node *,
143 struct mwl_txbuf *, struct mbuf *);
144 static void mwl_tx_proc(void *, int);
145 static int mwl_chan_set(struct mwl_softc *, struct ieee80211_channel *);
146 static void mwl_draintxq(struct mwl_softc *);
147 static void mwl_cleartxq(struct mwl_softc *, struct ieee80211vap *);
/* A-MPDU/block-ack action frame intercepts (wrap net80211 defaults). */
148 static int mwl_recv_action(struct ieee80211_node *,
149 const struct ieee80211_frame *,
150 const uint8_t *, const uint8_t *);
151 static int mwl_addba_request(struct ieee80211_node *,
152 struct ieee80211_tx_ampdu *, int dialogtoken,
153 int baparamset, int batimeout);
154 static int mwl_addba_response(struct ieee80211_node *,
155 struct ieee80211_tx_ampdu *, int status,
156 int baparamset, int batimeout);
157 static void mwl_addba_stop(struct ieee80211_node *,
158 struct ieee80211_tx_ampdu *);
159 static int mwl_startrecv(struct mwl_softc *);
160 static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *,
161 struct ieee80211_channel *);
162 static int mwl_setapmode(struct ieee80211vap *, struct ieee80211_channel*);
163 static void mwl_scan_start(struct ieee80211com *);
164 static void mwl_scan_end(struct ieee80211com *);
165 static void mwl_set_channel(struct ieee80211com *);
/* Firmware station-db maintenance and station-id allocation. */
166 static int mwl_peerstadb(struct ieee80211_node *,
167 int aid, int staid, MWL_HAL_PEERINFO *pi);
168 static int mwl_localstadb(struct ieee80211vap *);
169 static int mwl_newstate(struct ieee80211vap *, enum ieee80211_state, int);
170 static int allocstaid(struct mwl_softc *sc, int aid);
171 static void delstaid(struct mwl_softc *sc, int staid);
172 static void mwl_newassoc(struct ieee80211_node *, int);
173 static void mwl_agestations(void *);
174 static int mwl_setregdomain(struct ieee80211com *,
175 struct ieee80211_regdomain *, int,
176 struct ieee80211_channel []);
177 static void mwl_getradiocaps(struct ieee80211com *, int, int *,
178 struct ieee80211_channel []);
179 static int mwl_getchannels(struct mwl_softc *);
181 static void mwl_sysctlattach(struct mwl_softc *);
182 static void mwl_announce(struct mwl_softc *);
/*
 * Tunable driver parameters, exported under the hw.mwl sysctl tree and
 * (where a TUNABLE_INT is present) settable from loader.conf.
 * NOTE(review): rxdesc has no TUNABLE_INT visible in this excerpt --
 * confirm whether that is intentional.
 */
184 SYSCTL_NODE(_hw, OID_AUTO, mwl, CTLFLAG_RD, 0, "Marvell driver parameters");
186 static int mwl_rxdesc = MWL_RXDESC; /* # rx desc's to allocate */
187 SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc,
188 0, "rx descriptors allocated");
189 static int mwl_rxbuf = MWL_RXBUF; /* # rx buffers to allocate */
190 SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RW, &mwl_rxbuf,
191 0, "rx buffers allocated");
192 TUNABLE_INT("hw.mwl.rxbuf", &mwl_rxbuf);
193 static int mwl_txbuf = MWL_TXBUF; /* # tx buffers to allocate */
194 SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RW, &mwl_txbuf,
195 0, "tx buffers allocated");
196 TUNABLE_INT("hw.mwl.txbuf", &mwl_txbuf);
197 static int mwl_txcoalesce = 8; /* # tx packets to q before poking f/w*/
198 SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RW, &mwl_txcoalesce,
199 0, "tx buffers to send at once");
200 TUNABLE_INT("hw.mwl.txcoalesce", &mwl_txcoalesce);
201 static int mwl_rxquota = MWL_RXBUF; /* # max buffers to process */
202 SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RW, &mwl_rxquota,
203 0, "max rx buffers to process per interrupt");
204 TUNABLE_INT("hw.mwl.rxquota", &mwl_rxquota);
205 static int mwl_rxdmalow = 3; /* # min buffers for wakeup */
206 SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RW, &mwl_rxdmalow,
207 0, "min free rx buffers before restarting traffic");
208 TUNABLE_INT("hw.mwl.rxdmalow", &mwl_rxdmalow);
/* Debug mask; bit meanings are enumerated below. */
211 static int mwl_debug = 0;
212 SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RW, &mwl_debug,
213 0, "control debugging printfs");
214 TUNABLE_INT("hw.mwl.debug", &mwl_debug);
/*
 * Bit values for the sc_debug / hw.mwl.debug mask.  Each bit enables
 * one class of diagnostic printf.  NOTE(review): the opening "enum {"
 * for these enumerators is elided in this excerpt.
 */
216 MWL_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
217 MWL_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */
218 MWL_DEBUG_RECV = 0x00000004, /* basic recv operation */
219 MWL_DEBUG_RECV_DESC = 0x00000008, /* recv descriptors */
220 MWL_DEBUG_RESET = 0x00000010, /* reset processing */
221 MWL_DEBUG_BEACON = 0x00000020, /* beacon handling */
222 MWL_DEBUG_INTR = 0x00000040, /* ISR */
223 MWL_DEBUG_TX_PROC = 0x00000080, /* tx ISR proc */
224 MWL_DEBUG_RX_PROC = 0x00000100, /* rx ISR proc */
225 MWL_DEBUG_KEYCACHE = 0x00000200, /* key cache management */
226 MWL_DEBUG_STATE = 0x00000400, /* 802.11 state transitions */
227 MWL_DEBUG_NODE = 0x00000800, /* node management */
228 MWL_DEBUG_RECV_ALL = 0x00001000, /* trace all frames (beacons) */
229 MWL_DEBUG_TSO = 0x00002000, /* TSO processing */
230 MWL_DEBUG_AMPDU = 0x00004000, /* BA stream handling */
231 MWL_DEBUG_ANY = 0xffffffff
/*
 * IS_BEACON(wh): true when the 802.11 frame header pointed to by 'wh'
 * is a beacon (management type + beacon subtype).  'wh' is
 * parenthesized so the macro is safe for arbitrary pointer
 * expressions, not just plain identifiers.
 */
#define IS_BEACON(wh) \
	(((wh)->i_fc[0] & (IEEE80211_FC0_TYPE_MASK|IEEE80211_FC0_SUBTYPE_MASK)) == \
	 (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON))
/*
 * Debug-build packet-dump predicates: dump a frame when the matching
 * debug bit is set (beacons only with RECV_ALL) or when the interface
 * has both IFF_DEBUG and IFF_LINK2 set.
 */
236 #define IFF_DUMPPKTS_RECV(sc, wh) \
237 (((sc->sc_debug & MWL_DEBUG_RECV) && \
238 ((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IS_BEACON(wh))) || \
239 (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
240 #define IFF_DUMPPKTS_XMIT(sc) \
241 ((sc->sc_debug & MWL_DEBUG_XMIT) || \
242 (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
/*
 * Masked debug printf and key-cache print helpers.  NOTE(review): the
 * "} while (0)" closers (and the surrounding #ifdef MWL_DEBUG) are
 * elided in this excerpt.
 */
243 #define DPRINTF(sc, m, fmt, ...) do { \
244 if (sc->sc_debug & (m)) \
245 printf(fmt, __VA_ARGS__); \
247 #define KEYPRINTF(sc, hk, mac) do { \
248 if (sc->sc_debug & MWL_DEBUG_KEYCACHE) \
249 mwl_keyprint(sc, __func__, hk, mac); \
251 static void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix);
252 static void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix);
/*
 * Presumably the !MWL_DEBUG variants of the same macros (the #else is
 * elided): dumping keyed only off IFF_DEBUG|IFF_LINK2, printfs stubbed.
 */
254 #define IFF_DUMPPKTS_RECV(sc, wh) \
255 ((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
256 #define IFF_DUMPPKTS_XMIT(sc) \
257 ((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
258 #define DPRINTF(sc, m, fmt, ...) do { \
261 #define KEYPRINTF(sc, k, mac) do { \
/* Malloc type used for driver DMA bookkeeping allocations. */
266 static MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers");
269 * Each packet has fixed front matter: a 2-byte length
270 * of the payload, followed by a 4-address 802.11 header
271 * (regardless of the actual header and always w/o any
272 * QoS header). The payload then follows.
/* NOTE(review): the "struct mwltxrec {" header and length field are elided here. */
276 struct ieee80211_frame_addr4 wh;
280 * Read/Write shorthands for accesses to BAR 0. Note
281 * that all BAR 1 operations are done in the "hal" and
282 * there should be no reference to them here.
/*
 * RD4/WR4: 32-bit register read/write against BAR 0 via the cached
 * bus-space tag/handle.  (Function braces are elided in this excerpt.)
 */
285 static __inline uint32_t
286 RD4(struct mwl_softc *sc, bus_size_t off)
288 return bus_space_read_4(sc->sc_io0t, sc->sc_io0h, off);
/* WR4: companion 32-bit register write (its "static __inline void" line is elided). */
293 WR4(struct mwl_softc *sc, bus_size_t off, uint32_t val)
295 bus_space_write_4(sc->sc_io0t, sc->sc_io0h, off, val);
/*
 * mwl_attach: one-time device attach.  Allocates the ifnet, attaches
 * the HAL and loads (station) firmware, fetches h/w specs and the
 * channel list, sets up DMA and the taskqueue, creates WME tx queues,
 * then attaches net80211 state and installs driver method overrides.
 * NOTE(review): many lines (declarations, braces, error-path labels)
 * are elided in this excerpt, so control flow shown here is partial.
 */
299 mwl_attach(uint16_t devid, struct mwl_softc *sc)
302 struct ieee80211com *ic;
306 DPRINTF(sc, MWL_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);
308 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
310 device_printf(sc->sc_dev, "cannot if_alloc()\n");
316 * Setup the RX free list lock early, so it can be consistently
321 /* set these up early for if_printf use */
322 if_initname(ifp, device_get_name(sc->sc_dev),
323 device_get_unit(sc->sc_dev));
325 mh = mwl_hal_attach(sc->sc_dev, devid,
326 sc->sc_io1h, sc->sc_io1t, sc->sc_dmat);
328 if_printf(ifp, "unable to attach HAL\n");
334 * Load firmware so we can get setup. We arbitrarily
335 * pick station firmware; we'll re-load firmware as
336 * needed so setting up the wrong mode isn't a big deal.
338 if (mwl_hal_fwload(mh, NULL) != 0) {
339 if_printf(ifp, "unable to setup builtin firmware\n");
343 if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
344 if_printf(ifp, "unable to fetch h/w specs\n");
348 error = mwl_getchannels(sc);
/* Default antenna/age settings before enabling interrupt handling. */
352 sc->sc_txantenna = 0; /* h/w default */
353 sc->sc_rxantenna = 0; /* h/w default */
354 sc->sc_invalid = 0; /* ready to go, enable int handling */
355 sc->sc_ageinterval = MWL_AGEINTERVAL;
358 * Allocate tx+rx descriptors and populate the lists.
359 * We immediately push the information to the firmware
360 * as otherwise it gets upset.
362 error = mwl_dma_setup(sc);
364 if_printf(ifp, "failed to setup descriptors: %d\n", error);
367 error = mwl_setupdma(sc); /* push to firmware */
368 if (error != 0) /* NB: mwl_setupdma prints msg */
371 callout_init(&sc->sc_timer, CALLOUT_MPSAFE);
372 callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);
/* Single-thread taskqueue for deferred rx/tx/radar/CSA/BA work. */
374 sc->sc_tq = taskqueue_create("mwl_taskq", M_NOWAIT,
375 taskqueue_thread_enqueue, &sc->sc_tq);
376 taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
377 "%s taskq", ifp->if_xname);
379 TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
380 TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc);
381 TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc);
382 TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc);
384 /* NB: insure BK queue is the lowest priority h/w queue */
385 if (!mwl_tx_setup(sc, WME_AC_BK, MWL_WME_AC_BK)) {
386 if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
387 ieee80211_wme_acnames[WME_AC_BK]);
391 if (!mwl_tx_setup(sc, WME_AC_BE, MWL_WME_AC_BE) ||
392 !mwl_tx_setup(sc, WME_AC_VI, MWL_WME_AC_VI) ||
393 !mwl_tx_setup(sc, WME_AC_VO, MWL_WME_AC_VO)) {
395 * Not enough hardware tx queues to properly do WME;
396 * just punt and assign them all to the same h/w queue.
397 * We could do a better job of this if, for example,
398 * we allocate queues when we switch from station to
401 if (sc->sc_ac2q[WME_AC_VI] != NULL)
402 mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
403 if (sc->sc_ac2q[WME_AC_BE] != NULL)
404 mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
405 sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
406 sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
407 sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
409 TASK_INIT(&sc->sc_txtask, 0, mwl_tx_proc, sc);
/* ifnet method wiring and send queue sizing. */
412 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
413 ifp->if_start = mwl_start;
414 ifp->if_ioctl = mwl_ioctl;
415 ifp->if_init = mwl_init;
416 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
417 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
418 IFQ_SET_READY(&ifp->if_snd);
421 /* XXX not right but it's not used anywhere important */
422 ic->ic_phytype = IEEE80211_T_OFDM;
423 ic->ic_opmode = IEEE80211_M_STA;
/* Advertised device capabilities (assignment to ic_caps elided above). */
425 IEEE80211_C_STA /* station mode supported */
426 | IEEE80211_C_HOSTAP /* hostap mode */
427 | IEEE80211_C_MONITOR /* monitor mode */
429 | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */
430 | IEEE80211_C_AHDEMO /* adhoc demo mode */
432 | IEEE80211_C_MBSS /* mesh point link mode */
433 | IEEE80211_C_WDS /* WDS supported */
434 | IEEE80211_C_SHPREAMBLE /* short preamble supported */
435 | IEEE80211_C_SHSLOT /* short slot time supported */
436 | IEEE80211_C_WME /* WME/WMM supported */
437 | IEEE80211_C_BURST /* xmit bursting supported */
438 | IEEE80211_C_WPA /* capable of WPA1+WPA2 */
439 | IEEE80211_C_BGSCAN /* capable of bg scanning */
440 | IEEE80211_C_TXFRAG /* handle tx frags */
441 | IEEE80211_C_TXPMGT /* capable of txpow mgt */
442 | IEEE80211_C_DFS /* DFS supported */
/* HT capabilities (assignment to ic_htcaps elided above). */
446 IEEE80211_HTCAP_SMPS_ENA /* SM PS mode enabled */
447 | IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width */
448 | IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */
449 | IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */
450 | IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
451 #if MWL_AGGR_SIZE == 7935
452 | IEEE80211_HTCAP_MAXAMSDU_7935 /* max A-MSDU length */
454 | IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */
457 | IEEE80211_HTCAP_PSMP /* PSMP supported */
458 | IEEE80211_HTCAP_40INTOLERANT /* 40MHz intolerant */
460 /* s/w capabilities */
461 | IEEE80211_HTC_HT /* HT operation */
462 | IEEE80211_HTC_AMPDU /* tx A-MPDU */
463 | IEEE80211_HTC_AMSDU /* tx A-MSDU */
464 | IEEE80211_HTC_SMPS /* SMPS available */
468 * Mark h/w crypto support.
469 * XXX no way to query h/w support.
471 ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
472 | IEEE80211_CRYPTO_AES_CCM
473 | IEEE80211_CRYPTO_TKIP
474 | IEEE80211_CRYPTO_TKIPMIC
477 * Transmit requires space in the packet for a special
478 * format transmit record and optional padding between
479 * this record and the payload. Ask the net80211 layer
480 * to arrange this when encapsulating packets so we can
481 * add it efficiently.
483 ic->ic_headroom = sizeof(struct mwltxrec) -
484 sizeof(struct ieee80211_frame);
486 /* call MI attach routine. */
487 ieee80211_ifattach(ic, sc->sc_hwspecs.macAddr);
488 ic->ic_setregdomain = mwl_setregdomain;
489 ic->ic_getradiocaps = mwl_getradiocaps;
490 /* override default methods */
491 ic->ic_raw_xmit = mwl_raw_xmit;
492 ic->ic_newassoc = mwl_newassoc;
493 ic->ic_updateslot = mwl_updateslot;
494 ic->ic_update_mcast = mwl_update_mcast;
495 ic->ic_update_promisc = mwl_update_promisc;
496 ic->ic_wme.wme_update = mwl_wme_update;
/* Save net80211 defaults so the driver hooks can chain to them. */
498 ic->ic_node_alloc = mwl_node_alloc;
499 sc->sc_node_cleanup = ic->ic_node_cleanup;
500 ic->ic_node_cleanup = mwl_node_cleanup;
501 sc->sc_node_drain = ic->ic_node_drain;
502 ic->ic_node_drain = mwl_node_drain;
503 ic->ic_node_getsignal = mwl_node_getsignal;
504 ic->ic_node_getmimoinfo = mwl_node_getmimoinfo;
506 ic->ic_scan_start = mwl_scan_start;
507 ic->ic_scan_end = mwl_scan_end;
508 ic->ic_set_channel = mwl_set_channel;
510 sc->sc_recv_action = ic->ic_recv_action;
511 ic->ic_recv_action = mwl_recv_action;
512 sc->sc_addba_request = ic->ic_addba_request;
513 ic->ic_addba_request = mwl_addba_request;
514 sc->sc_addba_response = ic->ic_addba_response;
515 ic->ic_addba_response = mwl_addba_response;
516 sc->sc_addba_stop = ic->ic_addba_stop;
517 ic->ic_addba_stop = mwl_addba_stop;
519 ic->ic_vap_create = mwl_vap_create;
520 ic->ic_vap_delete = mwl_vap_delete;
522 ieee80211_radiotap_attach(ic,
523 &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
524 MWL_TX_RADIOTAP_PRESENT,
525 &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
526 MWL_RX_RADIOTAP_PRESENT);
528 * Setup dynamic sysctl's now that country code and
529 * regdomain are available from the hal.
531 mwl_sysctlattach(sc);
534 ieee80211_announce(ic);
/* NOTE(review): error-unwind labels are elided; this is part of the bail path. */
542 MWL_RXFREE_DESTROY(sc);
/*
 * mwl_detach: device detach.  Tears down net80211 state first so
 * callbacks into the driver (key cache, node state) still work, then
 * releases DMA/rx resources and detaches the HAL.  (Several lines are
 * elided in this excerpt.)
 */
549 mwl_detach(struct mwl_softc *sc)
551 struct ifnet *ifp = sc->sc_ifp;
552 struct ieee80211com *ic = ifp->if_l2com;
554 DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
555 __func__, ifp->if_flags);
559 * NB: the order of these is important:
560 * o call the 802.11 layer before detaching the hal to
561 * insure callbacks into the driver to delete global
562 * key cache entries can be handled
563 * o reclaim the tx queue data structures after calling
564 * the 802.11 layer as we'll get called back to reclaim
565 * node state and potentially want to use them
566 * o to cleanup the tx queues the hal is called, so detach
568 * Other than that, it's straightforward...
570 ieee80211_ifdetach(ic);
571 callout_drain(&sc->sc_watchdog);
573 MWL_RXFREE_DESTROY(sc);
575 mwl_hal_detach(sc->sc_mh);
582 * MAC address handling for multiple BSS on the same radio.
583 * The first vap uses the MAC address from the EEPROM. For
584 * subsequent vap's we set the U/L bit (bit 1) in the MAC
585 * address and use the next six bits as an index.
/*
 * assign_address: pick a (possibly cloned) MAC for a new vap and mark
 * its slot in sc_bssidmask.  ("static void" line and parts of the body
 * are elided in this excerpt.)
 */
588 assign_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
592 if (clone && mwl_hal_ismbsscapable(sc->sc_mh)) {
593 /* NB: we only do this if h/w supports multiple bssid */
594 for (i = 0; i < 32; i++)
595 if ((sc->sc_bssidmask & (1<<i)) == 0)
/* Encode the slot index and set the locally-administered (U/L) bit. */
598 mac[0] |= (i << 2)|0x2;
601 sc->sc_bssidmask |= 1<<i;
/* reclaim_address: release a vap's bssid-mask slot on delete. */
607 reclaim_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN])
610 if (i != 0 || --sc->sc_nbssid0 == 0)
611 sc->sc_bssidmask &= ~(1<<i);
/*
 * mwl_vap_create: create a vap, assign it a MAC address, create the
 * matching HAL vap where the mode needs one (AP/MBSS/STA), and install
 * per-vap driver method overrides.  WDS vaps piggyback on an existing
 * AP vap's HAL handle.  (Many interior lines are elided here.)
 */
614 static struct ieee80211vap *
615 mwl_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
616 enum ieee80211_opmode opmode, int flags,
617 const uint8_t bssid[IEEE80211_ADDR_LEN],
618 const uint8_t mac0[IEEE80211_ADDR_LEN])
620 struct ifnet *ifp = ic->ic_ifp;
621 struct mwl_softc *sc = ifp->if_softc;
622 struct mwl_hal *mh = sc->sc_mh;
623 struct ieee80211vap *vap, *apvap;
624 struct mwl_hal_vap *hvap;
626 uint8_t mac[IEEE80211_ADDR_LEN];
628 IEEE80211_ADDR_COPY(mac, mac0);
/* switch (opmode): per-mode HAL vap creation (switch header elided). */
630 case IEEE80211_M_HOSTAP:
631 case IEEE80211_M_MBSS:
632 if ((flags & IEEE80211_CLONE_MACADDR) == 0)
633 assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
634 hvap = mwl_hal_newvap(mh, MWL_HAL_AP, mac);
/* On HAL failure, give back the address slot we just took. */
636 if ((flags & IEEE80211_CLONE_MACADDR) == 0)
637 reclaim_address(sc, mac);
641 case IEEE80211_M_STA:
642 if ((flags & IEEE80211_CLONE_MACADDR) == 0)
643 assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
644 hvap = mwl_hal_newvap(mh, MWL_HAL_STA, mac);
646 if ((flags & IEEE80211_CLONE_MACADDR) == 0)
647 reclaim_address(sc, mac);
650 /* no h/w beacon miss support; always use s/w */
651 flags |= IEEE80211_CLONE_NOBEACONS;
653 case IEEE80211_M_WDS:
654 hvap = NULL; /* NB: we use associated AP vap */
655 if (sc->sc_napvaps == 0)
656 return NULL; /* no existing AP vap */
658 case IEEE80211_M_MONITOR:
661 case IEEE80211_M_IBSS:
662 case IEEE80211_M_AHDEMO:
667 mvp = (struct mwl_vap *) malloc(sizeof(struct mwl_vap),
668 M_80211_VAP, M_NOWAIT | M_ZERO);
/* Allocation-failure unwind: drop HAL vap and address reservation. */
671 mwl_hal_delvap(hvap);
672 if ((flags & IEEE80211_CLONE_MACADDR) == 0)
673 reclaim_address(sc, mac);
679 if (opmode == IEEE80211_M_WDS) {
681 * WDS vaps must have an associated AP vap; find one.
684 TAILQ_FOREACH(apvap, &ic->ic_vaps, iv_next)
685 if (apvap->iv_opmode == IEEE80211_M_HOSTAP) {
686 mvp->mv_ap_hvap = MWL_VAP(apvap)->mv_hvap;
689 KASSERT(mvp->mv_ap_hvap != NULL, ("no ap vap"));
692 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
694 IEEE80211_ADDR_COPY(vap->iv_myaddr, mac);
695 /* override with driver methods */
696 mvp->mv_newstate = vap->iv_newstate;
697 vap->iv_newstate = mwl_newstate;
698 vap->iv_max_keyix = 0; /* XXX */
699 vap->iv_key_alloc = mwl_key_alloc;
700 vap->iv_key_delete = mwl_key_delete;
701 vap->iv_key_set = mwl_key_set;
702 #ifdef MWL_HOST_PS_SUPPORT
703 if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
704 vap->iv_update_ps = mwl_update_ps;
705 mvp->mv_set_tim = vap->iv_set_tim;
706 vap->iv_set_tim = mwl_set_tim;
709 vap->iv_reset = mwl_reset;
710 vap->iv_update_beacon = mwl_beacon_update;
712 /* override max aid so sta's cannot assoc when we're out of sta id's */
713 vap->iv_max_aid = MWL_MAXSTAID;
714 /* override default A-MPDU rx parameters */
715 vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
716 vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;
719 ieee80211_vap_attach(vap, mwl_media_change, ieee80211_media_status);
721 switch (vap->iv_opmode) {
722 case IEEE80211_M_HOSTAP:
723 case IEEE80211_M_MBSS:
724 case IEEE80211_M_STA:
726 * Setup sta db entry for local address.
729 if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
730 vap->iv_opmode == IEEE80211_M_MBSS)
735 case IEEE80211_M_WDS:
742 * Setup overall operating mode.
745 ic->ic_opmode = IEEE80211_M_HOSTAP;
746 else if (sc->sc_nstavaps)
747 ic->ic_opmode = IEEE80211_M_STA;
749 ic->ic_opmode = opmode;
/*
 * mwl_vap_delete: tear down a vap.  Interrupts are quiesced while the
 * HAL vap and its firmware station entry are removed; the MAC address
 * slot is reclaimed for AP/MBSS vaps.  (Some lines elided here.)
 */
755 mwl_vap_delete(struct ieee80211vap *vap)
757 struct mwl_vap *mvp = MWL_VAP(vap);
758 struct ifnet *parent = vap->iv_ic->ic_ifp;
759 struct mwl_softc *sc = parent->if_softc;
760 struct mwl_hal *mh = sc->sc_mh;
761 struct mwl_hal_vap *hvap = mvp->mv_hvap;
762 enum ieee80211_opmode opmode = vap->iv_opmode;
764 /* XXX disallow ap vap delete if WDS still present */
765 if (parent->if_drv_flags & IFF_DRV_RUNNING) {
766 /* quiesce h/w while we remove the vap */
767 mwl_hal_intrset(mh, 0); /* disable interrupts */
769 ieee80211_vap_detach(vap);
/* switch (opmode): per-mode teardown (switch header elided). */
771 case IEEE80211_M_HOSTAP:
772 case IEEE80211_M_MBSS:
773 case IEEE80211_M_STA:
774 KASSERT(hvap != NULL, ("no hal vap handle"));
775 (void) mwl_hal_delstation(hvap, vap->iv_myaddr);
776 mwl_hal_delvap(hvap);
777 if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS)
781 /* XXX don't do it for IEEE80211_CLONE_MACADDR */
782 reclaim_address(sc, vap->iv_myaddr);
784 case IEEE80211_M_WDS:
790 mwl_cleartxq(sc, vap);
791 free(mvp, M_80211_VAP);
792 if (parent->if_drv_flags & IFF_DRV_RUNNING)
793 mwl_hal_intrset(mh, sc->sc_imask);
/* mwl_suspend: power-management suspend hook (body partially elided). */
797 mwl_suspend(struct mwl_softc *sc)
799 struct ifnet *ifp = sc->sc_ifp;
801 DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
802 __func__, ifp->if_flags);
/* mwl_resume: restart the interface if it was up before suspend. */
808 mwl_resume(struct mwl_softc *sc)
810 struct ifnet *ifp = sc->sc_ifp;
812 DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
813 __func__, ifp->if_flags);
815 if (ifp->if_flags & IFF_UP)
/* mwl_shutdown: system-shutdown hook; stop the device. */
820 mwl_shutdown(void *arg)
822 struct mwl_softc *sc = arg;
824 mwl_stop(sc->sc_ifp, 1);
828 * Interrupt handler. Most of the actual processing is deferred.
/* mwl_intr: hard interrupt; reads/acks the ISR and queues taskqueue work. */
833 struct mwl_softc *sc = arg;
834 struct mwl_hal *mh = sc->sc_mh;
837 if (sc->sc_invalid) {
839 * The hardware is not ready/present, don't touch anything.
840 * Note this can happen early on if the IRQ is shared.
842 DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
846 * Figure out the reason(s) for the interrupt.
848 mwl_hal_getisr(mh, &status); /* NB: clears ISR too */
849 if (status == 0) /* must be a shared irq */
852 DPRINTF(sc, MWL_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n",
853 __func__, status, sc->sc_imask);
/* Dispatch each cause bit to its deferred handler. */
854 if (status & MACREG_A2HRIC_BIT_RX_RDY)
855 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
856 if (status & MACREG_A2HRIC_BIT_TX_DONE)
857 taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
858 if (status & MACREG_A2HRIC_BIT_BA_WATCHDOG)
859 taskqueue_enqueue(sc->sc_tq, &sc->sc_bawatchdogtask);
860 if (status & MACREG_A2HRIC_BIT_OPC_DONE)
862 if (status & MACREG_A2HRIC_BIT_MAC_EVENT) {
865 if (status & MACREG_A2HRIC_BIT_ICV_ERROR) {
867 sc->sc_stats.mst_rx_badtkipicv++;
869 if (status & MACREG_A2HRIC_BIT_QUEUE_EMPTY) {
870 /* 11n aggregation queue is empty, re-fill */
873 if (status & MACREG_A2HRIC_BIT_QUEUE_FULL) {
876 if (status & MACREG_A2HRIC_BIT_RADAR_DETECT) {
877 /* radar detected, process event */
878 taskqueue_enqueue(sc->sc_tq, &sc->sc_radartask);
880 if (status & MACREG_A2HRIC_BIT_CHAN_SWITCH) {
881 /* DFS channel switch */
882 taskqueue_enqueue(sc->sc_tq, &sc->sc_chanswitchtask);
/* mwl_radar_proc: deferred radar-detect handler; notifies net80211 DFS. */
887 mwl_radar_proc(void *arg, int pending)
889 struct mwl_softc *sc = arg;
890 struct ifnet *ifp = sc->sc_ifp;
891 struct ieee80211com *ic = ifp->if_l2com;
893 DPRINTF(sc, MWL_DEBUG_ANY, "%s: radar detected, pending %u\n",
896 sc->sc_stats.mst_radardetect++;
897 /* XXX stop h/w BA streams? */
900 ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
901 IEEE80211_UNLOCK(ic);
/* mwl_chanswitch_proc: deferred DFS channel-switch completion. */
905 mwl_chanswitch_proc(void *arg, int pending)
907 struct mwl_softc *sc = arg;
908 struct ifnet *ifp = sc->sc_ifp;
909 struct ieee80211com *ic = ifp->if_l2com;
911 DPRINTF(sc, MWL_DEBUG_ANY, "%s: channel switch notice, pending %u\n",
915 sc->sc_csapending = 0;
916 ieee80211_csa_completeswitch(ic);
917 IEEE80211_UNLOCK(ic);
/* mwl_bawatchdog: tear down one stalled BA stream via DELBA. */
921 mwl_bawatchdog(const MWL_HAL_BASTREAM *sp)
923 struct ieee80211_node *ni = sp->data[0];
925 /* send DELBA and drop the stream */
926 ieee80211_ampdu_stop(ni, sp->data[1], IEEE80211_REASON_UNSPECIFIED);
/*
 * mwl_bawatchdog_proc: deferred BA watchdog.  The f/w bitmap selects
 * which stream(s) stalled; 0xff appears to mean "all" and 0xaa "none"
 * -- NOTE(review): confirm these magic values against f/w docs.
 */
930 mwl_bawatchdog_proc(void *arg, int pending)
932 struct mwl_softc *sc = arg;
933 struct mwl_hal *mh = sc->sc_mh;
934 const MWL_HAL_BASTREAM *sp;
937 sc->sc_stats.mst_bawatchdog++;
939 if (mwl_hal_getwatchdogbitmap(mh, &bitmap) != 0) {
940 DPRINTF(sc, MWL_DEBUG_AMPDU,
941 "%s: could not get bitmap\n", __func__);
942 sc->sc_stats.mst_bawatchdog_failed++;
945 DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bitmap 0x%x\n", __func__, bitmap);
946 if (bitmap == 0xff) {
948 /* disable all ba streams */
949 for (bitmap = 0; bitmap < 8; bitmap++) {
950 sp = mwl_hal_bastream_lookup(mh, bitmap);
957 DPRINTF(sc, MWL_DEBUG_AMPDU,
958 "%s: no BA streams found\n", __func__);
959 sc->sc_stats.mst_bawatchdog_empty++;
961 } else if (bitmap != 0xaa) {
962 /* disable a single ba stream */
963 sp = mwl_hal_bastream_lookup(mh, bitmap);
967 DPRINTF(sc, MWL_DEBUG_AMPDU,
968 "%s: no BA stream %d\n", __func__, bitmap);
969 sc->sc_stats.mst_bawatchdog_notfound++;
975 * Convert net80211 channel to a HAL channel.
/* Fills 'hc' from 'chan': band, 20/40MHz width and HT40 ext-channel offset. */
978 mwl_mapchan(MWL_HAL_CHANNEL *hc, const struct ieee80211_channel *chan)
980 hc->channel = chan->ic_ieee;
/* Zero all flag bits at once via the overlaying 32-bit word. */
982 *(uint32_t *)&hc->channelFlags = 0;
983 if (IEEE80211_IS_CHAN_2GHZ(chan))
984 hc->channelFlags.FreqBand = MWL_FREQ_BAND_2DOT4GHZ;
985 else if (IEEE80211_IS_CHAN_5GHZ(chan))
986 hc->channelFlags.FreqBand = MWL_FREQ_BAND_5GHZ;
987 if (IEEE80211_IS_CHAN_HT40(chan)) {
988 hc->channelFlags.ChnlWidth = MWL_CH_40_MHz_WIDTH;
989 if (IEEE80211_IS_CHAN_HT40U(chan))
990 hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_ABOVE_CTRL_CH;
992 hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_BELOW_CTRL_CH;
994 hc->channelFlags.ChnlWidth = MWL_CH_20_MHz_WIDTH;
995 /* XXX 10MHz channels */
999 * Inform firmware of our tx/rx dma setup. The BAR 0
1000 * writes below are for compatibility with older firmware.
1001 * For current firmware we send this information with a
1002 * cmd block via mwl_hal_sethwdma.
1005 mwl_setupdma(struct mwl_softc *sc)
/* Publish the rx descriptor ring base to the h/w (read == write == start). */
1009 sc->sc_hwdma.rxDescRead = sc->sc_rxdma.dd_desc_paddr;
1010 WR4(sc, sc->sc_hwspecs.rxDescRead, sc->sc_hwdma.rxDescRead);
1011 WR4(sc, sc->sc_hwspecs.rxDescWrite, sc->sc_hwdma.rxDescRead);
/* One WCB (tx ring) base per non-ack tx queue. */
1013 for (i = 0; i < MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; i++) {
1014 struct mwl_txq *txq = &sc->sc_txq[i];
1015 sc->sc_hwdma.wcbBase[i] = txq->dma.dd_desc_paddr;
1016 WR4(sc, sc->sc_hwspecs.wcbBase[i], sc->sc_hwdma.wcbBase[i]);
1018 sc->sc_hwdma.maxNumTxWcb = mwl_txbuf;
1019 sc->sc_hwdma.maxNumWCB = MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES;
1021 error = mwl_hal_sethwdma(sc->sc_mh, &sc->sc_hwdma);
1023 device_printf(sc->sc_dev,
1024 "unable to setup tx/rx dma; hal status %u\n", error);
1031 * Inform firmware of tx rate parameters.
1032 * Called after a channel change.
/* mwl_setcurchanrates: derive mgmt/mcast rates from the current channel. */
1035 mwl_setcurchanrates(struct mwl_softc *sc)
1037 struct ifnet *ifp = sc->sc_ifp;
1038 struct ieee80211com *ic = ifp->if_l2com;
1039 const struct ieee80211_rateset *rs;
1040 MWL_HAL_TXRATE rates;
1042 memset(&rates, 0, sizeof(rates));
1043 rs = ieee80211_get_suprates(ic, ic->ic_curchan);
1044 /* rate used to send management frames */
1045 rates.MgtRate = rs->rs_rates[0] & IEEE80211_RATE_VAL;
1046 /* rate used to send multicast frames */
1047 rates.McastRate = rates.MgtRate;
1049 return mwl_hal_settxrate_auto(sc->sc_mh, &rates);
1053 * Inform firmware of tx rate parameters. Called whenever
1054 * user-settable params change and after a channel change.
/* Push the vap's user-settable tx rate parameters to the firmware.
 * Also recomputes the cached EAPOL fixed-rate cookie so subsequent
 * EAPOL frames use the management rate. Only legal in RUN state. */
1057 mwl_setrates(struct ieee80211vap *vap)
1059 struct mwl_vap *mvp = MWL_VAP(vap);
1060 struct ieee80211_node *ni = vap->iv_bss;
1061 const struct ieee80211_txparam *tp = ni->ni_txparms;
1062 MWL_HAL_TXRATE rates;
1064 KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1067 * Update the h/w rate map.
1068 * NB: 0x80 for MCS is passed through unchanged
1070 memset(&rates, 0, sizeof(rates));
1071 /* rate used to send management frames */
1072 rates.MgtRate = tp->mgmtrate;
1073 /* rate used to send multicast frames */
1074 rates.McastRate = tp->mcastrate;
1076 /* while here calculate EAPOL fixed rate cookie */
1077 mvp->mv_eapolformat = htole16(mwl_calcformat(rates.MgtRate, ni));
/* fixed rate only when the user pinned the unicast rate */
1079 return mwl_hal_settxrate(mvp->mv_hvap,
1080 tp->ucastrate != IEEE80211_FIXED_RATE_NONE ?
1081 RATE_FIXED : RATE_AUTO, &rates);
1085 * Setup a fixed xmit rate cookie for EAPOL frames.
/* Recompute the EAPOL fixed-rate cookie for this vap from the current
 * channel mode. Only legal in RUN state. */
1088 mwl_seteapolformat(struct ieee80211vap *vap)
1090 struct mwl_vap *mvp = MWL_VAP(vap);
1091 struct ieee80211_node *ni = vap->iv_bss;
1092 enum ieee80211_phymode mode;
1095 KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1097 mode = ieee80211_chan2mode(ni->ni_chan);
1099 * Use legacy rates when operating a mixed HT+non-HT bss.
1100 * NB: this may violate POLA for sta and wds vap's.
/* In a mixed (non-pure-N) 11n bss, fall back to the corresponding
 * legacy band's mgmt rate so non-HT stations can receive EAPOL. */
1102 if (mode == IEEE80211_MODE_11NA &&
1103 (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1104 rate = vap->iv_txparms[IEEE80211_MODE_11A].mgmtrate;
1105 else if (mode == IEEE80211_MODE_11NG &&
1106 (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1107 rate = vap->iv_txparms[IEEE80211_MODE_11G].mgmtrate;
1109 rate = vap->iv_txparms[mode].mgmtrate;
1111 mvp->mv_eapolformat = htole16(mwl_calcformat(rate, ni));
1115 * Map SKU+country code to region code for radar bin'ing.
/* Map a net80211 regulatory domain (SKU) + country code to the firmware
 * region code used for radar detection binning. Unknown SKUs default
 * to the FCC domain. */
1118 mwl_map2regioncode(const struct ieee80211_regdomain *rd)
1120 switch (rd->regdomain) {
1123 return DOMAIN_CODE_FCC;
1125 return DOMAIN_CODE_IC;
/* ETSI SKU: Spain and France historically used distinct channel plans */
1129 if (rd->country == CTRY_SPAIN)
1130 return DOMAIN_CODE_SPAIN;
1131 if (rd->country == CTRY_FRANCE || rd->country == CTRY_FRANCE2)
1132 return DOMAIN_CODE_FRANCE;
1133 /* XXX force 1.3.1 radar type */
1134 return DOMAIN_CODE_ETSI_131;
1136 return DOMAIN_CODE_MKK;
1138 return DOMAIN_CODE_DGT; /* Taiwan */
1142 return DOMAIN_CODE_AUS; /* Australia */
1145 return DOMAIN_CODE_FCC; /* XXX? */
/* Push all vap-independent hardware state to the firmware: antenna
 * config, radio/preamble, WMM, current channel, rate adaptation,
 * burst optimization, region code, ampdu rate mode and cf-end. */
1149 mwl_hal_reset(struct mwl_softc *sc)
1151 struct ifnet *ifp = sc->sc_ifp;
1152 struct ieee80211com *ic = ifp->if_l2com;
1153 struct mwl_hal *mh = sc->sc_mh;
1155 mwl_hal_setantenna(mh, WL_ANTENNATYPE_RX, sc->sc_rxantenna);
1156 mwl_hal_setantenna(mh, WL_ANTENNATYPE_TX, sc->sc_txantenna);
1157 mwl_hal_setradio(mh, 1, WL_AUTO_PREAMBLE);
1158 mwl_hal_setwmm(sc->sc_mh, (ic->ic_flags & IEEE80211_F_WME) != 0);
1159 mwl_chan_set(sc, ic->ic_curchan);
1160 /* NB: RF/RA performance tuned for indoor mode */
1161 mwl_hal_setrateadaptmode(mh, 0);
1162 mwl_hal_setoptimizationlevel(mh,
1163 (ic->ic_flags & IEEE80211_F_BURST) != 0);
1165 mwl_hal_setregioncode(mh, mwl_map2regioncode(&ic->ic_regdomain));
1167 mwl_hal_setaggampduratemode(mh, 1, 80); /* XXX */
1168 mwl_hal_setcfend(mh, 0); /* XXX */
/* Bring the interface up with the softc lock held: stop any previous
 * state, reset the hardware, start receive, program the interrupt
 * mask, mark the interface running and arm the watchdog. */
1174 mwl_init_locked(struct mwl_softc *sc)
1176 struct ifnet *ifp = sc->sc_ifp;
1177 struct mwl_hal *mh = sc->sc_mh;
1180 DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
1181 __func__, ifp->if_flags);
1183 MWL_LOCK_ASSERT(sc);
1186 * Stop anything previously setup. This is safe
1187 * whether this is the first time through or not.
1189 mwl_stop_locked(ifp, 0);
1192 * Push vap-independent state to the firmware.
1194 if (!mwl_hal_reset(sc)) {
1195 if_printf(ifp, "unable to reset hardware\n");
1200 * Setup recv (once); transmit is already good to go.
1202 error = mwl_startrecv(sc);
1204 if_printf(ifp, "unable to start recv logic\n");
1209 * Enable interrupts.
1211 sc->sc_imask = MACREG_A2HRIC_BIT_RX_RDY
1212 | MACREG_A2HRIC_BIT_TX_DONE
1213 | MACREG_A2HRIC_BIT_OPC_DONE
1215 | MACREG_A2HRIC_BIT_MAC_EVENT
1217 | MACREG_A2HRIC_BIT_ICV_ERROR
1218 | MACREG_A2HRIC_BIT_RADAR_DETECT
1219 | MACREG_A2HRIC_BIT_CHAN_SWITCH
1221 | MACREG_A2HRIC_BIT_QUEUE_EMPTY
1223 | MACREG_A2HRIC_BIT_BA_WATCHDOG
1224 | MACREQ_A2HRIC_BIT_TX_ACK
1227 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1228 mwl_hal_intrset(mh, sc->sc_imask);
/* watchdog fires once a second to detect stuck transmits */
1229 callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);
/* mwl_init: if_init entry point — take the lock, run mwl_init_locked,
 * then (on success) start all vaps. */
1237 struct mwl_softc *sc = arg;
1238 struct ifnet *ifp = sc->sc_ifp;
1239 struct ieee80211com *ic = ifp->if_l2com;
1242 DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
1243 __func__, ifp->if_flags);
1246 error = mwl_init_locked(sc);
1250 ieee80211_start_all(ic); /* start all vap's */
/* Stop the interface with the softc lock held: clear RUNNING, cancel
 * the watchdog and reset the tx timer. Safe to call when already
 * stopped. */
1254 mwl_stop_locked(struct ifnet *ifp, int disable)
1256 struct mwl_softc *sc = ifp->if_softc;
1258 DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
1259 __func__, sc->sc_invalid, ifp->if_flags);
1261 MWL_LOCK_ASSERT(sc);
1262 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1264 * Shutdown the hardware and driver.
1266 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1267 callout_stop(&sc->sc_watchdog);
1268 sc->sc_tx_timer = 0;
/* Locked wrapper around mwl_stop_locked(). */
1274 mwl_stop(struct ifnet *ifp, int disable)
1276 struct mwl_softc *sc = ifp->if_softc;
1279 mwl_stop_locked(ifp, disable);
/* Re-push per-vap state (RTS threshold, HT guard interval, N
 * protection) to the firmware, and when running as an AP/MBSS/IBSS
 * re-setup the beacon as well. */
1284 mwl_reset_vap(struct ieee80211vap *vap, int state)
1286 struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1287 struct ieee80211com *ic = vap->iv_ic;
1289 if (state == IEEE80211_S_RUN)
1292 mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
1293 /* XXX auto? 20/40 split? */
1294 mwl_hal_sethtgi(hvap, (vap->iv_flags_ht &
1295 (IEEE80211_FHT_SHORTGI20|IEEE80211_FHT_SHORTGI40)) ? 1 : 0);
1296 mwl_hal_setnprot(hvap, ic->ic_htprotmode == IEEE80211_PROT_NONE ?
1297 HTPROTECT_NONE : HTPROTECT_AUTO);
1298 /* XXX txpower cap */
1300 /* re-setup beacons */
1301 if (state == IEEE80211_S_RUN &&
1302 (vap->iv_opmode == IEEE80211_M_HOSTAP ||
1303 vap->iv_opmode == IEEE80211_M_MBSS ||
1304 vap->iv_opmode == IEEE80211_M_IBSS)) {
1305 mwl_setapmode(vap, vap->iv_bss->ni_chan);
1306 mwl_hal_setnprotmode(hvap,
1307 MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
1308 return mwl_beacon_setup(vap);
1314 * Reset the hardware w/o losing operational state.
1315 * Used to to reset or reload hardware state for a vap.
/* net80211 ic_reset callback: reload hardware state for a vap without
 * losing operational state. Interrupts are masked around the vap
 * reset to avoid racing with the firmware. */
1318 mwl_reset(struct ieee80211vap *vap, u_long cmd)
1320 struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
/* hvap is NULL for vap types without firmware state (WDS, MONITOR) */
1323 if (hvap != NULL) { /* WDS, MONITOR, etc. */
1324 struct ieee80211com *ic = vap->iv_ic;
1325 struct ifnet *ifp = ic->ic_ifp;
1326 struct mwl_softc *sc = ifp->if_softc;
1327 struct mwl_hal *mh = sc->sc_mh;
1329 /* XXX handle DWDS sta vap change */
1330 /* XXX do we need to disable interrupts? */
1331 mwl_hal_intrset(mh, 0); /* disable interrupts */
1332 error = mwl_reset_vap(vap, vap->iv_state);
1333 mwl_hal_intrset(mh, sc->sc_imask);
1339 * Allocate a tx buffer for sending a frame. The
1340 * packet is assumed to have the WME AC stored so
1341 * we can use it to select the appropriate h/w queue.
/* Pop a tx buffer from the queue's free list under the txq lock.
 * Returns NULL (and counts a debug message) when the queue is
 * exhausted. */
1343 static struct mwl_txbuf *
1344 mwl_gettxbuf(struct mwl_softc *sc, struct mwl_txq *txq)
1346 struct mwl_txbuf *bf;
1349 * Grab a TX buffer and associated resources.
1352 bf = STAILQ_FIRST(&txq->free);
1354 STAILQ_REMOVE_HEAD(&txq->free, bf_list);
1357 MWL_TXQ_UNLOCK(txq);
1359 DPRINTF(sc, MWL_DEBUG_XMIT,
1360 "%s: out of xmit buffers on q %d\n", __func__, txq->qnum);
1365 * Return a tx buffer to the queue it came from. Note there
1366 * are two cases because we must preserve the order of buffers
1367 * as it reflects the fixed order of descriptors in memory
1368 * (the firmware pre-fetches descriptors so we cannot reorder).
/* Return a tx buffer to the FRONT of the free list (used when a frame
 * was never handed to the firmware, preserving descriptor order). */
1371 mwl_puttxbuf_head(struct mwl_txq *txq, struct mwl_txbuf *bf)
1376 STAILQ_INSERT_HEAD(&txq->free, bf, bf_list);
1378 MWL_TXQ_UNLOCK(txq);
/* Return a tx buffer to the TAIL of the free list (normal completion
 * path, keeping the list in physical descriptor order). */
1382 mwl_puttxbuf_tail(struct mwl_txq *txq, struct mwl_txbuf *bf)
1387 STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
1389 MWL_TXQ_UNLOCK(txq);
/* if_start entry point: drain the interface send queue, mapping each
 * frame to a tx queue by its WME access category, and kick the
 * firmware after mwl_txcoalesce frames (and once at the end). */
1393 mwl_start(struct ifnet *ifp)
1395 struct mwl_softc *sc = ifp->if_softc;
1396 struct ieee80211_node *ni;
1397 struct mwl_txbuf *bf;
1399 struct mwl_txq *txq = NULL; /* XXX silence gcc */
1402 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
1407 IFQ_DEQUEUE(&ifp->if_snd, m);
1411 * Grab the node for the destination.
/* net80211 stashes the node reference in rcvif before enqueue */
1413 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
1414 KASSERT(ni != NULL, ("no node"));
1415 m->m_pkthdr.rcvif = NULL; /* committed, clear ref */
1417 * Grab a TX buffer and associated resources.
1418 * We honor the classification by the 802.11 layer.
1420 txq = sc->sc_ac2q[M_WME_GETAC(m)];
1421 bf = mwl_gettxbuf(sc, txq);
1424 ieee80211_free_node(ni);
1425 #ifdef MWL_TX_NODROP
1426 sc->sc_stats.mst_tx_qstop++;
1427 /* XXX blocks other traffic */
1428 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1431 DPRINTF(sc, MWL_DEBUG_XMIT,
1432 "%s: tail drop on q %d\n", __func__, txq->qnum);
1433 sc->sc_stats.mst_tx_qdrop++;
1435 #endif /* MWL_TX_NODROP */
1439 * Pass the frame to the h/w for transmission.
1441 if (mwl_tx_start(sc, ni, bf, m)) {
/* tx setup failed: recycle buffer to list head, drop node ref */
1443 mwl_puttxbuf_head(txq, bf);
1444 ieee80211_free_node(ni);
1448 if (nqueued >= mwl_txcoalesce) {
1450 * Poke the firmware to process queued frames;
1451 * see below about (lack of) locking.
1454 mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
1459 * NB: We don't need to lock against tx done because
1460 * this just prods the firmware to check the transmit
1461 * descriptors. The firmware will also start fetching
1462 * descriptors by itself if it notices new ones are
1463 * present when it goes to deliver a tx done interrupt
1464 * to the host. So if we race with tx done processing
1465 * it's ok. Delivering the kick here rather than in
1466 * mwl_tx_start is an optimization to avoid poking the
1467 * firmware for each packet.
1469 * NB: the queue id isn't used so 0 is ok.
1471 mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
/* net80211 raw transmit entry point (mgmt frames, bpf injection).
 * Node reference ownership transfers to us: it must be released on
 * every failure path. Returns 0 on success, errno on failure. */
1476 mwl_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
1477 const struct ieee80211_bpf_params *params)
1479 struct ieee80211com *ic = ni->ni_ic;
1480 struct ifnet *ifp = ic->ic_ifp;
1481 struct mwl_softc *sc = ifp->if_softc;
1482 struct mwl_txbuf *bf;
1483 struct mwl_txq *txq;
1485 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) {
1486 ieee80211_free_node(ni);
1491 * Grab a TX buffer and associated resources.
1492 * Note that we depend on the classification
1493 * by the 802.11 layer to get to the right h/w
1494 * queue. Management frames must ALWAYS go on
1495 * queue 1 but we cannot just force that here
1496 * because we may receive non-mgt frames.
1498 txq = sc->sc_ac2q[M_WME_GETAC(m)];
1499 bf = mwl_gettxbuf(sc, txq);
1501 sc->sc_stats.mst_tx_qstop++;
1502 /* XXX blocks other traffic */
1503 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1504 ieee80211_free_node(ni);
1509 * Pass the frame to the h/w for transmission.
1511 if (mwl_tx_start(sc, ni, bf, m)) {
1513 mwl_puttxbuf_head(txq, bf);
1515 ieee80211_free_node(ni);
1516 return EIO; /* XXX */
1519 * NB: We don't need to lock against tx done because
1520 * this just prods the firmware to check the transmit
1521 * descriptors. The firmware will also start fetching
1522 * descriptors by itself if it notices new ones are
1523 * present when it goes to deliver a tx done interrupt
1524 * to the host. So if we race with tx done processing
1525 * it's ok. Delivering the kick here rather than in
1526 * mwl_tx_start is an optimization to avoid poking the
1527 * firmware for each packet.
1529 * NB: the queue id isn't used so 0 is ok.
1531 mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
/* ifmedia change callback: defer to net80211; ENETRESET from the
 * generic handler is absorbed since a fixed-rate change needs no
 * full reset here. */
1536 mwl_media_change(struct ifnet *ifp)
1538 struct ieee80211vap *vap = ifp->if_softc;
1541 error = ieee80211_media_change(ifp);
1542 /* NB: only the fixed rate can change and that doesn't need a reset */
1543 if (error == ENETRESET) {
/* Debug helper: dump a HAL key (index, cipher, key bytes, sta mac,
 * TKIP rx/tx MIC keys, flags) to the console. */
1552 mwl_keyprint(struct mwl_softc *sc, const char *tag,
1553 const MWL_HAL_KEYVAL *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
1555 static const char *ciphers[] = {
1562 printf("%s: [%u] %-7s", tag, hk->keyIndex, ciphers[hk->keyTypeId]);
1563 for (i = 0, n = hk->keyLen; i < n; i++)
1564 printf(" %02x", hk->key.aes[i]);
1565 printf(" mac %s", ether_sprintf(mac));
1566 if (hk->keyTypeId == KEY_TYPE_ID_TKIP) {
1567 printf(" %s", "rxmic");
1568 for (i = 0; i < sizeof(hk->key.tkip.rxMic); i++)
1569 printf(" %02x", hk->key.tkip.rxMic[i]);
1571 for (i = 0; i < sizeof(hk->key.tkip.txMic); i++)
1572 printf(" %02x", hk->key.tkip.txMic[i]);
1574 printf(" flags 0x%x\n", hk->keyFlags);
1579 * Allocate a key cache slot for a unicast key. The
1580 * firmware handles key allocation and every station is
1581 * guaranteed key space so we are always successful.
/* net80211 key-allocation callback. Group/static keys map to their
 * slot index in iv_nw_keys; for unicast keys the firmware manages
 * slots itself, so index 0 is handed back unconditionally. */
1584 mwl_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
1585 ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
1587 struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
1589 if (k->wk_keyix != IEEE80211_KEYIX_NONE ||
1590 (k->wk_flags & IEEE80211_KEY_GROUP)) {
1591 if (!(&vap->iv_nw_keys[0] <= k &&
1592 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
1593 /* should not happen */
1594 DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1595 "%s: bogus group key\n", __func__);
1598 /* give the caller what they requested */
1599 *keyix = *rxkeyix = k - vap->iv_nw_keys;
1602 * Firmware handles key allocation.
1604 *keyix = *rxkeyix = 0;
1610 * Delete a key entry allocated by mwl_key_alloc.
/* Delete a key previously plumbed via mwl_key_alloc/mwl_key_set.
 * WDS vaps use the associated AP's hvap. The key is reset against
 * the broadcast address. Returns 1 on HAL success, 0 otherwise. */
1613 mwl_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
1615 struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
1616 struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1618 const uint8_t bcastaddr[IEEE80211_ADDR_LEN] =
1619 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1622 if (vap->iv_opmode != IEEE80211_M_WDS) {
1623 /* XXX monitor mode? */
1624 DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1625 "%s: no hvap for opmode %d\n", __func__,
/* WDS: fall back to the associated AP's firmware vap handle */
1629 hvap = MWL_VAP(vap)->mv_ap_hvap;
1632 DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: delete key %u\n",
1633 __func__, k->wk_keyix);
1635 memset(&hk, 0, sizeof(hk));
1636 hk.keyIndex = k->wk_keyix;
1637 switch (k->wk_cipher->ic_cipher) {
1638 case IEEE80211_CIPHER_WEP:
1639 hk.keyTypeId = KEY_TYPE_ID_WEP;
1641 case IEEE80211_CIPHER_TKIP:
1642 hk.keyTypeId = KEY_TYPE_ID_TKIP;
1644 case IEEE80211_CIPHER_AES_CCM:
1645 hk.keyTypeId = KEY_TYPE_ID_AES;
1648 /* XXX should not happen */
1649 DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
1650 __func__, k->wk_cipher->ic_cipher);
1653 return (mwl_hal_keyreset(hvap, &hk, bcastaddr) == 0); /*XXX*/
/* Set the HAL tx/rx group-key flags from a net80211 group key's
 * wk_flags; used by mwl_key_set to distinguish group from pairwise. */
1657 addgroupflags(MWL_HAL_KEYVAL *hk, const struct ieee80211_key *k)
1659 if (k->wk_flags & IEEE80211_KEY_GROUP) {
1660 if (k->wk_flags & IEEE80211_KEY_XMIT)
1661 hk->keyFlags |= KEY_FLAG_TXGROUPKEY;
1662 if (k->wk_flags & IEEE80211_KEY_RECV)
1663 hk->keyFlags |= KEY_FLAG_RXGROUPKEY;
1670 * Set the key cache contents for the specified key. Key cache
1671 * slot(s) must already have been allocated by mwl_key_alloc.
/* Plumb a key into the firmware key cache: translate the net80211
 * key into a MWL_HAL_KEYVAL (cipher id, flags, TSC for TKIP) and pick
 * the right sta db mac address for the vap's operating mode.
 * Returns 1 on HAL success, 0 on failure. */
1674 mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
1675 const uint8_t mac[IEEE80211_ADDR_LEN])
1677 #define GRPXMIT (IEEE80211_KEY_XMIT | IEEE80211_KEY_GROUP)
1678 /* NB: static wep keys are marked GROUP+tx/rx; GTK will be tx or rx */
1679 #define IEEE80211_IS_STATICKEY(k) \
1680 (((k)->wk_flags & (GRPXMIT|IEEE80211_KEY_RECV)) == \
1681 (GRPXMIT|IEEE80211_KEY_RECV))
1682 struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
1683 struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1684 const struct ieee80211_cipher *cip = k->wk_cipher;
1685 const uint8_t *macaddr;
/* firmware does the crypto; s/w crypto keys should never reach here */
1688 KASSERT((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0,
1689 ("s/w crypto set?"));
1692 if (vap->iv_opmode != IEEE80211_M_WDS) {
1693 /* XXX monitor mode? */
1694 DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1695 "%s: no hvap for opmode %d\n", __func__,
/* WDS: use the associated AP's firmware vap handle */
1699 hvap = MWL_VAP(vap)->mv_ap_hvap;
1701 memset(&hk, 0, sizeof(hk));
1702 hk.keyIndex = k->wk_keyix;
1703 switch (cip->ic_cipher) {
1704 case IEEE80211_CIPHER_WEP:
1705 hk.keyTypeId = KEY_TYPE_ID_WEP;
1706 hk.keyLen = k->wk_keylen;
1707 if (k->wk_keyix == vap->iv_def_txkey)
1708 hk.keyFlags = KEY_FLAG_WEP_TXKEY;
1709 if (!IEEE80211_IS_STATICKEY(k)) {
1710 /* NB: WEP is never used for the PTK */
1711 (void) addgroupflags(&hk, k);
1714 case IEEE80211_CIPHER_TKIP:
1715 hk.keyTypeId = KEY_TYPE_ID_TKIP;
1716 hk.key.tkip.tsc.high = (uint32_t)(k->wk_keytsc >> 16);
1717 hk.key.tkip.tsc.low = (uint16_t)k->wk_keytsc;
1718 hk.keyFlags = KEY_FLAG_TSC_VALID | KEY_FLAG_MICKEY_VALID;
1719 hk.keyLen = k->wk_keylen + IEEE80211_MICBUF_SIZE;
1720 if (!addgroupflags(&hk, k))
1721 hk.keyFlags |= KEY_FLAG_PAIRWISE;
1723 case IEEE80211_CIPHER_AES_CCM:
1724 hk.keyTypeId = KEY_TYPE_ID_AES;
1725 hk.keyLen = k->wk_keylen;
1726 if (!addgroupflags(&hk, k))
1727 hk.keyFlags |= KEY_FLAG_PAIRWISE;
1730 /* XXX should not happen */
1731 DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
1732 __func__, k->wk_cipher->ic_cipher);
1736 * NB: tkip mic keys get copied here too; the layout
1737 * just happens to match that in ieee80211_key.
1739 memcpy(hk.key.aes, k->wk_key, hk.keyLen);
1742 * Locate address of sta db entry for writing key;
1743 * the convention unfortunately is somewhat different
1744 * than how net80211, hostapd, and wpa_supplicant think.
1746 if (vap->iv_opmode == IEEE80211_M_STA) {
1748 * NB: keys plumbed before the sta reaches AUTH state
1749 * will be discarded or written to the wrong sta db
1750 * entry because iv_bss is meaningless. This is ok
1751 * (right now) because we handle deferred plumbing of
1752 * WEP keys when the sta reaches AUTH state.
1754 macaddr = vap->iv_bss->ni_bssid;
1755 if ((k->wk_flags & IEEE80211_KEY_GROUP) == 0) {
1756 /* XXX plumb to local sta db too for static key wep */
1757 mwl_hal_keyset(hvap, &hk, vap->iv_myaddr);
1759 } else if (vap->iv_opmode == IEEE80211_M_WDS &&
1760 vap->iv_state != IEEE80211_S_RUN) {
1762 * Prior to RUN state a WDS vap will not it's BSS node
1763 * setup so we will plumb the key to the wrong mac
1764 * address (it'll be our local address). Workaround
1765 * this for the moment by grabbing the correct address.
1767 macaddr = vap->iv_des_bssid;
1768 } else if ((k->wk_flags & GRPXMIT) == GRPXMIT)
1769 macaddr = vap->iv_myaddr;
1772 KEYPRINTF(sc, &hk, macaddr);
1773 return (mwl_hal_keyset(hvap, &hk, macaddr) == 0);
1774 #undef IEEE80211_IS_STATICKEY
1778 /* unaligned little endian access */
1779 #define LE_READ_2(p) \
1781 ((((const uint8_t *)(p))[0] ) | \
1782 (((const uint8_t *)(p))[1] << 8)))
1783 #define LE_READ_4(p) \
1785 ((((const uint8_t *)(p))[0] ) | \
1786 (((const uint8_t *)(p))[1] << 8) | \
1787 (((const uint8_t *)(p))[2] << 16) | \
1788 (((const uint8_t *)(p))[3] << 24)))
1791 * Set the multicast filter contents into the hardware.
1792 * XXX f/w has no support; just defer to the os.
/* Build the firmware multicast filter list from the interface's
 * multicast addresses. Address ranges or overflow past
 * MWL_HAL_MCAST_MAX fall back to all-multi mode.
 * NOTE(review): the ETHER_FIRST_MULTI walk appears conditionally
 * compiled; the fallback path just sets IFF_ALLMULTI — confirm. */
1795 mwl_setmcastfilter(struct mwl_softc *sc)
1797 struct ifnet *ifp = sc->sc_ifp;
1799 struct ether_multi *enm;
1800 struct ether_multistep estep;
1801 uint8_t macs[IEEE80211_ADDR_LEN*MWL_HAL_MCAST_MAX];/* XXX stack use */
1807 ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm);
1808 while (enm != NULL) {
1809 /* XXX Punt on ranges. */
1810 if (nmc == MWL_HAL_MCAST_MAX ||
1811 !IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) {
1812 ifp->if_flags |= IFF_ALLMULTI;
1815 IEEE80211_ADDR_COPY(mp, enm->enm_addrlo);
1816 mp += IEEE80211_ADDR_LEN, nmc++;
1817 ETHER_NEXT_MULTI(estep, enm);
1819 ifp->if_flags &= ~IFF_ALLMULTI;
1820 mwl_hal_setmcast(sc->sc_mh, nmc, macs);
1822 /* XXX no mcast filter support; we get everything */
1823 ifp->if_flags |= IFF_ALLMULTI;
/* Push promiscuous mode and the multicast filter to the firmware.
 * Promisc is deliberately suppressed in hostap mode (set by bridge). */
1828 mwl_mode_init(struct mwl_softc *sc)
1830 struct ifnet *ifp = sc->sc_ifp;
1831 struct ieee80211com *ic = ifp->if_l2com;
1832 struct mwl_hal *mh = sc->sc_mh;
1835 * NB: Ignore promisc in hostap mode; it's set by the
1836 * bridge. This is wrong but we have no way to
1837 * identify internal requests (from the bridge)
1838 * versus external requests such as for tcpdump.
1840 mwl_hal_setpromisc(mh, (ifp->if_flags & IFF_PROMISC) &&
1841 ic->ic_opmode != IEEE80211_M_HOSTAP);
1842 mwl_setmcastfilter(sc);
1848 * Callback from the 802.11 layer after a multicast state change.
/* net80211 callback after a multicast state change: reprogram the
 * firmware multicast filter. */
1851 mwl_update_mcast(struct ifnet *ifp)
1853 struct mwl_softc *sc = ifp->if_softc;
1855 mwl_setmcastfilter(sc);
1859 * Callback from the 802.11 layer after a promiscuous mode change.
1860 * Note this interface does not check the operating mode as this
1861 * is an internal callback and we are expected to honor the current
1862 * state (e.g. this is used for setting the interface in promiscuous
1863 * mode when operating in hostap mode to do ACS).
/* net80211 callback after a promiscuous mode change: mirror
 * IFF_PROMISC into the firmware unconditionally (no opmode check). */
1866 mwl_update_promisc(struct ifnet *ifp)
1868 struct mwl_softc *sc = ifp->if_softc;
1870 mwl_hal_setpromisc(sc->sc_mh, (ifp->if_flags & IFF_PROMISC) != 0);
1874 * Callback from the 802.11 layer to update the slot time
1875 * based on the current setting. We use it to notify the
1876 * firmware of ERP changes and the f/w takes care of things
1877 * like slot time and preamble.
/* net80211 slot-time callback: compute ERP flags from the current
 * 2.4GHz channel state and hand them to the firmware, which derives
 * slot time and preamble itself. No-op before the interface runs. */
1880 mwl_updateslot(struct ifnet *ifp)
1882 struct mwl_softc *sc = ifp->if_softc;
1883 struct ieee80211com *ic = ifp->if_l2com;
1884 struct mwl_hal *mh = sc->sc_mh;
1887 /* NB: can be called early; suppress needless cmds */
1888 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1892 * Calculate the ERP flags. The firwmare will use
1893 * this to carry out the appropriate measures.
1896 if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
1897 if ((ic->ic_flags & IEEE80211_F_SHSLOT) == 0)
1898 prot |= IEEE80211_ERP_NON_ERP_PRESENT;
1899 if (ic->ic_flags & IEEE80211_F_USEPROT)
1900 prot |= IEEE80211_ERP_USE_PROTECTION;
1901 if (ic->ic_flags & IEEE80211_F_USEBARKER)
1902 prot |= IEEE80211_ERP_LONG_PREAMBLE;
1905 DPRINTF(sc, MWL_DEBUG_RESET,
1906 "%s: chan %u MHz/flags 0x%x %s slot, (prot 0x%x ic_flags 0x%x)\n",
1907 __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
1908 ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", prot,
1911 mwl_hal_setgprot(mh, prot);
1915 * Setup the beacon frame.
/* Allocate a beacon frame for the vap's bss node and hand its
 * contents to the firmware, which takes over beacon transmission. */
1918 mwl_beacon_setup(struct ieee80211vap *vap)
1920 struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1921 struct ieee80211_node *ni = vap->iv_bss;
1922 struct ieee80211_beacon_offsets bo;
1925 m = ieee80211_beacon_alloc(ni, &bo);
1928 mwl_hal_setbeacon(hvap, mtod(m, const void *), m->m_len);
1935 * Update the beacon frame in response to a change.
/* net80211 beacon-update callback: handle items the firmware cares
 * about specially (ERP, HT info, TIM), rebuild the beacon for the
 * rest. */
1938 mwl_beacon_update(struct ieee80211vap *vap, int item)
1940 struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1941 struct ieee80211com *ic = vap->iv_ic;
1943 KASSERT(hvap != NULL, ("no beacon"));
1945 case IEEE80211_BEACON_ERP:
1946 mwl_updateslot(ic->ic_ifp);
1948 case IEEE80211_BEACON_HTINFO:
1949 mwl_hal_setnprotmode(hvap,
1950 MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
1952 case IEEE80211_BEACON_CAPS:
1953 case IEEE80211_BEACON_WME:
1954 case IEEE80211_BEACON_APPIE:
1955 case IEEE80211_BEACON_CSA:
1957 case IEEE80211_BEACON_TIM:
1958 /* NB: firmware always forms TIM */
1961 /* XXX retain beacon frame and update */
1962 mwl_beacon_setup(vap);
/* bus_dmamap_load callback: stash the single segment's bus address
 * into the caller-supplied bus_addr_t. */
1966 mwl_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1968 bus_addr_t *paddr = (bus_addr_t*) arg;
1969 KASSERT(error == 0, ("error %u on bus_dma callback", error));
1970 *paddr = segs->ds_addr;
1973 #ifdef MWL_HOST_PS_SUPPORT
1975 * Handle power save station occupancy changes.
/* Notify the firmware when the bss transitions between "some stations
 * in power save" and "none" (only edge transitions matter). */
1978 mwl_update_ps(struct ieee80211vap *vap, int nsta)
1980 struct mwl_vap *mvp = MWL_VAP(vap);
1982 if (nsta == 0 || mvp->mv_last_ps_sta == 0)
1983 mwl_hal_setpowersave_bss(mvp->mv_hvap, nsta);
1984 mvp->mv_last_ps_sta = nsta;
1988 * Handle associated station power save state changes.
/* net80211 TIM callback: on a real state change, tell the firmware
 * about the station's power save state by AID. */
1991 mwl_set_tim(struct ieee80211_node *ni, int set)
1993 struct ieee80211vap *vap = ni->ni_vap;
1994 struct mwl_vap *mvp = MWL_VAP(vap);
1996 if (mvp->mv_set_tim(ni, set)) { /* NB: state change */
1997 mwl_hal_setpowersave_sta(mvp->mv_hvap,
1998 IEEE80211_AID(ni->ni_associd), set);
2003 #endif /* MWL_HOST_PS_SUPPORT */
/* Allocate and map a contiguous, page-aligned, 32-bit-addressable
 * descriptor area of nbuf*ndesc*descsize bytes (tag, map, coherent
 * memory, bus load), zeroing it on success. On failure the partially
 * acquired resources are released in reverse order and *dd is reset. */
2006 mwl_desc_setup(struct mwl_softc *sc, const char *name,
2007 struct mwl_descdma *dd,
2008 int nbuf, size_t bufsize, int ndesc, size_t descsize)
2010 struct ifnet *ifp = sc->sc_ifp;
2014 DPRINTF(sc, MWL_DEBUG_RESET,
2015 "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n",
2016 __func__, name, nbuf, (uintmax_t) bufsize,
2017 ndesc, (uintmax_t) descsize);
2020 dd->dd_desc_len = nbuf * ndesc * descsize;
2023 * Setup DMA descriptor area.
2025 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
2026 PAGE_SIZE, 0, /* alignment, bounds */
2027 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
2028 BUS_SPACE_MAXADDR, /* highaddr */
2029 NULL, NULL, /* filter, filterarg */
2030 dd->dd_desc_len, /* maxsize */
2032 dd->dd_desc_len, /* maxsegsize */
2033 BUS_DMA_ALLOCNOW, /* flags */
2034 NULL, /* lockfunc */
2038 if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
2042 /* allocate descriptors */
2043 error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap);
2045 if_printf(ifp, "unable to create dmamap for %s descriptors, "
2046 "error %u\n", dd->dd_name, error);
2050 error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
2051 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2054 if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
2055 "error %u\n", nbuf * ndesc, dd->dd_name, error);
2059 error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
2060 dd->dd_desc, dd->dd_desc_len,
2061 mwl_load_cb, &dd->dd_desc_paddr,
2064 if_printf(ifp, "unable to map %s descriptors, error %u\n",
2065 dd->dd_name, error);
2070 memset(ds, 0, dd->dd_desc_len);
2071 DPRINTF(sc, MWL_DEBUG_RESET,
2072 "%s: %s DMA map: %p (%lu) -> 0x%jx (%lu)\n",
2073 __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
2074 (uintmax_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
/* error unwind: undo in reverse order of acquisition */
2078 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
2080 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
2082 bus_dma_tag_destroy(dd->dd_dmat);
2083 memset(dd, 0, sizeof(*dd));
/* Tear down a descriptor area created by mwl_desc_setup: unload,
 * free, destroy map and tag, then zero the descriptor state. */
2089 mwl_desc_cleanup(struct mwl_softc *sc, struct mwl_descdma *dd)
2091 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
2092 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
2093 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
2094 bus_dma_tag_destroy(dd->dd_dmat);
2096 memset(dd, 0, sizeof(*dd));
2100 * Construct a tx q's free list. The order of entries on
2101 * the list must reflect the physical layout of tx descriptors
2102 * because the firmware pre-fetches descriptors.
2104 * XXX might be better to use indices into the buffer array.
/* Rebuild a tx queue's free list in buffer-array order, matching the
 * physical layout of descriptors (the firmware pre-fetches them). */
2107 mwl_txq_reset(struct mwl_softc *sc, struct mwl_txq *txq)
2109 struct mwl_txbuf *bf;
2112 bf = txq->dma.dd_bufptr;
2113 STAILQ_INIT(&txq->free);
2114 for (i = 0; i < mwl_txbuf; i++, bf++)
2115 STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
2119 #define DS2PHYS(_dd, _ds) \
2120 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
/* Allocate a tx queue's descriptor area plus mwl_txbuf software
 * buffers; each buffer gets MWL_TXDESC consecutive descriptors, its
 * bus address and a dmamap, then the free list is initialized. */
2123 mwl_txdma_setup(struct mwl_softc *sc, struct mwl_txq *txq)
2125 struct ifnet *ifp = sc->sc_ifp;
2126 int error, bsize, i;
2127 struct mwl_txbuf *bf;
2128 struct mwl_txdesc *ds;
2130 error = mwl_desc_setup(sc, "tx", &txq->dma,
2131 mwl_txbuf, sizeof(struct mwl_txbuf),
2132 MWL_TXDESC, sizeof(struct mwl_txdesc));
2136 /* allocate and setup tx buffers */
2137 bsize = mwl_txbuf * sizeof(struct mwl_txbuf);
2138 bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2140 if_printf(ifp, "malloc of %u tx buffers failed\n",
2144 txq->dma.dd_bufptr = bf;
2146 ds = txq->dma.dd_desc;
2147 for (i = 0; i < mwl_txbuf; i++, bf++, ds += MWL_TXDESC) {
2149 bf->bf_daddr = DS2PHYS(&txq->dma, ds);
2150 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
2153 if_printf(ifp, "unable to create dmamap for tx "
2154 "buffer %u, error %u\n", i, error);
2158 mwl_txq_reset(sc, txq);
/* Release a tx queue's buffers (asserting none is in flight), their
 * dmamaps, the buffer array and the descriptor area. */
2163 mwl_txdma_cleanup(struct mwl_softc *sc, struct mwl_txq *txq)
2165 struct mwl_txbuf *bf;
2168 bf = txq->dma.dd_bufptr;
2169 for (i = 0; i < mwl_txbuf; i++, bf++) {
2170 KASSERT(bf->bf_m == NULL, ("mbuf on free list"));
2171 KASSERT(bf->bf_node == NULL, ("node on free list"));
2172 if (bf->bf_dmamap != NULL)
2173 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
2175 STAILQ_INIT(&txq->free);
2177 if (txq->dma.dd_bufptr != NULL) {
2178 free(txq->dma.dd_bufptr, M_MWLDEV);
2179 txq->dma.dd_bufptr = NULL;
2181 if (txq->dma.dd_desc_len != 0)
2182 mwl_desc_cleanup(sc, &txq->dma);
/* Set up rx DMA: descriptor area plus one big private pool of jumbo
 * buffers (at least 2x mwl_rxdesc so processing has slop). The first
 * mwl_rxdesc buffers are pre-assigned to rx descriptors; the
 * remainder seed the free list for buffer replenishment. */
2186 mwl_rxdma_setup(struct mwl_softc *sc)
2188 struct ifnet *ifp = sc->sc_ifp;
2189 int error, jumbosize, bsize, i;
2190 struct mwl_rxbuf *bf;
2191 struct mwl_jumbo *rbuf;
2192 struct mwl_rxdesc *ds;
2195 error = mwl_desc_setup(sc, "rx", &sc->sc_rxdma,
2196 mwl_rxdesc, sizeof(struct mwl_rxbuf),
2197 1, sizeof(struct mwl_rxdesc));
2202 * Receive is done to a private pool of jumbo buffers.
2203 * This allows us to attach to mbuf's and avoid re-mapping
2204 * memory on each rx we post. We allocate a large chunk
2205 * of memory and manage it in the driver. The mbuf free
2206 * callback method is used to reclaim frames after sending
2207 * them up the stack. By default we allocate 2x the number of
2208 * rx descriptors configured so we have some slop to hold
2209 * us while frames are processed.
2211 if (mwl_rxbuf < 2*mwl_rxdesc) {
2213 "too few rx dma buffers (%d); increasing to %d\n",
2214 mwl_rxbuf, 2*mwl_rxdesc);
2215 mwl_rxbuf = 2*mwl_rxdesc;
2217 jumbosize = roundup(MWL_AGGR_SIZE, PAGE_SIZE);
2218 sc->sc_rxmemsize = mwl_rxbuf*jumbosize;
2220 error = bus_dma_tag_create(sc->sc_dmat, /* parent */
2221 PAGE_SIZE, 0, /* alignment, bounds */
2222 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
2223 BUS_SPACE_MAXADDR, /* highaddr */
2224 NULL, NULL, /* filter, filterarg */
2225 sc->sc_rxmemsize, /* maxsize */
2227 sc->sc_rxmemsize, /* maxsegsize */
2228 BUS_DMA_ALLOCNOW, /* flags */
2229 NULL, /* lockfunc */
2232 error = bus_dmamap_create(sc->sc_rxdmat, BUS_DMA_NOWAIT, &sc->sc_rxmap);
2234 if_printf(ifp, "could not create rx DMA map\n");
2238 error = bus_dmamem_alloc(sc->sc_rxdmat, (void**) &sc->sc_rxmem,
2239 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2242 if_printf(ifp, "could not alloc %ju bytes of rx DMA memory\n",
2243 (uintmax_t) sc->sc_rxmemsize);
2247 error = bus_dmamap_load(sc->sc_rxdmat, sc->sc_rxmap,
2248 sc->sc_rxmem, sc->sc_rxmemsize,
2249 mwl_load_cb, &sc->sc_rxmem_paddr,
2252 if_printf(ifp, "could not load rx DMA map\n");
2257 * Allocate rx buffers and set them up.
2259 bsize = mwl_rxdesc * sizeof(struct mwl_rxbuf);
2260 bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2262 if_printf(ifp, "malloc of %u rx buffers failed\n", bsize);
2265 sc->sc_rxdma.dd_bufptr = bf;
2267 STAILQ_INIT(&sc->sc_rxbuf);
2268 ds = sc->sc_rxdma.dd_desc;
2269 for (i = 0; i < mwl_rxdesc; i++, bf++, ds++) {
2271 bf->bf_daddr = DS2PHYS(&sc->sc_rxdma, ds);
2272 /* pre-assign dma buffer */
2273 bf->bf_data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2274 /* NB: tail is intentional to preserve descriptor order */
2275 STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
2279 * Place remainder of dma memory buffers on the free list.
2281 SLIST_INIT(&sc->sc_rxfree);
2282 for (; i < mwl_rxbuf; i++) {
2283 data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2284 rbuf = MWL_JUMBO_DATA2BUF(data);
2285 SLIST_INSERT_HEAD(&sc->sc_rxfree, rbuf, next);
/* Tear down rx DMA state in reverse order of mwl_rxdma_setup; each
 * step is guarded so a partially-initialized setup unwinds cleanly. */
2293 mwl_rxdma_cleanup(struct mwl_softc *sc)
2295 if (sc->sc_rxmap != NULL)
2296 bus_dmamap_unload(sc->sc_rxdmat, sc->sc_rxmap);
2297 if (sc->sc_rxmem != NULL) {
2298 bus_dmamem_free(sc->sc_rxdmat, sc->sc_rxmem, sc->sc_rxmap);
2299 sc->sc_rxmem = NULL;
2301 if (sc->sc_rxmap != NULL) {
2302 bus_dmamap_destroy(sc->sc_rxdmat, sc->sc_rxmap);
2303 sc->sc_rxmap = NULL;
2305 if (sc->sc_rxdma.dd_bufptr != NULL) {
2306 free(sc->sc_rxdma.dd_bufptr, M_MWLDEV);
2307 sc->sc_rxdma.dd_bufptr = NULL;
2309 if (sc->sc_rxdma.dd_desc_len != 0)
2310 mwl_desc_cleanup(sc, &sc->sc_rxdma);
/* Set up all DMA state: rx first, then one tx setup per hardware
 * queue; on any failure everything allocated so far is released. */
2314 mwl_dma_setup(struct mwl_softc *sc)
2318 error = mwl_rxdma_setup(sc);
2320 mwl_rxdma_cleanup(sc);
2324 for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
2325 error = mwl_txdma_setup(sc, &sc->sc_txq[i]);
2327 mwl_dma_cleanup(sc);
/* Release all DMA state: every tx queue, then rx. */
2335 mwl_dma_cleanup(struct mwl_softc *sc)
2339 for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2340 mwl_txdma_cleanup(sc, &sc->sc_txq[i]);
2341 mwl_rxdma_cleanup(sc);
/* net80211 node-allocation callback: allocate a zeroed mwl_node and
 * hand back its embedded ieee80211_node. */
2344 static struct ieee80211_node *
2345 mwl_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2347 struct ieee80211com *ic = vap->iv_ic;
2348 struct mwl_softc *sc = ic->ic_ifp->if_softc;
2349 const size_t space = sizeof(struct mwl_node);
2350 struct mwl_node *mn;
2352 mn = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
2357 DPRINTF(sc, MWL_DEBUG_NODE, "%s: mn %p\n", __func__, mn);
2358 return &mn->mn_node;
/* net80211 node-cleanup callback: remove the station's firmware sta
 * db entry (via the hvap that installed it — own hvap, or the ap's
 * hvap for legacy WDS), release its staid, then chain to the saved
 * net80211 cleanup handler. */
2362 mwl_node_cleanup(struct ieee80211_node *ni)
2364 struct ieee80211com *ic = ni->ni_ic;
2365 struct mwl_softc *sc = ic->ic_ifp->if_softc;
2366 struct mwl_node *mn = MWL_NODE(ni);
2368 DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p ic %p staid %d\n",
2369 __func__, ni, ni->ni_ic, mn->mn_staid);
2371 if (mn->mn_staid != 0) {
2372 struct ieee80211vap *vap = ni->ni_vap;
2374 if (mn->mn_hvap != NULL) {
/* sta mode entries are keyed by our own address, others by the peer */
2375 if (vap->iv_opmode == IEEE80211_M_STA)
2376 mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr);
2378 mwl_hal_delstation(mn->mn_hvap, ni->ni_macaddr);
2381 * NB: legacy WDS peer sta db entry is installed using
2382 * the associate ap's hvap; use it again to delete it.
2383 * XXX can vap be NULL?
2385 else if (vap->iv_opmode == IEEE80211_M_WDS &&
2386 MWL_VAP(vap)->mv_ap_hvap != NULL)
2387 mwl_hal_delstation(MWL_VAP(vap)->mv_ap_hvap,
2389 delstaid(sc, mn->mn_staid);
2392 sc->sc_node_cleanup(ni);
2396 * Reclaim rx dma buffers from packets sitting on the ampdu
2397 * reorder queue for a station. We replace buffers with a
2398 * system cluster (if available).
2401 mwl_ampdu_rxdma_reclaim(struct ieee80211_rx_ampdu *rap)
2408 n = rap->rxa_qframes;
2409 for (i = 0; i < rap->rxa_wnd && n > 0; i++) {
2414 /* our dma buffers have a well-known free routine */
2415 if ((m->m_flags & M_EXT) == 0 ||
2416 m->m_ext.ext_free != mwl_ext_free)
2419 * Try to allocate a cluster and move the data.
/* preserve the data offset within the old buffer */
2421 off = m->m_data - m->m_ext.ext_buf;
2422 if (off + m->m_pkthdr.len > MCLBYTES) {
2423 /* XXX no AMSDU for now */
/*
 * NOTE(review): pool_cache_get_paddr/mclpool_cache look like
 * NetBSD-style APIs; confirm this path is actually compiled on
 * this platform — it may be legacy/dead code here.
 */
2426 cl = pool_cache_get_paddr(&mclpool_cache, 0,
2427 &m->m_ext.ext_paddr);
2430 * Copy the existing data to the cluster, remove
2431 * the rx dma buffer, and attach the cluster in
2432 * its place. Note we preserve the offset to the
2433 * data so frames being bridged can still prepend
2434 * their headers without adding another mbuf.
2436 memcpy((caddr_t) cl + off, m->m_data, m->m_pkthdr.len);
2438 MEXTADD(m, cl, MCLBYTES, 0, NULL, &mclpool_cache);
2439 /* setup mbuf like _MCLGET does */
2440 m->m_flags |= M_CLUSTER | M_EXT_RW;
2441 _MOWNERREF(m, M_EXT | M_CLUSTER);
2442 /* NB: m_data is clobbered by MEXTADDR, adjust */
2450 * Callback to reclaim resources. We first let the
2451 * net80211 layer do it's thing, then if we are still
2452 * blocked by a lack of rx dma buffers we walk the ampdu
2453 * reorder q's to reclaim buffers by copying to a system
2457 mwl_node_drain(struct ieee80211_node *ni)
2459 struct ieee80211com *ic = ni->ni_ic;
2460 struct mwl_softc *sc = ic->ic_ifp->if_softc;
2461 struct mwl_node *mn = MWL_NODE(ni);
2463 DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p vap %p staid %d\n",
2464 __func__, ni, ni->ni_vap, mn->mn_staid);
2466 /* NB: call up first to age out ampdu q's */
2467 sc->sc_node_drain(ni);
2469 /* XXX better to not check low water mark? */
/* only stations with a staid and HT capability have reorder q's */
2470 if (sc->sc_rxblocked && mn->mn_staid != 0 &&
2471 (ni->ni_flags & IEEE80211_NODE_HT)) {
2474 * Walk the reorder q and reclaim rx dma buffers by copying
2475 * the packet contents into clusters.
2477 for (tid = 0; tid < WME_NUM_TID; tid++) {
2478 struct ieee80211_rx_ampdu *rap;
2480 rap = &ni->ni_rx_ampdu[tid];
/* skip TIDs with no active BA exchange */
2481 if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0)
2483 if (rap->rxa_qframes)
2484 mwl_ampdu_rxdma_reclaim(rap);
/*
 * net80211 getsignal override: rssi from the stock net80211
 * estimator; noise from per-antenna h/w info when compiled in
 * (MWL_ANT_INFO_SUPPORT), otherwise a fixed -95 dBm placeholder.
 */
2490 mwl_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
2492 *rssi = ni->ni_ic->ic_node_getrssi(ni);
2493 #ifdef MWL_ANT_INFO_SUPPORT
2495 /* XXX need to smooth data */
2496 *noise = -MWL_NODE_CONST(ni)->mn_ai.nf;
2498 *noise = -95; /* XXX */
2501 *noise = -95; /* XXX */
2506 * Convert Hardware per-antenna rssi info to common format:
2507 * Let a1, a2, a3 represent the amplitudes per chain
2508 * Let amax represent max[a1, a2, a3]
2509 * Rssi1_dBm = RSSI_dBm + 20*log10(a1/amax)
2510 * Rssi1_dBm = RSSI_dBm + 20*log10(a1) - 20*log10(amax)
2511 * We store a table that is 4*20*log10(idx) - the extra 4 is to store or
2512 * maintain some extra precision.
2514 * Values are stored in .5 db format capped at 127.
2517 mwl_node_getmimoinfo(const struct ieee80211_node *ni,
2518 struct ieee80211_mimo_info *mi)
/* per-chain rssi in .5 dB units, capped at 127 (see table note above) */
2520 #define CVT(_dst, _src) do { \
2521 (_dst) = rssi + ((logdbtbl[_src] - logdbtbl[rssi_max]) >> 2); \
2522 (_dst) = (_dst) > 64 ? 127 : ((_dst) << 1); \
/* 4*20*log10(idx) lookup table, idx 0..31 (entry 0 unused) */
2524 static const int8_t logdbtbl[32] = {
2525 0, 0, 24, 38, 48, 56, 62, 68,
2526 72, 76, 80, 83, 86, 89, 92, 94,
2527 96, 98, 100, 102, 104, 106, 107, 109,
2528 110, 112, 113, 115, 116, 117, 118, 119
2530 const struct mwl_node *mn = MWL_NODE_CONST(ni);
2531 uint8_t rssi = mn->mn_ai.rsvd1/2; /* XXX */
/* find the strongest chain to normalize against */
2534 rssi_max = mn->mn_ai.rssi_a;
2535 if (mn->mn_ai.rssi_b > rssi_max)
2536 rssi_max = mn->mn_ai.rssi_b;
2537 if (mn->mn_ai.rssi_c > rssi_max)
2538 rssi_max = mn->mn_ai.rssi_c;
2540 CVT(mi->rssi[0], mn->mn_ai.rssi_a);
2541 CVT(mi->rssi[1], mn->mn_ai.rssi_b);
2542 CVT(mi->rssi[2], mn->mn_ai.rssi_c);
2544 mi->noise[0] = mn->mn_ai.nf_a;
2545 mi->noise[1] = mn->mn_ai.nf_b;
2546 mi->noise[2] = mn->mn_ai.nf_c;
/*
 * Pop an RX DMA buffer from the jumbo free list (under the
 * RXFREE lock) and return a pointer to its data area, or fail
 * (counted in mst_rx_nodmabuf) when the pool is empty.
 */
2550 static __inline void *
2551 mwl_getrxdma(struct mwl_softc *sc)
2553 struct mwl_jumbo *buf;
2557 * Allocate from jumbo pool.
2559 MWL_RXFREE_LOCK(sc);
2560 buf = SLIST_FIRST(&sc->sc_rxfree);
2562 DPRINTF(sc, MWL_DEBUG_ANY,
2563 "%s: out of rx dma buffers\n", __func__);
2564 sc->sc_stats.mst_rx_nodmabuf++;
2567 SLIST_REMOVE_HEAD(&sc->sc_rxfree, next);
2569 data = MWL_JUMBO_BUF2DATA(buf);
2571 MWL_RXFREE_UNLOCK(sc);
/*
 * Return an RX DMA buffer (by its data pointer) to the jumbo
 * free list; inverse of mwl_getrxdma().
 */
2575 static __inline void
2576 mwl_putrxdma(struct mwl_softc *sc, void *data)
2578 struct mwl_jumbo *buf;
2580 /* XXX bounds check data */
2581 MWL_RXFREE_LOCK(sc);
2582 buf = MWL_JUMBO_DATA2BUF(data);
2583 SLIST_INSERT_HEAD(&sc->sc_rxfree, buf, next);
2585 MWL_RXFREE_UNLOCK(sc);
/*
 * (Re)initialize an RX descriptor: attach a DMA buffer if the
 * slot has none, then reset Status/PktLen/buffer address and
 * hand ownership to the driver. If no buffer is available the
 * descriptor is marked OS-owned so the firmware skips it.
 */
2589 mwl_rxbuf_init(struct mwl_softc *sc, struct mwl_rxbuf *bf)
2591 struct mwl_rxdesc *ds;
2594 if (bf->bf_data == NULL) {
2595 bf->bf_data = mwl_getrxdma(sc);
2596 if (bf->bf_data == NULL) {
2597 /* mark descriptor to be skipped */
2598 ds->RxControl = EAGLE_RXD_CTRL_OS_OWN;
2599 /* NB: don't need PREREAD */
2600 MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE);
2601 sc->sc_stats.mst_rxbuf_failed++;
2606 * NB: DMA buffer contents is known to be unmodified
2607 * so there's no need to flush the data cache.
2615 ds->Status = EAGLE_RXD_STATUS_IDLE;
2617 ds->PktLen = htole16(MWL_AGGR_SIZE);
2619 ds->pPhysBuffData = htole32(MWL_JUMBO_DMA_ADDR(sc, bf->bf_data));
2620 /* NB: don't touch pPhysNext, set once */
2621 ds->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
2622 MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/*
 * mbuf external-storage free routine for RX DMA buffers:
 * return the buffer to the jumbo pool and, if RX was blocked
 * for lack of buffers and the pool has refilled past
 * mwl_rxdmalow, re-enable RX interrupts.
 */
2628 mwl_ext_free(struct mbuf *m, void *data, void *arg)
2630 struct mwl_softc *sc = arg;
2632 /* XXX bounds check data */
2633 mwl_putrxdma(sc, data);
2635 * If we were previously blocked by a lack of rx dma buffers
2636 * check if we now have enough to restart rx interrupt handling.
2637 * NB: we know we are called at splvm which is above splnet.
2639 if (sc->sc_rxblocked && sc->sc_nrxfree > mwl_rxdmalow) {
2640 sc->sc_rxblocked = 0;
2641 mwl_hal_intrset(sc->sc_mh, sc->sc_imask);
2643 return (EXT_FREE_OK);
/*
 * 802.11 Block Ack Request (BAR) frame layout used by
 * mwl_anyhdrsize() below for sizeof.
 * NOTE(review): leading fields (frame control/duration) and the
 * trailing BA control/seq fields are elided from this view.
 */
2646 struct mwl_frame_bar {
2649 u_int8_t i_ra[IEEE80211_ADDR_LEN];
2650 u_int8_t i_ta[IEEE80211_ADDR_LEN];
2655 * Like ieee80211_anyhdrsize, but handles BAR frames
2656 * specially so the logic below to piece the 802.11
2657 * header together works.
2660 mwl_anyhdrsize(const void *data)
2662 const struct ieee80211_frame *wh = data;
2664 if ((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) {
2665 switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
2666 case IEEE80211_FC0_SUBTYPE_CTS:
2667 case IEEE80211_FC0_SUBTYPE_ACK:
2668 return sizeof(struct ieee80211_frame_ack);
2669 case IEEE80211_FC0_SUBTYPE_BAR:
2670 return sizeof(struct mwl_frame_bar);
/* other control frames: minimal (3-address-less) header */
2672 return sizeof(struct ieee80211_frame_min);
/* non-control frames: defer to the generic net80211 calculation */
2674 return ieee80211_hdrsize(data);
/*
 * TKIP Michael MIC failure handler: locate the sender's node
 * from the 802.11 header (which sits after the f/w 2-byte
 * length field) and notify net80211 so countermeasures run.
 */
2678 mwl_handlemicerror(struct ieee80211com *ic, const uint8_t *data)
2680 const struct ieee80211_frame *wh;
2681 struct ieee80211_node *ni;
/* skip the 2-byte firmware length preceding the 802.11 header */
2683 wh = (const struct ieee80211_frame *)(data + sizeof(uint16_t));
2684 ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
2686 ieee80211_notify_michael_failure(ni->ni_vap, wh, 0);
2687 ieee80211_free_node(ni);
2692 * Convert hardware signal strength to rssi. The value
2693 * provided by the device has the noise floor added in;
2694 * we need to compensate for this but we don't have that
2695 * so we use a fixed value.
2697 * The offset of 8 is good for both 2.4 and 5GHz. The LNA
2698 * offset is already set as part of the initial gain. This
2699 * will give at least +/- 3dB for 2.4GHz and +/- 5dB for 5GHz.
2702 cvtrssi(uint8_t ssi)
2704 int rssi = (int) ssi + 8;
2705 /* XXX hack guess until we have a real noise floor */
2706 rssi = 2*(87 - rssi); /* NB: .5 dBm units */
/* clamp to [0,127] */
2707 return (rssi < 0 ? 0 : rssi > 127 ? 127 : rssi);
/*
 * Deferred RX processing (taskqueue handler). Walks up to
 * mwl_rxquota driver-owned descriptors: for each frame, swap in
 * a replacement DMA buffer, attach the old buffer to an mbuf
 * zero-copy, reconstruct the 802.11 header the f/w mangled, and
 * hand the frame to net80211. Left byte-identical — statement
 * order here (dma syncs, buffer swap, ownership handoff) is
 * load-bearing.
 */
2711 mwl_rx_proc(void *arg, int npending)
2713 #define IEEE80211_DIR_DSTODS(wh) \
2714 ((((const struct ieee80211_frame *)wh)->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
2715 struct mwl_softc *sc = arg;
2716 struct ifnet *ifp = sc->sc_ifp;
2717 struct ieee80211com *ic = ifp->if_l2com;
2718 struct mwl_rxbuf *bf;
2719 struct mwl_rxdesc *ds;
2721 struct ieee80211_qosframe *wh;
2722 struct ieee80211_qosframe_addr4 *wh4;
2723 struct ieee80211_node *ni;
2724 struct mwl_node *mn;
2725 int off, len, hdrlen, pktlen, rssi, ntodo;
2726 uint8_t *data, status;
2730 DPRINTF(sc, MWL_DEBUG_RX_PROC, "%s: pending %u rdptr 0x%x wrptr 0x%x\n",
2731 __func__, npending, RD4(sc, sc->sc_hwspecs.rxDescRead),
2732 RD4(sc, sc->sc_hwspecs.rxDescWrite));
/* bound work per invocation so other tasks can run */
2735 for (ntodo = mwl_rxquota; ntodo > 0; ntodo--) {
2737 bf = STAILQ_FIRST(&sc->sc_rxbuf);
2742 * If data allocation failed previously there
2743 * will be no buffer; try again to re-populate it.
2744 * Note the firmware will not advance to the next
2745 * descriptor with a dma buffer so we must mimic
2746 * this or we'll get out of sync.
2748 DPRINTF(sc, MWL_DEBUG_ANY,
2749 "%s: rx buf w/o dma memory\n", __func__);
2750 (void) mwl_rxbuf_init(sc, bf);
2751 sc->sc_stats.mst_rx_dmabufmissing++;
2754 MWL_RXDESC_SYNC(sc, ds,
2755 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
/* stop at the first descriptor still owned by the DMA engine */
2756 if (ds->RxControl != EAGLE_RXD_CTRL_DMA_OWN)
2759 if (sc->sc_debug & MWL_DEBUG_RECV_DESC)
2760 mwl_printrxbuf(bf, 0);
2762 status = ds->Status;
2763 if (status & EAGLE_RXD_STATUS_DECRYPT_ERR_MASK) {
2765 sc->sc_stats.mst_rx_crypto++;
2767 * NB: Check EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR
2768 * for backwards compatibility.
2770 if (status != EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR &&
2771 (status & EAGLE_RXD_STATUS_TKIP_MIC_DECRYPT_ERR)) {
2773 * MIC error, notify upper layers.
2775 bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap,
2776 BUS_DMASYNC_POSTREAD);
2777 mwl_handlemicerror(ic, data);
2778 sc->sc_stats.mst_rx_tkipmic++;
2780 /* XXX too painful to tap packets */
2784 * Sync the data buffer.
2786 len = le16toh(ds->PktLen);
2787 bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD);
2789 * The 802.11 header is provided all or in part at the front;
2790 * use it to calculate the true size of the header that we'll
2791 * construct below. We use this to figure out where to copy
2792 * payload prior to constructing the header.
2794 hdrlen = mwl_anyhdrsize(data + sizeof(uint16_t));
2795 off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4);
2797 /* calculate rssi early so we can re-use for each aggregate */
2798 rssi = cvtrssi(ds->RSSI);
2800 pktlen = hdrlen + (len - off);
2802 * NB: we know our frame is at least as large as
2803 * IEEE80211_MIN_LEN because there is a 4-address
2804 * frame at the front. Hence there's no need to
2805 * vet the packet length. If the frame in fact
2806 * is too small it should be discarded at the
2811 * Attach dma buffer to an mbuf. We tried
2812 * doing this based on the packet size (i.e.
2813 * copying small packets) but it turns out to
2814 * be a net loss. The tradeoff might be system
2815 * dependent (cache architecture is important).
2817 MGETHDR(m, M_NOWAIT, MT_DATA);
2819 DPRINTF(sc, MWL_DEBUG_ANY,
2820 "%s: no rx mbuf\n", __func__);
2821 sc->sc_stats.mst_rx_nombuf++;
2825 * Acquire the replacement dma buffer before
2826 * processing the frame. If we're out of dma
2827 * buffers we disable rx interrupts and wait
2828 * for the free pool to reach mlw_rxdmalow buffers
2829 * before starting to do work again. If the firmware
2830 * runs out of descriptors then it will toss frames
2831 * which is better than our doing it as that can
2832 * starve our processing. It is also important that
2833 * we always process rx'd frames in case they are
2834 * A-MPDU as otherwise the host's view of the BA
2835 * window may get out of sync with the firmware.
2837 newdata = mwl_getrxdma(sc);
2838 if (newdata == NULL) {
2839 /* NB: stat+msg in mwl_getrxdma */
2841 /* disable RX interrupt and mark state */
2842 mwl_hal_intrset(sc->sc_mh,
2843 sc->sc_imask &~ MACREG_A2HRIC_BIT_RX_RDY);
2844 sc->sc_rxblocked = 1;
/* ask net80211 to free queued frames so buffers come back */
2845 ieee80211_drain(ic);
2846 /* XXX check rxblocked and immediately start again? */
2849 bf->bf_data = newdata;
2851 * Attach the dma buffer to the mbuf;
2852 * mwl_rxbuf_init will re-setup the rx
2853 * descriptor using the replacement dma
2854 * buffer we just installed above.
2856 MEXTADD(m, data, MWL_AGGR_SIZE, mwl_ext_free,
2857 data, sc, 0, EXT_NET_DRV);
2858 m->m_data += off - hdrlen;
2859 m->m_pkthdr.len = m->m_len = pktlen;
2860 m->m_pkthdr.rcvif = ifp;
2861 /* NB: dma buffer assumed read-only */
2864 * Piece 802.11 header together.
2866 wh = mtod(m, struct ieee80211_qosframe *);
2867 /* NB: don't need to do this sometimes but ... */
2868 /* XXX special case so we can memcpy after m_devget? */
2869 ovbcopy(data + sizeof(uint16_t), wh, hdrlen);
2870 if (IEEE80211_QOS_HAS_SEQ(wh)) {
2871 if (IEEE80211_DIR_DSTODS(wh)) {
2873 struct ieee80211_qosframe_addr4*);
/* restore the QoS control field stripped by the f/w */
2874 *(uint16_t *)wh4->i_qos = ds->QosCtrl;
2876 *(uint16_t *)wh->i_qos = ds->QosCtrl;
2880 * The f/w strips WEP header but doesn't clear
2881 * the WEP bit; mark the packet with M_WEP so
2882 * net80211 will treat the data as decrypted.
2883 * While here also clear the PWR_MGT bit since
2884 * power save is handled by the firmware and
2885 * passing this up will potentially cause the
2886 * upper layer to put a station in power save
2887 * (except when configured with MWL_HOST_PS_SUPPORT).
2889 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
2890 m->m_flags |= M_WEP;
2891 #ifdef MWL_HOST_PS_SUPPORT
2892 wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED;
2894 wh->i_fc[1] &= ~(IEEE80211_FC1_PROTECTED |
2895 IEEE80211_FC1_PWR_MGT);
2898 if (ieee80211_radiotap_active(ic)) {
2899 struct mwl_rx_radiotap_header *tap = &sc->sc_rx_th;
2902 tap->wr_rate = ds->Rate;
2903 tap->wr_antsignal = rssi + nf;
2904 tap->wr_antnoise = nf;
2906 if (IFF_DUMPPKTS_RECV(sc, wh)) {
2907 ieee80211_dump_pkt(ic, mtod(m, caddr_t),
2908 len, ds->Rate, rssi);
2913 ni = ieee80211_find_rxnode(ic,
2914 (const struct ieee80211_frame_min *) wh);
2917 #ifdef MWL_ANT_INFO_SUPPORT
/* stash per-antenna info for mwl_node_getmimoinfo */
2918 mn->mn_ai.rssi_a = ds->ai.rssi_a;
2919 mn->mn_ai.rssi_b = ds->ai.rssi_b;
2920 mn->mn_ai.rssi_c = ds->ai.rssi_c;
2921 mn->mn_ai.rsvd1 = rssi;
2923 /* tag AMPDU aggregates for reorder processing */
2924 if (ni->ni_flags & IEEE80211_NODE_HT)
2925 m->m_flags |= M_AMPDU;
2926 (void) ieee80211_input(ni, m, rssi, nf);
2927 ieee80211_free_node(ni);
2929 (void) ieee80211_input_all(ic, m, rssi, nf);
2931 /* NB: ignore ENOMEM so we process more descriptors */
2932 (void) mwl_rxbuf_init(sc, bf);
2933 bf = STAILQ_NEXT(bf, bf_list);
2938 if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 &&
2939 !IFQ_IS_EMPTY(&ifp->if_snd)) {
2940 /* NB: kick fw; the tx thread may have been preempted */
2941 mwl_hal_txstart(sc->sc_mh, 0);
2944 #undef IEEE80211_DIR_DSTODS
/*
 * Initialize a TX queue: lock, priority, free/active lists, and
 * chain each descriptor's pPhysNext to the next buffer (last one
 * wraps to the first, forming a ring).
 */
2948 mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *txq, int qnum)
2950 struct mwl_txbuf *bf, *bn;
2951 struct mwl_txdesc *ds;
2953 MWL_TXQ_LOCK_INIT(sc, txq);
2955 txq->txpri = 0; /* XXX */
2957 /* NB: q setup by mwl_txdma_setup XXX */
2958 STAILQ_INIT(&txq->free);
2960 STAILQ_FOREACH(bf, &txq->free, bf_list) {
2964 bn = STAILQ_NEXT(bf, bf_list);
/* tail wraps back to the head of the ring */
2966 bn = STAILQ_FIRST(&txq->free);
2967 ds->pPhysNext = htole32(bn->bf_daddr);
2969 STAILQ_INIT(&txq->active);
2973 * Setup a hardware data transmit queue for the specified
2974 * access control. We record the mapping from ac's
2975 * to h/w queues for use by mwl_tx_start.
2978 mwl_tx_setup(struct mwl_softc *sc, int ac, int mvtype)
2980 #define N(a) (sizeof(a)/sizeof(a[0]))
2981 struct mwl_txq *txq;
/* validate both the WME AC index and the h/w queue index */
2983 if (ac >= N(sc->sc_ac2q)) {
2984 device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
2985 ac, N(sc->sc_ac2q));
2988 if (mvtype >= MWL_NUM_TX_QUEUES) {
2989 device_printf(sc->sc_dev, "mvtype %u out of range, max %u!\n",
2990 mvtype, MWL_NUM_TX_QUEUES);
2993 txq = &sc->sc_txq[mvtype];
2994 mwl_txq_init(sc, txq, mvtype);
2995 sc->sc_ac2q[ac] = txq;
3001 * Update WME parameters for a transmit queue.
3004 mwl_txq_update(struct mwl_softc *sc, int ac)
3006 #define MWL_EXPONENT_TO_VALUE(v) ((1<<v)-1)
3007 struct ifnet *ifp = sc->sc_ifp;
3008 struct ieee80211com *ic = ifp->if_l2com;
3009 struct mwl_txq *txq = sc->sc_ac2q[ac];
3010 struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
3011 struct mwl_hal *mh = sc->sc_mh;
3012 int aifs, cwmin, cwmax, txoplim;
3014 aifs = wmep->wmep_aifsn;
3015 /* XXX in sta mode need to pass log values for cwmin/max */
/* convert log2 contention-window exponents to window sizes */
3016 cwmin = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
3017 cwmax = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
3018 txoplim = wmep->wmep_txopLimit; /* NB: units of 32us */
3020 if (mwl_hal_setedcaparams(mh, txq->qnum, cwmin, cwmax, aifs, txoplim)) {
3021 device_printf(sc->sc_dev, "unable to update hardware queue "
3022 "parameters for %s traffic!\n",
3023 ieee80211_wme_acnames[ac]);
3027 #undef MWL_EXPONENT_TO_VALUE
3031 * Callback from the 802.11 layer to update WME parameters.
3034 mwl_wme_update(struct ieee80211com *ic)
3036 struct mwl_softc *sc = ic->ic_ifp->if_softc;
/* push all four ACs; EIO if any single update fails */
3038 return !mwl_txq_update(sc, WME_AC_BE) ||
3039 !mwl_txq_update(sc, WME_AC_BK) ||
3040 !mwl_txq_update(sc, WME_AC_VI) ||
3041 !mwl_txq_update(sc, WME_AC_VO) ? EIO : 0;
3045 * Reclaim resources for a setup queue.
3048 mwl_tx_cleanupq(struct mwl_softc *sc, struct mwl_txq *txq)
3051 MWL_TXQ_LOCK_DESTROY(txq);
3055 * Reclaim all tx queue resources.
3058 mwl_tx_cleanup(struct mwl_softc *sc)
3062 for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3063 mwl_tx_cleanupq(sc, &sc->sc_txq[i]);
/*
 * DMA-map an outbound mbuf chain into bf->bf_segs. If the chain
 * needs more than MWL_TXDESC segments, collapse/defragment it
 * and retry once; null packets are discarded. On success the
 * map is synced PREWRITE for the device.
 */
3067 mwl_tx_dmasetup(struct mwl_softc *sc, struct mwl_txbuf *bf, struct mbuf *m0)
3073 * Load the DMA map so any coalescing is done. This
3074 * also calculates the number of descriptors we need.
3076 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
3077 bf->bf_segs, &bf->bf_nseg,
3079 if (error == EFBIG) {
3080 /* XXX packet requires too many descriptors */
/* sentinel: force the linearize path below */
3081 bf->bf_nseg = MWL_TXDESC+1;
3082 } else if (error != 0) {
3083 sc->sc_stats.mst_tx_busdma++;
3088 * Discard null packets and check for packets that
3089 * require too many TX descriptors. We try to convert
3090 * the latter to a cluster.
3092 if (error == EFBIG) { /* too many desc's, linearize */
3093 sc->sc_stats.mst_tx_linear++;
3095 m = m_collapse(m0, M_NOWAIT, MWL_TXDESC);
3097 m = m_defrag(m0, M_NOWAIT);
3101 sc->sc_stats.mst_tx_nombuf++;
3105 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
3106 bf->bf_segs, &bf->bf_nseg,
3109 sc->sc_stats.mst_tx_busdma++;
3113 KASSERT(bf->bf_nseg <= MWL_TXDESC,
3114 ("too many segments after defrag; nseg %u", bf->bf_nseg));
3115 } else if (bf->bf_nseg == 0) { /* null packet, discard */
3116 sc->sc_stats.mst_tx_nodata++;
3120 DPRINTF(sc, MWL_DEBUG_XMIT, "%s: m %p len %u\n",
3121 __func__, m0, m0->m_pkthdr.len);
3122 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
/*
 * NOTE(review): body elided from this view (original lines
 * 3130-3148 missing). Presumably maps an IEEE legacy rate code
 * to the hardware rate index used in EAGLE_TXD_RATE — confirm
 * against the full source.
 */
3129 mwl_cvtlegacyrate(int rate)
3150 * Calculate fixed tx rate information per client state;
3151 * this value is suitable for writing to the Format field
3152 * of a tx descriptor.
3155 mwl_calcformat(uint8_t rate, const struct ieee80211_node *ni)
3159 fmt = SM(3, EAGLE_TXD_ANTENNA)
3160 | (IEEE80211_IS_CHAN_HT40D(ni->ni_chan) ?
3161 EAGLE_TXD_EXTCHAN_LO : EAGLE_TXD_EXTCHAN_HI);
3162 if (rate & IEEE80211_RATE_MCS) { /* HT MCS */
3163 fmt |= EAGLE_TXD_FORMAT_HT
3164 /* NB: 0x80 implicitly stripped from ucastrate */
3165 | SM(rate, EAGLE_TXD_RATE);
3166 /* XXX short/long GI may be wrong; re-check */
3167 if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
3168 fmt |= EAGLE_TXD_CHW_40
/* short GI only if the peer advertised HT40 SGI capability */
3169 | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40 ?
3170 EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
3172 fmt |= EAGLE_TXD_CHW_20
3173 | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20 ?
3174 EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
3176 } else { /* legacy rate */
3177 fmt |= EAGLE_TXD_FORMAT_LEGACY
3178 | SM(mwl_cvtlegacyrate(rate), EAGLE_TXD_RATE)
3180 /* XXX iv_flags & IEEE80211_F_SHPREAMBLE? */
3181 | (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE ?
3182 EAGLE_TXD_PREAMBLE_SHORT : EAGLE_TXD_PREAMBLE_LONG);
/*
 * Encapsulate and queue a frame for transmit: crypto encap,
 * radiotap, prepend the firmware's mwltxrec header (2-byte
 * payload length + 4-address header), DMA-map, fill the tx
 * descriptor (rate/priority/BA-stream selection), and hand
 * ownership to the firmware. Left byte-identical — ordering of
 * header fix-ups, DMA setup and ownership handoff is load-bearing.
 */
3188 mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *bf,
3191 #define IEEE80211_DIR_DSTODS(wh) \
3192 ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
3193 struct ifnet *ifp = sc->sc_ifp;
3194 struct ieee80211com *ic = ifp->if_l2com;
3195 struct ieee80211vap *vap = ni->ni_vap;
3196 int error, iswep, ismcast;
3197 int hdrlen, copyhdrlen, pktlen;
3198 struct mwl_txdesc *ds;
3199 struct mwl_txq *txq;
3200 struct ieee80211_frame *wh;
3201 struct mwltxrec *tr;
3202 struct mwl_node *mn;
3208 wh = mtod(m0, struct ieee80211_frame *);
3209 iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
3210 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
3211 hdrlen = ieee80211_anyhdrsize(wh);
3212 copyhdrlen = hdrlen;
3213 pktlen = m0->m_pkthdr.len;
/* pull the QoS field aside; the f/w header carries no QoS bytes */
3214 if (IEEE80211_QOS_HAS_SEQ(wh)) {
3215 if (IEEE80211_DIR_DSTODS(wh)) {
3217 (((struct ieee80211_qosframe_addr4 *) wh)->i_qos);
3218 copyhdrlen -= sizeof(qos);
3221 (((struct ieee80211_qosframe *) wh)->i_qos);
3226 const struct ieee80211_cipher *cip;
3227 struct ieee80211_key *k;
3230 * Construct the 802.11 header+trailer for an encrypted
3231 * frame. The only reason this can fail is because of an
3232 * unknown or unsupported cipher/key type.
3234 * NB: we do this even though the firmware will ignore
3235 * what we've done for WEP and TKIP as we need the
3236 * ExtIV filled in for CCMP and this also adjusts
3237 * the headers which simplifies our work below.
3239 k = ieee80211_crypto_encap(ni, m0);
3242 * This can happen when the key is yanked after the
3243 * frame was queued. Just discard the frame; the
3244 * 802.11 layer counts failures and provides
3245 * debugging/diagnostics.
3251 * Adjust the packet length for the crypto additions
3252 * done during encap and any other bits that the f/w
3253 * will add later on.
3256 pktlen += cip->ic_header + cip->ic_miclen + cip->ic_trailer;
3258 /* packet header may have moved, reset our local pointer */
3259 wh = mtod(m0, struct ieee80211_frame *);
3262 if (ieee80211_radiotap_active_vap(vap)) {
3263 sc->sc_tx_th.wt_flags = 0; /* XXX */
3265 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3267 sc->sc_tx_th.wt_rate = ds->DataRate;
3269 sc->sc_tx_th.wt_txpower = ni->ni_txpower;
3270 sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
3272 ieee80211_radiotap_tx(vap, m0);
3275 * Copy up/down the 802.11 header; the firmware requires
3276 * we present a 2-byte payload length followed by a
3277 * 4-address header (w/o QoS), followed (optionally) by
3278 * any WEP/ExtIV header (but only filled in for CCMP).
3279 * We are assured the mbuf has sufficient headroom to
3280 * prepend in-place by the setup of ic_headroom in
3283 if (hdrlen < sizeof(struct mwltxrec)) {
3284 const int space = sizeof(struct mwltxrec) - hdrlen;
3285 if (M_LEADINGSPACE(m0) < space) {
3286 /* NB: should never happen */
3287 device_printf(sc->sc_dev,
3288 "not enough headroom, need %d found %zd, "
3289 "m_flags 0x%x m_len %d\n",
3290 space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len);
3291 ieee80211_dump_pkt(ic,
3292 mtod(m0, const uint8_t *), m0->m_len, 0, -1);
3294 sc->sc_stats.mst_tx_noheadroom++;
3297 M_PREPEND(m0, space, M_NOWAIT);
3299 tr = mtod(m0, struct mwltxrec *);
3300 if (wh != (struct ieee80211_frame *) &tr->wh)
3301 ovbcopy(wh, &tr->wh, hdrlen);
3303 * Note: the "firmware length" is actually the length
3304 * of the fully formed "802.11 payload". That is, it's
3305 * everything except for the 802.11 header. In particular
3306 * this includes all crypto material including the MIC!
3308 tr->fwlen = htole16(pktlen - hdrlen);
3311 * Load the DMA map so any coalescing is done. This
3312 * also calculates the number of descriptors we need.
3314 error = mwl_tx_dmasetup(sc, bf, m0);
3316 /* NB: stat collected in mwl_tx_dmasetup */
3317 DPRINTF(sc, MWL_DEBUG_XMIT,
3318 "%s: unable to setup dma\n", __func__);
3321 bf->bf_node = ni; /* NB: held reference */
3322 m0 = bf->bf_m; /* NB: may have changed */
3323 tr = mtod(m0, struct mwltxrec *);
3324 wh = (struct ieee80211_frame *)&tr->wh;
3327 * Formulate tx descriptor.
3332 ds->QosCtrl = qos; /* NB: already little-endian */
3335 * NB: multiframes should be zero because the descriptors
3336 * are initialized to zero. This should handle the case
3337 * where the driver is built with MWL_TXDESC=1 but we are
3338 * using firmware with multi-segment support.
3340 ds->PktPtr = htole32(bf->bf_segs[0].ds_addr);
3341 ds->PktLen = htole16(bf->bf_segs[0].ds_len);
3343 ds->multiframes = htole32(bf->bf_nseg);
3344 ds->PktLen = htole16(m0->m_pkthdr.len);
3345 for (i = 0; i < bf->bf_nseg; i++) {
3346 ds->PktPtrArray[i] = htole32(bf->bf_segs[i].ds_addr);
3347 ds->PktLenArray[i] = htole16(bf->bf_segs[i].ds_len);
3350 /* NB: pPhysNext, DataRate, and SapPktInfo setup once, don't touch */
3353 ds->ack_wcb_addr = 0;
3357 * Select transmit rate.
3359 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
3360 case IEEE80211_FC0_TYPE_MGT:
3361 sc->sc_stats.mst_tx_mgmt++;
3363 case IEEE80211_FC0_TYPE_CTL:
3364 /* NB: assign to BE q to avoid bursting */
3365 ds->TxPriority = MWL_WME_AC_BE;
3367 case IEEE80211_FC0_TYPE_DATA:
3369 const struct ieee80211_txparam *tp = ni->ni_txparms;
3371 * EAPOL frames get forced to a fixed rate and w/o
3372 * aggregation; otherwise check for any fixed rate
3373 * for the client (may depend on association state).
3375 if (m0->m_flags & M_EAPOL) {
3376 const struct mwl_vap *mvp = MWL_VAP_CONST(vap);
3377 ds->Format = mvp->mv_eapolformat;
3379 EAGLE_TXD_FIXED_RATE | EAGLE_TXD_DONT_AGGR);
3380 } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3381 /* XXX pre-calculate per node */
3382 ds->Format = htole16(
3383 mwl_calcformat(tp->ucastrate, ni));
3384 ds->pad = htole16(EAGLE_TXD_FIXED_RATE);
3386 /* NB: EAPOL frames will never have qos set */
3388 ds->TxPriority = txq->qnum;
/* route onto an established BA stream's queue if TID matches */
3390 else if (mwl_bastream_match(&mn->mn_ba[3], qos))
3391 ds->TxPriority = mn->mn_ba[3].txq;
3394 else if (mwl_bastream_match(&mn->mn_ba[2], qos))
3395 ds->TxPriority = mn->mn_ba[2].txq;
3398 else if (mwl_bastream_match(&mn->mn_ba[1], qos))
3399 ds->TxPriority = mn->mn_ba[1].txq;
3402 else if (mwl_bastream_match(&mn->mn_ba[0], qos))
3403 ds->TxPriority = mn->mn_ba[0].txq;
3406 ds->TxPriority = txq->qnum;
3408 ds->TxPriority = txq->qnum;
3411 if_printf(ifp, "bogus frame type 0x%x (%s)\n",
3412 wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
3413 sc->sc_stats.mst_tx_badframetype++;
3418 if (IFF_DUMPPKTS_XMIT(sc))
3419 ieee80211_dump_pkt(ic,
3420 mtod(m0, const uint8_t *)+sizeof(uint16_t),
3421 m0->m_len - sizeof(uint16_t), ds->DataRate, -1);
/* hand the descriptor to the firmware and start the watchdog */
3424 ds->Status = htole32(EAGLE_TXD_STATUS_FW_OWNED);
3425 STAILQ_INSERT_TAIL(&txq->active, bf, bf_list);
3426 MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3429 sc->sc_tx_timer = 5;
3430 MWL_TXQ_UNLOCK(txq);
3433 #undef IEEE80211_DIR_DSTODS
/*
 * Map a hardware legacy rate index back to an IEEE rate code
 * (units of 500 kbps); 0 for out-of-range indices.
 */
3437 mwl_cvtlegacyrix(int rix)
3439 #define N(x) (sizeof(x)/sizeof(x[0]))
3440 static const int ieeerates[] =
3441 { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
3442 return (rix < N(ieeerates) ? ieeerates[rix] : 0);
3447 * Process completed xmit descriptors from the specified queue.
3450 mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq)
3452 #define EAGLE_TXD_STATUS_MCAST \
3453 (EAGLE_TXD_STATUS_MULTICAST_TX | EAGLE_TXD_STATUS_BROADCAST_TX)
3454 struct ifnet *ifp = sc->sc_ifp;
3455 struct ieee80211com *ic = ifp->if_l2com;
3456 struct mwl_txbuf *bf;
3457 struct mwl_txdesc *ds;
3458 struct ieee80211_node *ni;
3459 struct mwl_node *an;
3463 DPRINTF(sc, MWL_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum);
3464 for (nreaped = 0;; nreaped++) {
3466 bf = STAILQ_FIRST(&txq->active);
3468 MWL_TXQ_UNLOCK(txq);
3472 MWL_TXDESC_SYNC(txq, ds,
3473 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
/* stop when the firmware still owns the head descriptor */
3474 if (ds->Status & htole32(EAGLE_TXD_STATUS_FW_OWNED)) {
3475 MWL_TXQ_UNLOCK(txq);
3478 STAILQ_REMOVE_HEAD(&txq->active, bf_list);
3479 MWL_TXQ_UNLOCK(txq);
3482 if (sc->sc_debug & MWL_DEBUG_XMIT_DESC)
3483 mwl_printtxbuf(bf, txq->qnum, nreaped);
3488 status = le32toh(ds->Status);
3489 if (status & EAGLE_TXD_STATUS_OK) {
3490 uint16_t Format = le16toh(ds->Format);
3491 uint8_t txant = MS(Format, EAGLE_TXD_ANTENNA);
3493 sc->sc_stats.mst_ant_tx[txant]++;
3494 if (status & EAGLE_TXD_STATUS_OK_RETRY)
3495 sc->sc_stats.mst_tx_retries++;
3496 if (status & EAGLE_TXD_STATUS_OK_MORE_RETRY)
3497 sc->sc_stats.mst_tx_mretries++;
3498 if (txq->qnum >= MWL_WME_AC_VO)
3499 ic->ic_wme.wme_hipri_traffic++;
/* record the rate the f/w actually used for this frame */
3500 ni->ni_txrate = MS(Format, EAGLE_TXD_RATE);
3501 if ((Format & EAGLE_TXD_FORMAT_HT) == 0) {
3502 ni->ni_txrate = mwl_cvtlegacyrix(
3505 ni->ni_txrate |= IEEE80211_RATE_MCS;
3506 sc->sc_stats.mst_tx_rate = ni->ni_txrate;
3508 if (status & EAGLE_TXD_STATUS_FAILED_LINK_ERROR)
3509 sc->sc_stats.mst_tx_linkerror++;
3510 if (status & EAGLE_TXD_STATUS_FAILED_XRETRY)
3511 sc->sc_stats.mst_tx_xretries++;
3512 if (status & EAGLE_TXD_STATUS_FAILED_AGING)
3513 sc->sc_stats.mst_tx_aging++;
3514 if (bf->bf_m->m_flags & M_FF)
3515 sc->sc_stats.mst_ff_txerr++;
3518 * Do any tx complete callback. Note this must
3519 * be done before releasing the node reference.
3520 * XXX no way to figure out if frame was ACK'd
3522 if (bf->bf_m->m_flags & M_TXCB) {
3523 /* XXX strip fw len in case header inspected */
3524 m_adj(bf->bf_m, sizeof(uint16_t));
3525 ieee80211_process_callback(ni, bf->bf_m,
3526 (status & EAGLE_TXD_STATUS_OK) == 0);
3529 * Reclaim reference to node.
3531 * NB: the node may be reclaimed here if, for example
3532 * this is a DEAUTH message that was sent and the
3533 * node was timed out due to inactivity.
3535 ieee80211_free_node(ni);
3537 ds->Status = htole32(EAGLE_TXD_STATUS_IDLE);
3539 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
3540 BUS_DMASYNC_POSTWRITE);
3541 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3544 mwl_puttxbuf_tail(txq, bf);
3547 #undef EAGLE_TXD_STATUS_MCAST
3551 * Deferred processing of transmit interrupt; special-cased
3552 * for four hardware queues, 0-3.
3555 mwl_tx_proc(void *arg, int npending)
3557 struct mwl_softc *sc = arg;
3558 struct ifnet *ifp = sc->sc_ifp;
3562 * Process each active queue.
3565 if (!STAILQ_EMPTY(&sc->sc_txq[0].active))
3566 nreaped += mwl_tx_processq(sc, &sc->sc_txq[0]);
3567 if (!STAILQ_EMPTY(&sc->sc_txq[1].active))
3568 nreaped += mwl_tx_processq(sc, &sc->sc_txq[1]);
3569 if (!STAILQ_EMPTY(&sc->sc_txq[2].active))
3570 nreaped += mwl_tx_processq(sc, &sc->sc_txq[2]);
3571 if (!STAILQ_EMPTY(&sc->sc_txq[3].active))
3572 nreaped += mwl_tx_processq(sc, &sc->sc_txq[3]);
/* if we reaped anything, clear OACTIVE/watchdog and restart tx */
3575 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3576 sc->sc_tx_timer = 0;
3577 if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
3578 /* NB: kick fw; the tx thread may have been preempted */
3579 mwl_hal_txstart(sc->sc_mh, 0);
/*
 * Forcibly reclaim every buffer on a queue's active list:
 * unload DMA maps, drop node references, and return buffers to
 * the free list. Assumes output is already stopped.
 */
3586 mwl_tx_draintxq(struct mwl_softc *sc, struct mwl_txq *txq)
3588 struct ieee80211_node *ni;
3589 struct mwl_txbuf *bf;
3593 * NB: this assumes output has been stopped and
3594 * we do not need to block mwl_tx_tasklet
3596 for (ix = 0;; ix++) {
3598 bf = STAILQ_FIRST(&txq->active);
3600 MWL_TXQ_UNLOCK(txq);
3603 STAILQ_REMOVE_HEAD(&txq->active, bf_list);
3604 MWL_TXQ_UNLOCK(txq);
3606 if (sc->sc_debug & MWL_DEBUG_RESET) {
3607 struct ifnet *ifp = sc->sc_ifp;
3608 struct ieee80211com *ic = ifp->if_l2com;
3609 const struct mwltxrec *tr =
3610 mtod(bf->bf_m, const struct mwltxrec *);
3611 mwl_printtxbuf(bf, txq->qnum, ix);
3612 ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh,
3613 bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1);
3615 #endif /* MWL_DEBUG */
3616 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3620 * Reclaim node reference.
3622 ieee80211_free_node(ni);
3626 mwl_puttxbuf_tail(txq, bf);
3631 * Drain the transmit queues and reclaim resources.
3634 mwl_draintxq(struct mwl_softc *sc)
3636 struct ifnet *ifp = sc->sc_ifp;
3639 for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3640 mwl_tx_draintxq(sc, &sc->sc_txq[i]);
/* all queues empty: allow new output and stop the watchdog */
3641 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3642 sc->sc_tx_timer = 0;
3647 * Reset the transmit queues to a pristine state after a fw download.
/* Reset every tx queue to a pristine state (used after fw reload). */
3650 mwl_resettxq(struct mwl_softc *sc)
3654 for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3655 mwl_txq_reset(sc, &sc->sc_txq[i]);
3657 #endif /* MWL_DIAGAPI */
3660 * Clear the transmit queues of any frames submitted for the
3661 * specified vap. This is done when the vap is deleted so we
3662 * don't potentially reference the vap after it is gone.
3663 * Note we cannot remove the frames; we only reclaim the node
/*
 * Release node references held by queued frames that belong to a
 * vap being destroyed.  Frames stay queued (see comment above); only
 * the node pointers are reclaimed so the dead vap is not referenced.
 * NB(review): the matching MWL_TXQ_LOCK and the bf_node clearing line
 * are elided from this excerpt.
 */
3667 mwl_cleartxq(struct mwl_softc *sc, struct ieee80211vap *vap)
3669 struct mwl_txq *txq;
3670 struct mwl_txbuf *bf;
3673 for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
3674 txq = &sc->sc_txq[i];
3676 STAILQ_FOREACH(bf, &txq->active, bf_list) {
3677 struct ieee80211_node *ni = bf->bf_node;
/* Only frames destined to nodes of the dying vap are touched. */
3678 if (ni != NULL && ni->ni_vap == vap) {
3680 ieee80211_free_node(ni);
3683 MWL_TXQ_UNLOCK(txq);
/*
 * Intercept received 802.11 action frames: HT MIMO power-save
 * notifications are pushed to the firmware before handing the frame
 * to the saved net80211 handler (sc_recv_action).
 */
3688 mwl_recv_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh,
3689 const uint8_t *frm, const uint8_t *efrm)
3691 struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3692 const struct ieee80211_action *ia;
3694 ia = (const struct ieee80211_action *) frm;
3695 if (ia->ia_category == IEEE80211_ACTION_CAT_HT &&
3696 ia->ia_action == IEEE80211_ACTION_HT_MIMOPWRSAVE) {
3697 const struct ieee80211_action_ht_mimopowersave *mps =
3698 (const struct ieee80211_action_ht_mimopowersave *) ia;
/* Tell the fw the station's MIMO PS enable bit and mode. */
3700 mwl_hal_setmimops(sc->sc_mh, ni->ni_macaddr,
3701 mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA,
3702 MS(mps->am_control, IEEE80211_A_HT_MIMOPWRSAVE_MODE));
/* Chain to the original net80211 action handler. */
3705 return sc->sc_recv_action(ni, wh, frm, efrm);
/*
 * ADDBA request hook: before letting net80211 send the request,
 * reserve a firmware BA stream slot for this station/TID.  If no
 * slot or fw stream is available, fail so no A-MPDU aggregation
 * is attempted.  On success the slot is stashed in tap->txa_private
 * and the current sequence number is fetched from the firmware.
 * NB(review): slot-search control flow (if/else chain) is partially
 * elided in this excerpt.
 */
3709 mwl_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
3710 int dialogtoken, int baparamset, int batimeout)
3712 struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3713 struct ieee80211vap *vap = ni->ni_vap;
3714 struct mwl_node *mn = MWL_NODE(ni);
3715 struct mwl_bastate *bas;
3717 bas = tap->txa_private;
3719 const MWL_HAL_BASTREAM *sp;
3721 * Check for a free BA stream slot.
/* Probe the per-node slots from highest index down. */
3724 if (mn->mn_ba[3].bastream == NULL)
3725 bas = &mn->mn_ba[3];
3729 if (mn->mn_ba[2].bastream == NULL)
3730 bas = &mn->mn_ba[2];
3734 if (mn->mn_ba[1].bastream == NULL)
3735 bas = &mn->mn_ba[1];
3739 if (mn->mn_ba[0].bastream == NULL)
3740 bas = &mn->mn_ba[0];
3744 /* sta already has max BA streams */
3745 /* XXX assign BA stream to highest priority tid */
3746 DPRINTF(sc, MWL_DEBUG_AMPDU,
3747 "%s: already has max bastreams\n", __func__);
3748 sc->sc_stats.mst_ampdu_reject++;
3751 /* NB: no held reference to ni */
/* Ask the fw to allocate the actual BA stream for this TID. */
3752 sp = mwl_hal_bastream_alloc(MWL_VAP(vap)->mv_hvap,
3753 (baparamset & IEEE80211_BAPS_POLICY_IMMEDIATE) != 0,
3754 ni->ni_macaddr, tap->txa_tid, ni->ni_htparam,
3758 * No available stream, return 0 so no
3759 * a-mpdu aggregation will be done.
3761 DPRINTF(sc, MWL_DEBUG_AMPDU,
3762 "%s: no bastream available\n", __func__);
3763 sc->sc_stats.mst_ampdu_nostream++;
3766 DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: alloc bastream %p\n",
3768 /* NB: qos is left zero so we won't match in mwl_tx_start */
3770 tap->txa_private = bas;
3772 /* fetch current seq# from the firmware; if available */
3773 if (mwl_hal_bastream_get_seqno(sc->sc_mh, bas->bastream,
3774 vap->iv_opmode == IEEE80211_M_STA ? vap->iv_myaddr : ni->ni_macaddr,
3775 &tap->txa_start) != 0)
/* Hand off to the saved net80211 ADDBA request path. */
3777 return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout);
/*
 * ADDBA response hook: on success, instruct the firmware to create
 * the BA stream that was pre-allocated in mwl_addba_request; on
 * failure (NAK or fw create error), destroy the stream and release
 * the slot so aggregation is not attempted.
 */
3781 mwl_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
3782 int code, int baparamset, int batimeout)
3784 struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3785 struct mwl_bastate *bas;
3787 bas = tap->txa_private;
3789 /* XXX should not happen */
3790 DPRINTF(sc, MWL_DEBUG_AMPDU,
3791 "%s: no BA stream allocated, TID %d\n",
3792 __func__, tap->txa_tid);
3793 sc->sc_stats.mst_addba_nostream++;
3796 if (code == IEEE80211_STATUS_SUCCESS) {
3797 struct ieee80211vap *vap = ni->ni_vap;
3801 * Tell the firmware to setup the BA stream;
3802 * we know resources are available because we
3803 * pre-allocated one before forming the request.
/* Peer's advertised reorder buffer size; 0 means "use max". */
3805 bufsiz = MS(baparamset, IEEE80211_BAPS_BUFSIZ);
3807 bufsiz = IEEE80211_AGGR_BAWMAX;
3808 error = mwl_hal_bastream_create(MWL_VAP(vap)->mv_hvap,
3809 bas->bastream, bufsiz, bufsiz, tap->txa_start);
3812 * Setup failed, return immediately so no a-mpdu
3813 * aggregation will be done.
3815 mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
3816 mwl_bastream_free(bas);
3817 tap->txa_private = NULL;
3819 DPRINTF(sc, MWL_DEBUG_AMPDU,
3820 "%s: create failed, error %d, bufsiz %d TID %d "
3821 "htparam 0x%x\n", __func__, error, bufsiz,
3822 tap->txa_tid, ni->ni_htparam);
3823 sc->sc_stats.mst_bacreate_failed++;
3826 /* NB: cache txq to avoid ptr indirect */
3827 mwl_bastream_setup(bas, tap->txa_tid, bas->bastream->txq);
3828 DPRINTF(sc, MWL_DEBUG_AMPDU,
3829 "%s: bastream %p assigned to txq %d TID %d bufsiz %d "
3830 "htparam 0x%x\n", __func__, bas->bastream,
3831 bas->txq, tap->txa_tid, bufsiz, ni->ni_htparam);
3834 * Other side NAK'd us; return the resources.
3836 DPRINTF(sc, MWL_DEBUG_AMPDU,
3837 "%s: request failed with code %d, destroy bastream %p\n",
3838 __func__, code, bas->bastream);
3839 mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
3840 mwl_bastream_free(bas);
3841 tap->txa_private = NULL;
3843 /* NB: firmware sends BAR so we don't need to */
/* Always chain to the saved net80211 response handler. */
3844 return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
/*
 * ADDBA teardown hook: destroy the firmware BA stream (if one was
 * set up) and release the per-node slot, then chain to net80211.
 */
3848 mwl_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
3850 struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3851 struct mwl_bastate *bas;
3853 bas = tap->txa_private;
3855 DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: destroy bastream %p\n",
3856 __func__, bas->bastream);
3857 mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
3858 mwl_bastream_free(bas);
3859 tap->txa_private = NULL;
3861 sc->sc_addba_stop(ni, tap);
3865 * Setup the rx data structures. This should only be
3866 * done once or we may get out of sync with the firmware.
/*
 * One-time rx setup: initialize every rx buffer, chain the hardware
 * descriptors into a circular list via pPhysNext, and point the fw
 * at the head.  Guarded by sc_recvsetup so it runs only once (doing
 * it twice would desynchronize driver and firmware — see above).
 */
3869 mwl_startrecv(struct mwl_softc *sc)
3871 if (!sc->sc_recvsetup) {
3872 struct mwl_rxbuf *bf, *prev;
3873 struct mwl_rxdesc *ds;
3876 STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
3877 int error = mwl_rxbuf_init(sc, bf);
3879 DPRINTF(sc, MWL_DEBUG_RECV,
3880 "%s: mwl_rxbuf_init failed %d\n",
/* Link the previous descriptor to this buffer's phys addr. */
3886 ds->pPhysNext = htole32(bf->bf_daddr);
/* Close the ring: last descriptor points back at the first. */
3893 htole32(STAILQ_FIRST(&sc->sc_rxbuf)->bf_daddr);
3895 sc->sc_recvsetup = 1;
3897 mwl_mode_init(sc); /* set filters, etc. */
/*
 * Map a vap's configuration and operating channel onto the firmware
 * AP mode enumeration (pure-N, A+N, G+N, B+G+N, pure-G, mixed, etc.).
 */
3901 static MWL_HAL_APMODE
3902 mwl_getapmode(const struct ieee80211vap *vap, struct ieee80211_channel *chan)
3904 MWL_HAL_APMODE mode;
3906 if (IEEE80211_IS_CHAN_HT(chan)) {
3907 if (vap->iv_flags_ht & IEEE80211_FHT_PUREN)
3908 mode = AP_MODE_N_ONLY;
3909 else if (IEEE80211_IS_CHAN_5GHZ(chan))
3910 mode = AP_MODE_AandN;
3911 else if (vap->iv_flags & IEEE80211_F_PUREG)
3912 mode = AP_MODE_GandN;
3914 mode = AP_MODE_BandGandN;
3915 } else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3916 if (vap->iv_flags & IEEE80211_F_PUREG)
3917 mode = AP_MODE_G_ONLY;
3919 mode = AP_MODE_MIXED;
3920 } else if (IEEE80211_IS_CHAN_B(chan))
3921 mode = AP_MODE_B_ONLY;
3922 else if (IEEE80211_IS_CHAN_A(chan))
3923 mode = AP_MODE_A_ONLY;
3925 mode = AP_MODE_MIXED; /* XXX should not happen? */
/* Push the computed AP mode for the given channel to the firmware. */
3930 mwl_setapmode(struct ieee80211vap *vap, struct ieee80211_channel *chan)
3932 struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
3933 return mwl_hal_setapmode(hvap, mwl_getapmode(vap, chan));
3937 * Set/change channels.
/*
 * Switch the hardware to a new channel: quiesce (interrupts off,
 * tx drained), program the channel and tx power in the hal, refresh
 * mcast/mgmt rates, update the radiotap channel state, then re-enable
 * interrupts.  Tx power is capped by min(2*regpower, ic_txpowlimit),
 * carried in half-dBm hence the *2 / /2 dance.
 */
3940 mwl_chan_set(struct mwl_softc *sc, struct ieee80211_channel *chan)
3942 struct mwl_hal *mh = sc->sc_mh;
3943 struct ifnet *ifp = sc->sc_ifp;
3944 struct ieee80211com *ic = ifp->if_l2com;
3945 MWL_HAL_CHANNEL hchan;
3948 DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n",
3949 __func__, chan->ic_freq, chan->ic_flags);
3952 * Convert to a HAL channel description with
3953 * the flags constrained to reflect the current
3956 mwl_mapchan(&hchan, chan);
3957 mwl_hal_intrset(mh, 0); /* disable interrupts */
3959 mwl_draintxq(sc); /* clear pending tx frames */
3961 mwl_hal_setchannel(mh, &hchan);
3963 * Tx power is cap'd by the regulatory setting and
3964 * possibly a user-set limit. We pass the min of
3965 * these to the hal to apply them to the cal data
/* ic_maxregpower is dBm; internal limits are half-dBm units. */
3969 maxtxpow = 2*chan->ic_maxregpower;
3970 if (maxtxpow > ic->ic_txpowlimit)
3971 maxtxpow = ic->ic_txpowlimit;
3972 mwl_hal_settxpower(mh, &hchan, maxtxpow / 2);
3973 /* NB: potentially change mcast/mgt rates */
3974 mwl_setcurchanrates(sc);
3977 * Update internal state.
/* Radiotap headers track the current channel for tx and rx. */
3979 sc->sc_tx_th.wt_chan_freq = htole16(chan->ic_freq);
3980 sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq);
3981 if (IEEE80211_IS_CHAN_A(chan)) {
3982 sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_A);
3983 sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_A);
3984 } else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3985 sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G);
3986 sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G);
3988 sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B);
3989 sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B);
3991 sc->sc_curchan = hchan;
3992 mwl_hal_intrset(mh, sc->sc_imask);
/* net80211 scan-start callback; only logs — no hw action needed here. */
3998 mwl_scan_start(struct ieee80211com *ic)
4000 struct ifnet *ifp = ic->ic_ifp;
4001 struct mwl_softc *sc = ifp->if_softc;
4003 DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
/* net80211 scan-end callback; only logs — no hw action needed here. */
4007 mwl_scan_end(struct ieee80211com *ic)
4009 struct ifnet *ifp = ic->ic_ifp;
4010 struct mwl_softc *sc = ifp->if_softc;
4012 DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
/* net80211 channel-change callback: program ic_curchan into the hw. */
4016 mwl_set_channel(struct ieee80211com *ic)
4018 struct ifnet *ifp = ic->ic_ifp;
4019 struct mwl_softc *sc = ifp->if_softc;
4021 (void) mwl_chan_set(sc, ic->ic_curchan);
4025 * Handle a channel switch request. We inform the firmware
4026 * and mark the global state to suppress various actions.
4027 * NB: we issue only one request to the fw; we may be called
4028 * multiple times if there are multiple vap's.
/*
 * Begin a channel switch announcement: hand the new channel and CSA
 * count to the firmware once (sc_csapending suppresses duplicates
 * when multiple vaps trigger this — see comment above).
 */
4031 mwl_startcsa(struct ieee80211vap *vap)
4033 struct ieee80211com *ic = vap->iv_ic;
4034 struct mwl_softc *sc = ic->ic_ifp->if_softc;
4035 MWL_HAL_CHANNEL hchan;
4037 if (sc->sc_csapending)
4040 mwl_mapchan(&hchan, ic->ic_csa_newchan);
4041 /* 1 =>'s quiet channel */
4042 mwl_hal_setchannelswitchie(sc->sc_mh, &hchan, 1, ic->ic_csa_count);
4043 sc->sc_csapending = 1;
4047 * Plumb any static WEP key for the station. This is
4048 * necessary as we must propagate the key from the
4049 * global key table of the vap to each sta db entry.
/*
 * If the vap runs static WEP (PRIVACY set, WPA clear) with a valid
 * default tx key, plumb that key into the sta db entry for `mac'.
 */
4052 mwl_setanywepkey(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4054 if ((vap->iv_flags & (IEEE80211_F_PRIVACY|IEEE80211_F_WPA)) ==
4055 IEEE80211_F_PRIVACY &&
4056 vap->iv_def_txkey != IEEE80211_KEYIX_NONE &&
4057 vap->iv_nw_keys[vap->iv_def_txkey].wk_keyix != IEEE80211_KEYIX_NONE)
4058 (void) mwl_key_set(vap, &vap->iv_nw_keys[vap->iv_def_txkey], mac);
/*
 * Create a firmware station-db entry for a peer node.  WDS vaps have
 * no fw vap of their own, so their entries are installed through the
 * companion AP vap's handle.  Any static WEP key is re-plumbed after
 * creation because mwl_hal_newstation resets the crypto state.
 */
4062 mwl_peerstadb(struct ieee80211_node *ni, int aid, int staid, MWL_HAL_PEERINFO *pi)
4064 #define WME(ie) ((const struct ieee80211_wme_info *) ie)
4065 struct ieee80211vap *vap = ni->ni_vap;
4066 struct mwl_hal_vap *hvap;
4069 if (vap->iv_opmode == IEEE80211_M_WDS) {
4071 * WDS vap's do not have a f/w vap; instead they piggyback
4072 * on an AP vap and we must install the sta db entry and
4073 * crypto state using that AP's handle (the WDS vap has none).
4075 hvap = MWL_VAP(vap)->mv_ap_hvap;
4077 hvap = MWL_VAP(vap)->mv_hvap;
4078 error = mwl_hal_newstation(hvap, ni->ni_macaddr,
4080 ni->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT),
4081 ni->ni_ies.wme_ie != NULL ? WME(ni->ni_ies.wme_ie)->wme_info : 0);
4084 * Setup security for this station. For sta mode this is
4085 * needed even though do the same thing on transition to
4086 * AUTH state because the call to mwl_hal_newstation
4087 * clobbers the crypto state we setup.
4089 mwl_setanywepkey(vap, ni->ni_macaddr);
/* Re-plumb every valid key from the vap's global key table. */
4096 mwl_setglobalkeys(struct ieee80211vap *vap)
4098 struct ieee80211_key *wk;
4100 wk = &vap->iv_nw_keys[0];
4101 for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; wk++)
4102 if (wk->wk_keyix != IEEE80211_KEYIX_NONE)
4103 (void) mwl_key_set(vap, wk, vap->iv_myaddr);
4107 * Convert a legacy rate set to a firmware bitmask.
/*
 * Convert a legacy rate set (rates in 500 kb/s units) to the
 * firmware's rate bitmask: 1/2/5.5/11/22 Mb/s then the OFDM rates.
 */
4110 get_rate_bitmap(const struct ieee80211_rateset *rs)
4116 for (i = 0; i < rs->rs_nrates; i++)
4117 switch (rs->rs_rates[i] & IEEE80211_RATE_VAL) {
4118 case 2: rates |= 0x001; break;
4119 case 4: rates |= 0x002; break;
4120 case 11: rates |= 0x004; break;
4121 case 22: rates |= 0x008; break;
4122 case 44: rates |= 0x010; break;
4123 case 12: rates |= 0x020; break;
4124 case 18: rates |= 0x040; break;
4125 case 24: rates |= 0x080; break;
4126 case 36: rates |= 0x100; break;
4127 case 48: rates |= 0x200; break;
4128 case 72: rates |= 0x400; break;
4129 case 96: rates |= 0x800; break;
4130 case 108: rates |= 0x1000; break;
4136 * Construct an HT firmware bitmask from an HT rate set.
/* Convert an HT (MCS) rate set to a firmware bitmask; MCS 0-15 only. */
4139 get_htrate_bitmap(const struct ieee80211_htrateset *rs)
4145 for (i = 0; i < rs->rs_nrates; i++) {
4146 if (rs->rs_rates[i] < 16)
4147 rates |= 1<<rs->rs_rates[i];
4153 * Craft station database entry for station.
4154 * NB: use host byte order here, the hal handles byte swapping.
/*
 * Fill a MWL_HAL_PEERINFO from a node's capabilities and rate sets
 * (host byte order; the hal handles swapping).  HT capability bits
 * are constrained by the local vap config (SGI20/40, 40 MHz width).
 */
4156 static MWL_HAL_PEERINFO *
4157 mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni)
4159 const struct ieee80211vap *vap = ni->ni_vap;
4161 memset(pi, 0, sizeof(*pi));
4162 pi->LegacyRateBitMap = get_rate_bitmap(&ni->ni_rates);
4163 pi->CapInfo = ni->ni_capinfo;
4164 if (ni->ni_flags & IEEE80211_NODE_HT) {
4165 /* HT capabilities, etc */
4166 pi->HTCapabilitiesInfo = ni->ni_htcap;
4167 /* XXX pi.HTCapabilitiesInfo */
4168 pi->MacHTParamInfo = ni->ni_htparam;
4169 pi->HTRateBitMap = get_htrate_bitmap(&ni->ni_htrates);
4170 pi->AddHtInfo.ControlChan = ni->ni_htctlchan;
4171 pi->AddHtInfo.AddChan = ni->ni_ht2ndchan;
4172 pi->AddHtInfo.OpMode = ni->ni_htopmode;
4173 pi->AddHtInfo.stbc = ni->ni_htstbc;
4175 /* constrain according to local configuration */
4176 if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0)
4177 pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40;
4178 if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0)
4179 pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20;
4180 if (ni->ni_chw != 40)
4181 pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40;
4187 * Re-create the local sta db entry for a vap to ensure
4188 * up to date WME state is pushed to the firmware. Because
4189 * this resets crypto state this must be followed by a
4190 * reload of any keys in the global key table.
/*
 * (Re)create the local station-db entry for a vap so the firmware
 * has current WME/HT state; global keys are re-plumbed afterwards
 * because mwl_hal_newstation resets crypto state (see above).
 * NB(review): the `bss = vap->iv_bss' assignment and error checks
 * are elided from this excerpt.
 */
4193 mwl_localstadb(struct ieee80211vap *vap)
4195 #define WME(ie) ((const struct ieee80211_wme_info *) ie)
4196 struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
4197 struct ieee80211_node *bss;
4198 MWL_HAL_PEERINFO pi;
4201 switch (vap->iv_opmode) {
4202 case IEEE80211_M_STA:
/* Peer info is only meaningful once associated (RUN state). */
4204 error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0,
4205 vap->iv_state == IEEE80211_S_RUN ?
4206 mkpeerinfo(&pi, bss) : NULL,
4207 (bss->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)),
4208 bss->ni_ies.wme_ie != NULL ?
4209 WME(bss->ni_ies.wme_ie)->wme_info : 0);
4211 mwl_setglobalkeys(vap);
4213 case IEEE80211_M_HOSTAP:
4214 case IEEE80211_M_MBSS:
4215 error = mwl_hal_newstation(hvap, vap->iv_myaddr,
4216 0, 0, NULL, vap->iv_flags & IEEE80211_F_WME, 0);
4218 mwl_setglobalkeys(vap);
/*
 * net80211 state-machine hook.  Sequence:
 *  1. stop the age timer and clear radar-detection state;
 *  2. pre-transition hw work keyed on the target state (INIT/SCAN/
 *     AUTH/CSA/CAC);
 *  3. invoke the parent net80211 newstate (mv_newstate);
 *  4. post-transition work for RUN (sta db refresh, beacons/radar for
 *     AP/MBSS, assoc id for STA, EAPOL format for WDS, CS mode, age
 *     timer) and SLEEP/DWDS bookkeeping.
 * NB(review): many short lines (braces, `return', `break', iv_bss
 * fetches) are elided from this excerpt; consult the full file before
 * reasoning about exact control flow.
 */
4229 mwl_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4231 struct mwl_vap *mvp = MWL_VAP(vap);
4232 struct mwl_hal_vap *hvap = mvp->mv_hvap;
4233 struct ieee80211com *ic = vap->iv_ic;
4234 struct ieee80211_node *ni = NULL;
4235 struct ifnet *ifp = ic->ic_ifp;
4236 struct mwl_softc *sc = ifp->if_softc;
4237 struct mwl_hal *mh = sc->sc_mh;
4238 enum ieee80211_state ostate = vap->iv_state;
4241 DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: %s -> %s\n",
4242 vap->iv_ifp->if_xname, __func__,
4243 ieee80211_state_name[ostate], ieee80211_state_name[nstate]);
4245 callout_stop(&sc->sc_timer);
4247 * Clear current radar detection state.
4249 if (ostate == IEEE80211_S_CAC) {
4250 /* stop quiet mode radar detection */
4251 mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_STOP);
4252 } else if (sc->sc_radarena) {
4253 /* stop in-service radar detection */
4254 mwl_hal_setradardetection(mh, DR_DFS_DISABLE);
4255 sc->sc_radarena = 0;
4258 * Carry out per-state actions before doing net80211 work.
4260 if (nstate == IEEE80211_S_INIT) {
4261 /* NB: only ap+sta vap's have a fw entity */
4264 } else if (nstate == IEEE80211_S_SCAN) {
4265 mwl_hal_start(hvap);
4266 /* NB: this disables beacon frames */
4267 mwl_hal_setinframode(hvap);
4268 } else if (nstate == IEEE80211_S_AUTH) {
4270 * Must create a sta db entry in case a WEP key needs to
4271 * be plumbed. This entry will be overwritten if we
4272 * associate; otherwise it will be reclaimed on node free.
4275 MWL_NODE(ni)->mn_hvap = hvap;
4276 (void) mwl_peerstadb(ni, 0, 0, NULL);
4277 } else if (nstate == IEEE80211_S_CSA) {
4278 /* XXX move to below? */
4279 if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
4280 vap->iv_opmode == IEEE80211_M_MBSS)
4282 } else if (nstate == IEEE80211_S_CAC) {
4283 /* XXX move to below? */
4284 /* stop ap xmit and enable quiet mode radar detection */
4285 mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_START);
4289 * Invoke the parent method to do net80211 work.
4291 error = mvp->mv_newstate(vap, nstate, arg);
4294 * Carry out work that must be done after net80211 runs;
4295 * this work requires up to date state (e.g. iv_bss).
4297 if (error == 0 && nstate == IEEE80211_S_RUN) {
4298 /* NB: collect bss node again, it may have changed */
4301 DPRINTF(sc, MWL_DEBUG_STATE,
4302 "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
4303 "capinfo 0x%04x chan %d\n",
4304 vap->iv_ifp->if_xname, __func__, vap->iv_flags,
4305 ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo,
4306 ieee80211_chan2ieee(ic, ic->ic_curchan));
4309 * Recreate local sta db entry to update WME/HT state.
4311 mwl_localstadb(vap);
4312 switch (vap->iv_opmode) {
4313 case IEEE80211_M_HOSTAP:
4314 case IEEE80211_M_MBSS:
4315 if (ostate == IEEE80211_S_CAC) {
4316 /* enable in-service radar detection */
4317 mwl_hal_setradardetection(mh,
4318 DR_IN_SERVICE_MONITOR_START);
4319 sc->sc_radarena = 1;
4322 * Allocate and setup the beacon frame
4323 * (and related state).
4325 error = mwl_reset_vap(vap, IEEE80211_S_RUN);
4327 DPRINTF(sc, MWL_DEBUG_STATE,
4328 "%s: beacon setup failed, error %d\n",
4332 /* NB: must be after setting up beacon */
4333 mwl_hal_start(hvap);
4335 case IEEE80211_M_STA:
4336 DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: aid 0x%x\n",
4337 vap->iv_ifp->if_xname, __func__, ni->ni_associd);
4339 * Set state now that we're associated.
4341 mwl_hal_setassocid(hvap, ni->ni_bssid, ni->ni_associd);
4343 mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
/* First DWDS sta vap to reach RUN enables fw DWDS support. */
4344 if ((vap->iv_flags & IEEE80211_F_DWDS) &&
4345 sc->sc_ndwdsvaps++ == 0)
4346 mwl_hal_setdwds(mh, 1);
4348 case IEEE80211_M_WDS:
4349 DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: bssid %s\n",
4350 vap->iv_ifp->if_xname, __func__,
4351 ether_sprintf(ni->ni_bssid));
4352 mwl_seteapolformat(vap);
4358 * Set CS mode according to operating channel;
4359 * this mostly an optimization for 5GHz.
4361 * NB: must follow mwl_hal_start which resets csmode
4363 if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
4364 mwl_hal_setcsmode(mh, CSMODE_AGGRESSIVE);
4366 mwl_hal_setcsmode(mh, CSMODE_AUTO_ENA);
4368 * Start timer to prod firmware.
4370 if (sc->sc_ageinterval != 0)
4371 callout_reset(&sc->sc_timer, sc->sc_ageinterval*hz,
4372 mwl_agestations, sc);
4373 } else if (nstate == IEEE80211_S_SLEEP) {
4374 /* XXX set chip in power save */
4375 } else if ((vap->iv_flags & IEEE80211_F_DWDS) &&
4376 --sc->sc_ndwdsvaps == 0)
4377 mwl_hal_setdwds(mh, 0);
4383 * Manage station id's; these are separate from AID's
4384 * as AID's may have values out of the range of possible
4385 * station id's acceptable to the firmware.
/*
 * Allocate a firmware station id.  Use the AID directly when it is in
 * range and unclaimed; otherwise scan the bitmap for the first free
 * id (never 0).  Ids are tracked separately from AIDs — see above.
 */
4388 allocstaid(struct mwl_softc *sc, int aid)
4392 if (!(0 < aid && aid < MWL_MAXSTAID) || isset(sc->sc_staid, aid)) {
4393 /* NB: don't use 0 */
4394 for (staid = 1; staid < MWL_MAXSTAID; staid++)
4395 if (isclr(sc->sc_staid, staid))
4399 setbit(sc->sc_staid, staid);
/* Return a station id to the free pool. */
4404 delstaid(struct mwl_softc *sc, int staid)
4406 clrbit(sc->sc_staid, staid);
4410 * Setup driver-specific state for a newly associated node.
4411 * Note that we're called also on a re-associate, the isnew
4412 * param tells us if this is the first time or not.
/*
 * Node (re)association hook: assign a fw station id on first assoc,
 * then create/update the firmware sta db entry with fresh peer info.
 * isnew distinguishes first-time association from re-association.
 */
4415 mwl_newassoc(struct ieee80211_node *ni, int isnew)
4417 struct ieee80211vap *vap = ni->ni_vap;
4418 struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
4419 struct mwl_node *mn = MWL_NODE(ni);
4420 MWL_HAL_PEERINFO pi;
4424 aid = IEEE80211_AID(ni->ni_associd);
4426 mn->mn_staid = allocstaid(sc, aid);
4427 mn->mn_hvap = MWL_VAP(vap)->mv_hvap;
4430 /* XXX reset BA stream? */
4432 DPRINTF(sc, MWL_DEBUG_NODE, "%s: mac %s isnew %d aid %d staid %d\n",
4433 __func__, ether_sprintf(ni->ni_macaddr), isnew, aid, mn->mn_staid);
4434 error = mwl_peerstadb(ni, aid, mn->mn_staid, mkpeerinfo(&pi, ni));
4436 DPRINTF(sc, MWL_DEBUG_NODE,
4437 "%s: error %d creating sta db entry\n",
4439 /* XXX how to deal with error? */
4444 * Periodically poke the firmware to age out station state
4445 * (power save queues, pending tx aggregates).
/*
 * Periodic callout: poke the firmware keepalive so it ages station
 * state (ps queues, pending aggregates); reschedules itself while
 * sc_ageinterval stays non-zero (catches dynamic changes).
 */
4448 mwl_agestations(void *arg)
4450 struct mwl_softc *sc = arg;
4452 mwl_hal_setkeepalive(sc->sc_mh);
4453 if (sc->sc_ageinterval != 0) /* NB: catch dynamic changes */
4454 callout_schedule(&sc->sc_timer, sc->sc_ageinterval*hz);
/* Linear search of a hal channel table by IEEE channel number. */
4457 static const struct mwl_hal_channel *
4458 findhalchannel(const MWL_HAL_CHANNELINFO *ci, int ieee)
4462 for (i = 0; i < ci->nchannels; i++) {
4463 const struct mwl_hal_channel *hc = &ci->channels[i];
4464 if (hc->ieee == ieee)
/*
 * Regdomain hook: verify each proposed channel has hal calibration
 * data and cap its max tx power (stored in half-dBm, hence 2*) to
 * the hal limit; for HT40 also check and cap using the extension
 * channel.  Channels without cal data are reported (handling of the
 * failure path is elided from this excerpt).
 */
4471 mwl_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
4472 int nchan, struct ieee80211_channel chans[])
4474 struct mwl_softc *sc = ic->ic_ifp->if_softc;
4475 struct mwl_hal *mh = sc->sc_mh;
4476 const MWL_HAL_CHANNELINFO *ci;
4479 for (i = 0; i < nchan; i++) {
4480 struct ieee80211_channel *c = &chans[i];
4481 const struct mwl_hal_channel *hc;
/* Pick the hal table matching the channel's band and width. */
4483 if (IEEE80211_IS_CHAN_2GHZ(c)) {
4484 mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_2DOT4GHZ,
4485 IEEE80211_IS_CHAN_HT40(c) ?
4486 MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4487 } else if (IEEE80211_IS_CHAN_5GHZ(c)) {
4488 mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_5GHZ,
4489 IEEE80211_IS_CHAN_HT40(c) ?
4490 MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4492 if_printf(ic->ic_ifp,
4493 "%s: channel %u freq %u/0x%x not 2.4/5GHz\n",
4494 __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
4498 * Verify channel has cal data and cap tx power.
4500 hc = findhalchannel(ci, c->ic_ieee);
4502 if (c->ic_maxpower > 2*hc->maxTxPow)
4503 c->ic_maxpower = 2*hc->maxTxPow;
4506 if (IEEE80211_IS_CHAN_HT40(c)) {
4508 * Look for the extension channel since the
4509 * hal table only has the primary channel.
4511 hc = findhalchannel(ci, c->ic_extieee);
4513 if (c->ic_maxpower > 2*hc->maxTxPow)
4514 c->ic_maxpower = 2*hc->maxTxPow;
4518 if_printf(ic->ic_ifp,
4519 "%s: no cal data for channel %u ext %u freq %u/0x%x\n",
4520 __func__, c->ic_ieee, c->ic_extieee,
4521 c->ic_freq, c->ic_flags);
4529 #define IEEE80211_CHAN_HTG (IEEE80211_CHAN_HT|IEEE80211_CHAN_G)
4530 #define IEEE80211_CHAN_HTA (IEEE80211_CHAN_HT|IEEE80211_CHAN_A)
/* Fill one net80211 channel entry; txpow (dBm) -> maxpower (half-dBm). */
4533 addchan(struct ieee80211_channel *c, int freq, int flags, int ieee, int txpow)
4536 c->ic_flags = flags;
4539 c->ic_maxpower = 2*txpow;
4540 c->ic_maxregpower = txpow;
/* Find a channel entry matching freq and exact flags, if present. */
4543 static const struct ieee80211_channel *
4544 findchannel(const struct ieee80211_channel chans[], int nchans,
4545 int freq, int flags)
4547 const struct ieee80211_channel *c;
4550 for (i = 0; i < nchans; i++) {
4552 if (c->ic_freq == freq && c->ic_flags == flags)
/*
 * Expand the hal HT40 table into net80211 channel pairs: for each
 * primary channel locate the already-added HT20 extension channel
 * 20 MHz above, then append an HT40U entry for the primary and an
 * HT40D entry for the extension.  Respects maxchans.
 */
4559 addht40channels(struct ieee80211_channel chans[], int maxchans, int *nchans,
4560 const MWL_HAL_CHANNELINFO *ci, int flags)
4562 struct ieee80211_channel *c;
4563 const struct ieee80211_channel *extc;
4564 const struct mwl_hal_channel *hc;
4567 c = &chans[*nchans];
/* NB: incoming flags may carry HT; search keys use HT20 explicitly. */
4569 flags &= ~IEEE80211_CHAN_HT;
4570 for (i = 0; i < ci->nchannels; i++) {
4572 * Each entry defines an HT40 channel pair; find the
4573 * extension channel above and the insert the pair.
4575 hc = &ci->channels[i];
4576 extc = findchannel(chans, *nchans, hc->freq+20,
4577 flags | IEEE80211_CHAN_HT20);
4579 if (*nchans >= maxchans)
4581 addchan(c, hc->freq, flags | IEEE80211_CHAN_HT40U,
4582 hc->ieee, hc->maxTxPow);
4583 c->ic_extieee = extc->ic_ieee;
4585 if (*nchans >= maxchans)
4587 addchan(c, extc->ic_freq, flags | IEEE80211_CHAN_HT40D,
4588 extc->ic_ieee, hc->maxTxPow);
4589 c->ic_extieee = hc->ieee;
/*
 * Expand a hal 20 MHz channel table into net80211 channels.  For G
 * and HTG channels an extra B-only entry is cloned; for HTG an extra
 * G-only entry; for HTA an extra A-only entry; HT entries are tagged
 * HT20.  The c[-1]/c[0] pairs operate on the clone just appended and
 * the entry being re-tagged (the copy lines are elided here).
 */
4596 addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans,
4597 const MWL_HAL_CHANNELINFO *ci, int flags)
4599 struct ieee80211_channel *c;
4602 c = &chans[*nchans];
4604 for (i = 0; i < ci->nchannels; i++) {
4605 const struct mwl_hal_channel *hc;
4607 hc = &ci->channels[i];
4608 if (*nchans >= maxchans)
4610 addchan(c, hc->freq, flags, hc->ieee, hc->maxTxPow);
4612 if (flags == IEEE80211_CHAN_G || flags == IEEE80211_CHAN_HTG) {
4613 /* g channel have a separate b-only entry */
4614 if (*nchans >= maxchans)
4617 c[-1].ic_flags = IEEE80211_CHAN_B;
4620 if (flags == IEEE80211_CHAN_HTG) {
4621 /* HT g channel have a separate g-only entry */
4622 if (*nchans >= maxchans)
4624 c[-1].ic_flags = IEEE80211_CHAN_G;
4626 c[0].ic_flags &= ~IEEE80211_CHAN_HT;
4627 c[0].ic_flags |= IEEE80211_CHAN_HT20; /* HT20 */
4630 if (flags == IEEE80211_CHAN_HTA) {
4631 /* HT a channel have a separate a-only entry */
4632 if (*nchans >= maxchans)
4634 c[-1].ic_flags = IEEE80211_CHAN_A;
4636 c[0].ic_flags &= ~IEEE80211_CHAN_HT;
4637 c[0].ic_flags |= IEEE80211_CHAN_HT20; /* HT20 */
/*
 * Build the full (unsorted) channel list from the hal's calibration
 * tables: 20 MHz 2.4/5 GHz entries first, then the HT40 pairs which
 * need the HT20 entries present to find their extension channels.
 */
4644 getchannels(struct mwl_softc *sc, int maxchans, int *nchans,
4645 struct ieee80211_channel chans[])
4647 const MWL_HAL_CHANNELINFO *ci;
4650 * Use the channel info from the hal to craft the
4651 * channel list. Note that we pass back an unsorted
4652 * list; the caller is required to sort it for us
4656 if (mwl_hal_getchannelinfo(sc->sc_mh,
4657 MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
4658 addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG)
4659 if (mwl_hal_getchannelinfo(sc->sc_mh,
4660 MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
4661 addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
4662 if (mwl_hal_getchannelinfo(sc->sc_mh,
4663 MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
4664 addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
4665 if (mwl_hal_getchannelinfo(sc->sc_mh,
4666 MWL_FREQ_BAND_5GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
4667 addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
/* net80211 radio-caps callback: report the hal-derived channel list. */
4671 mwl_getradiocaps(struct ieee80211com *ic,
4672 int maxchans, int *nchans, struct ieee80211_channel chans[])
4674 struct mwl_softc *sc = ic->ic_ifp->if_softc;
4676 getchannels(sc, maxchans, nchans, chans);
/*
 * Attach-time channel setup: populate ic_channels from the hal and
 * install a debug/default regdomain.  Returns EIO if the hal yields
 * no channels at all.
 */
4680 mwl_getchannels(struct mwl_softc *sc)
4682 struct ifnet *ifp = sc->sc_ifp;
4683 struct ieee80211com *ic = ifp->if_l2com;
4686 * Use the channel info from the hal to craft the
4687 * channel list for net80211. Note that we pass up
4688 * an unsorted list; net80211 will sort it for us.
4690 memset(ic->ic_channels, 0, sizeof(ic->ic_channels));
4692 getchannels(sc, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels);
/* Placeholder regulatory state until a real regdomain is set. */
4694 ic->ic_regdomain.regdomain = SKU_DEBUG;
4695 ic->ic_regdomain.country = CTRY_DEFAULT;
4696 ic->ic_regdomain.location = 'I';
4697 ic->ic_regdomain.isocc[0] = ' '; /* XXX? */
4698 ic->ic_regdomain.isocc[1] = ' ';
4699 return (ic->ic_nchans == 0 ? EIO : 0);
4701 #undef IEEE80211_CHAN_HTA
4702 #undef IEEE80211_CHAN_HTG
/* Debug dump of one rx descriptor (addresses, status, rate, etc.). */
4706 mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix)
4708 const struct mwl_rxdesc *ds = bf->bf_desc;
4709 uint32_t status = le32toh(ds->Status);
4711 printf("R[%2u] (DS.V:%p DS.P:0x%jx) NEXT:%08x DATA:%08x RC:%02x%s\n"
4712 " STAT:%02x LEN:%04x RSSI:%02x CHAN:%02x RATE:%02x QOS:%04x HT:%04x\n",
4713 ix, ds, (uintmax_t)bf->bf_daddr, le32toh(ds->pPhysNext),
4714 le32toh(ds->pPhysBuffData), ds->RxControl,
4715 ds->RxControl != EAGLE_RXD_CTRL_DRIVER_OWN ?
4716 "" : (status & EAGLE_RXD_STATUS_OK) ? " *" : " !",
4717 ds->Status, le16toh(ds->PktLen), ds->RSSI, ds->Channel,
4718 ds->Rate, le16toh(ds->QosCtrl), le16toh(ds->HtSig2));
/*
 * Debug dump of one tx descriptor: header fields, the multi-frame
 * arrays, and (under a compile-time option elided here) a raw hex
 * dump of the descriptor bytes.
 */
4722 mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix)
4724 const struct mwl_txdesc *ds = bf->bf_desc;
4725 uint32_t status = le32toh(ds->Status);
4727 printf("Q%u[%3u]", qnum, ix);
4728 printf(" (DS.V:%p DS.P:0x%jx)\n", ds, (uintmax_t)bf->bf_daddr);
4729 printf(" NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n",
4730 le32toh(ds->pPhysNext),
4731 le32toh(ds->PktPtr), le16toh(ds->PktLen), status,
4732 status & EAGLE_TXD_STATUS_USED ?
4733 "" : (status & 3) != 0 ? " *" : " !");
4734 printf(" RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n",
4735 ds->DataRate, ds->TxPriority, le16toh(ds->QosCtrl),
4736 le32toh(ds->SapPktInfo), le16toh(ds->Format));
4738 printf(" MULTIFRAMES:%u LEN:%04x %04x %04x %04x %04x %04x\n"
4739 , le32toh(ds->multiframes)
4740 , le16toh(ds->PktLenArray[0]), le16toh(ds->PktLenArray[1])
4741 , le16toh(ds->PktLenArray[2]), le16toh(ds->PktLenArray[3])
4742 , le16toh(ds->PktLenArray[4]), le16toh(ds->PktLenArray[5])
4744 printf(" DATA:%08x %08x %08x %08x %08x %08x\n"
4745 , le32toh(ds->PktPtrArray[0]), le32toh(ds->PktPtrArray[1])
4746 , le32toh(ds->PktPtrArray[2]), le32toh(ds->PktPtrArray[3])
4747 , le32toh(ds->PktPtrArray[4]), le32toh(ds->PktPtrArray[5])
/* Raw byte dump of the descriptor, 16 bytes per line. */
4751 { const uint8_t *cp = (const uint8_t *) ds;
4753 for (i = 0; i < sizeof(struct mwl_txdesc); i++) {
4754 printf("%02x ", cp[i]);
4755 if (((i+1) % 16) == 0)
/*
 * Debug: sync and print every buffer on a tx queue's active list.
 * NB(review): the MWL_TXQ_LOCK acquisition line is elided here; only
 * the matching unlock is visible.
 */
4766 mwl_txq_dump(struct mwl_txq *txq)
4768 struct mwl_txbuf *bf;
4772 STAILQ_FOREACH(bf, &txq->active, bf_list) {
4773 struct mwl_txdesc *ds = bf->bf_desc;
4774 MWL_TXDESC_SYNC(txq, ds,
4775 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4777 mwl_printtxbuf(bf, txq->qnum, i);
4781 MWL_TXQ_UNLOCK(txq);
/*
 * Per-second watchdog callout: re-arm itself, and when the tx timer
 * expires on a running, valid device, report a transmit timeout —
 * distinguishing a hung firmware (keepalive fails) from a plain
 * timeout — and count it.  The reset path is elided in this excerpt.
 */
4786 mwl_watchdog(void *arg)
4788 struct mwl_softc *sc;
4792 callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);
4793 if (sc->sc_tx_timer == 0 || --sc->sc_tx_timer > 0)
4797 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && !sc->sc_invalid) {
4798 if (mwl_hal_setkeepalive(sc->sc_mh))
4799 if_printf(ifp, "transmit timeout (firmware hung?)\n");
4801 if_printf(ifp, "transmit timeout\n");
4804 mwl_txq_dump(&sc->sc_txq[0]);/*XXX*/
4807 sc->sc_stats.mst_watchdog++;
4813 * Diagnostic interface to the HAL. This is used by various
4814 * tools to do things like retrieve register contents for
4815 * debugging. The mechanism is intentionally opaque so that
4816 * it can change frequently w/o concern for compatiblity.
/*
 * Opaque diagnostic ioctl: optionally copy in an input buffer
 * (MWL_DIAG_IN), optionally allocate a dynamic output buffer
 * (MWL_DIAG_DYN), invoke the hal diag entry point, copy results out,
 * and free whatever was allocated.  Error-path cleanup relies on the
 * goto-style fallthrough to the frees at the bottom.
 */
4819 mwl_ioctl_diag(struct mwl_softc *sc, struct mwl_diag *md)
4821 struct mwl_hal *mh = sc->sc_mh;
4822 u_int id = md->md_id & MWL_DIAG_ID;
4823 void *indata = NULL;
4824 void *outdata = NULL;
4825 u_int32_t insize = md->md_in_size;
4826 u_int32_t outsize = md->md_out_size;
4829 if (md->md_id & MWL_DIAG_IN) {
/* NB: M_NOWAIT — may fail under memory pressure; handled below. */
4833 indata = malloc(insize, M_TEMP, M_NOWAIT);
4834 if (indata == NULL) {
4838 error = copyin(md->md_in_data, indata, insize);
4842 if (md->md_id & MWL_DIAG_DYN) {
4844 * Allocate a buffer for the results (otherwise the HAL
4845 * returns a pointer to a buffer where we can read the
4846 * results). Note that we depend on the HAL leaving this
4847 * pointer for us to use below in reclaiming the buffer;
4848 * may want to be more defensive.
4850 outdata = malloc(outsize, M_TEMP, M_NOWAIT);
4851 if (outdata == NULL) {
4856 if (mwl_hal_getdiagstate(mh, id, indata, insize, &outdata, &outsize)) {
4857 if (outsize < md->md_out_size)
4858 md->md_out_size = outsize;
4859 if (outdata != NULL)
4860 error = copyout(outdata, md->md_out_data,
/* Release any buffers we allocated above. */
4866 if ((md->md_id & MWL_DIAG_IN) && indata != NULL)
4867 free(indata, M_TEMP);
4868 if ((md->md_id & MWL_DIAG_DYN) && outdata != NULL)
4869 free(outdata, M_TEMP);
/*
 * Diagnostic reset: reload firmware (md_id == 0), refetch hw specs,
 * redo DMA setup, then drain and rebuild the tx queues and restart
 * rx from the list head.  Requires the softc lock.
 */
4874 mwl_ioctl_reset(struct mwl_softc *sc, struct mwl_diag *md)
4876 struct mwl_hal *mh = sc->sc_mh;
4879 MWL_LOCK_ASSERT(sc);
4881 if (md->md_id == 0 && mwl_hal_fwload(mh, NULL) != 0) {
4882 device_printf(sc->sc_dev, "unable to load firmware\n");
4885 if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
4886 device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
4889 error = mwl_setupdma(sc);
4891 /* NB: mwl_setupdma prints a msg */
4895 * Reset tx/rx data structures; after reload we must
4896 * re-start the driver's notion of the next xmit/recv.
4898 mwl_draintxq(sc); /* clear pending frames */
4899 mwl_resettxq(sc); /* rebuild tx q lists */
4900 sc->sc_rxnext = NULL; /* force rx to start at the list head */
4903 #endif /* MWL_DIAGAPI */
/*
 * mwl_ioctl: top-level ioctl handler for the network interface.
 * Dispatches on cmd; the branches visible here handle interface flag
 * changes, statistics export, the diagnostic interface (MWL_DIAGAPI),
 * media ioctls, and the generic ether_ioctl fallback.
 * NOTE(review): the switch statement's case labels are elided in this
 * view — the branch boundaries below are inferred and should be
 * confirmed against the full file.
 */
4906 mwl_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
4908 #define IS_RUNNING(ifp) \
4909 ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
4910 struct mwl_softc *sc = ifp->if_softc;
4911 struct ieee80211com *ic = ifp->if_l2com;
4912 struct ifreq *ifr = (struct ifreq *)data;
4913 int error = 0, startall;
	/* interface flags changed while up+running: just sync promisc mode */
4919 if (IS_RUNNING(ifp)) {
4921 * To avoid rescanning another access point,
4922 * do not call mwl_init() here. Instead,
4923 * only reflect promisc mode settings.
4926 } else if (ifp->if_flags & IFF_UP) {
4928 * Beware of being called during attach/detach
4929 * to reset promiscuous mode. In that case we
4930 * will still be marked UP but not RUNNING.
4931 * However trying to re-init the interface
4932 * is the wrong thing to do as we've already
4933 * torn down much of our state. There's
4934 * probably a better way to deal with this.
4936 if (!sc->sc_invalid) {
4937 mwl_init_locked(sc); /* XXX lose error */
	/* marked down: stop the hardware */
4941 mwl_stop_locked(ifp, 1);
4944 ieee80211_start_all(ic);
	/* statistics request: snapshot h/w counters then copy out */
4947 mwl_hal_gethwstats(sc->sc_mh, &sc->sc_stats.hw_stats);
4948 /* NB: embed these numbers to get a consistent view */
4949 sc->sc_stats.mst_tx_packets = ifp->if_opackets;
4950 sc->sc_stats.mst_rx_packets = ifp->if_ipackets;
4952 * NB: Drop the softc lock in case of a page fault;
4953 * we'll accept any potential inconsistency in the
4954 * statistics. The alternative is to copy the data
4955 * to a local structure.
4957 return copyout(&sc->sc_stats,
4958 ifr->ifr_data, sizeof (sc->sc_stats));
4961 /* XXX check privs */
4962 return mwl_ioctl_diag(sc, (struct mwl_diag *) ifr);
4964 /* XXX check privs */
4966 error = mwl_ioctl_reset(sc,(struct mwl_diag *) ifr);
4969 #endif /* MWL_DIAGAPI */
	/* media ioctls go to net80211's media layer */
4971 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
	/* everything else: generic ethernet handling */
4974 error = ether_ioctl(ifp, cmd, data);
/*
 * mwl_sysctl_debug: sysctl handler for the combined debug word.
 * The exported value packs the HAL debug bits into the top byte
 * (<< 24) and the driver's own debug bits into the low 24 bits;
 * on write, the two halves are split back out to the HAL and softc.
 */
4986 mwl_sysctl_debug(SYSCTL_HANDLER_ARGS)
4988 struct mwl_softc *sc = arg1;
	/* compose: HAL debug in the high byte, driver debug in the low 24 */
4991 debug = sc->sc_debug | (mwl_hal_getdebug(sc->sc_mh) << 24);
4992 error = sysctl_handle_int(oidp, &debug, 0, req);
	/* read-only access or handler error: nothing to store */
4993 if (error || !req->newptr)
4995 mwl_hal_setdebug(sc->sc_mh, debug >> 24);
4996 sc->sc_debug = debug & 0x00ffffff;
4999 #endif /* MWL_DEBUG */
/*
 * mwl_sysctlattach: register the driver's sysctl knobs under the
 * device's sysctl tree.  Seeds sc_debug from the global mwl_debug
 * tunable and hooks the read/write "debug" node to mwl_sysctl_debug.
 */
5002 mwl_sysctlattach(struct mwl_softc *sc)
5005 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
5006 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
5008 sc->sc_debug = mwl_debug;
5009 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
5010 "debug", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
5011 mwl_sysctl_debug, "I", "control debugging printfs");
5016 * Announce various information on device/driver attach.
5019 mwl_announce(struct mwl_softc *sc)
5021 struct ifnet *ifp = sc->sc_ifp;
5023 if_printf(ifp, "Rev A%d hardware, v%d.%d.%d.%d firmware (regioncode %d)\n",
5024 sc->sc_hwspecs.hwVersion,
5025 (sc->sc_hwspecs.fwReleaseNumber>>24) & 0xff,
5026 (sc->sc_hwspecs.fwReleaseNumber>>16) & 0xff,
5027 (sc->sc_hwspecs.fwReleaseNumber>>8) & 0xff,
5028 (sc->sc_hwspecs.fwReleaseNumber>>0) & 0xff,
5029 sc->sc_hwspecs.regionCode);
5030 sc->sc_fwrelease = sc->sc_hwspecs.fwReleaseNumber;
5034 for (i = 0; i <= WME_AC_VO; i++) {
5035 struct mwl_txq *txq = sc->sc_ac2q[i];
5036 if_printf(ifp, "Use hw queue %u for %s traffic\n",
5037 txq->qnum, ieee80211_wme_acnames[i]);
5040 if (bootverbose || mwl_rxdesc != MWL_RXDESC)
5041 if_printf(ifp, "using %u rx descriptors\n", mwl_rxdesc);
5042 if (bootverbose || mwl_rxbuf != MWL_RXBUF)
5043 if_printf(ifp, "using %u rx buffers\n", mwl_rxbuf);
5044 if (bootverbose || mwl_txbuf != MWL_TXBUF)
5045 if_printf(ifp, "using %u tx buffers\n", mwl_txbuf);
5046 if (bootverbose && mwl_hal_ismbsscapable(sc->sc_mh))
5047 if_printf(ifp, "multi-bss support\n");
5048 #ifdef MWL_TX_NODROP
5050 if_printf(ifp, "no tx drop\n");