2 * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
3 * Copyright (c) 2007-2008 Marvell Semiconductor, Inc.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer,
11 * without modification.
12 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
13 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
14 * redistribution must be conditioned upon including a substantially
15 * similar Disclaimer requirement for further binary redistribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
21 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
22 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
23 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
26 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGES.
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
35 * Driver for the Marvell 88W8363 Wireless LAN controller.
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/sysctl.h>
45 #include <sys/malloc.h>
47 #include <sys/mutex.h>
48 #include <sys/kernel.h>
49 #include <sys/socket.h>
50 #include <sys/sockio.h>
51 #include <sys/errno.h>
52 #include <sys/callout.h>
54 #include <sys/endian.h>
55 #include <sys/kthread.h>
56 #include <sys/taskqueue.h>
58 #include <machine/bus.h>
61 #include <net/if_dl.h>
62 #include <net/if_media.h>
63 #include <net/if_types.h>
64 #include <net/if_arp.h>
65 #include <net/ethernet.h>
66 #include <net/if_llc.h>
70 #include <net80211/ieee80211_var.h>
71 #include <net80211/ieee80211_regdomain.h>
74 #include <netinet/in.h>
75 #include <netinet/if_ether.h>
78 #include <dev/mwl/if_mwlvar.h>
79 #include <dev/mwl/mwldiag.h>
81 /* idiomatic shorthands: MS = mask+shift, SM = shift+mask */
/* NB: 'x' must be a bare identifier; token-pasting derives the shift constant x_S */
82 #define MS(v,x) (((v) & x) >> x##_S)
83 #define SM(v,x) (((v) << x##_S) & x)
/*
 * Forward declarations for the driver-internal routines defined below.
 * All are file-local (static); the driver's public entry points are the
 * mwl_attach/mwl_detach/mwl_suspend/mwl_resume family further down.
 */
85 static struct ieee80211vap *mwl_vap_create(struct ieee80211com *,
86 const char name[IFNAMSIZ], int unit, int opmode,
87 int flags, const uint8_t bssid[IEEE80211_ADDR_LEN],
88 const uint8_t mac[IEEE80211_ADDR_LEN]);
89 static void mwl_vap_delete(struct ieee80211vap *);
90 static int mwl_setupdma(struct mwl_softc *);
91 static int mwl_hal_reset(struct mwl_softc *sc);
92 static int mwl_init_locked(struct mwl_softc *);
93 static void mwl_init(void *);
94 static void mwl_stop_locked(struct ifnet *, int);
95 static int mwl_reset(struct ieee80211vap *, u_long);
96 static void mwl_stop(struct ifnet *, int);
97 static void mwl_start(struct ifnet *);
98 static int mwl_raw_xmit(struct ieee80211_node *, struct mbuf *,
99 const struct ieee80211_bpf_params *);
100 static int mwl_media_change(struct ifnet *);
101 static void mwl_watchdog(void *);
102 static int mwl_ioctl(struct ifnet *, u_long, caddr_t);
103 static void mwl_radar_proc(void *, int);
104 static void mwl_chanswitch_proc(void *, int);
105 static void mwl_bawatchdog_proc(void *, int);
106 static int mwl_key_alloc(struct ieee80211vap *,
107 struct ieee80211_key *,
108 ieee80211_keyix *, ieee80211_keyix *);
109 static int mwl_key_delete(struct ieee80211vap *,
110 const struct ieee80211_key *);
111 static int mwl_key_set(struct ieee80211vap *, const struct ieee80211_key *,
112 const uint8_t mac[IEEE80211_ADDR_LEN]);
113 static int mwl_mode_init(struct mwl_softc *);
114 static void mwl_update_mcast(struct ifnet *);
115 static void mwl_update_promisc(struct ifnet *);
116 static void mwl_updateslot(struct ifnet *);
117 static int mwl_beacon_setup(struct ieee80211vap *);
118 static void mwl_beacon_update(struct ieee80211vap *, int);
119 #ifdef MWL_HOST_PS_SUPPORT
120 static void mwl_update_ps(struct ieee80211vap *, int);
121 static int mwl_set_tim(struct ieee80211_node *, int);
123 static int mwl_dma_setup(struct mwl_softc *);
124 static void mwl_dma_cleanup(struct mwl_softc *);
125 static struct ieee80211_node *mwl_node_alloc(struct ieee80211vap *,
126 const uint8_t [IEEE80211_ADDR_LEN]);
127 static void mwl_node_cleanup(struct ieee80211_node *);
128 static void mwl_node_drain(struct ieee80211_node *);
129 static void mwl_node_getsignal(const struct ieee80211_node *,
131 static void mwl_node_getmimoinfo(const struct ieee80211_node *,
132 struct ieee80211_mimo_info *);
133 static int mwl_rxbuf_init(struct mwl_softc *, struct mwl_rxbuf *);
134 static void mwl_rx_proc(void *, int);
135 static void mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *, int);
136 static int mwl_tx_setup(struct mwl_softc *, int, int);
137 static int mwl_wme_update(struct ieee80211com *);
138 static void mwl_tx_cleanupq(struct mwl_softc *, struct mwl_txq *);
139 static void mwl_tx_cleanup(struct mwl_softc *);
140 static uint16_t mwl_calcformat(uint8_t rate, const struct ieee80211_node *);
141 static int mwl_tx_start(struct mwl_softc *, struct ieee80211_node *,
142 struct mwl_txbuf *, struct mbuf *);
143 static void mwl_tx_proc(void *, int);
144 static int mwl_chan_set(struct mwl_softc *, struct ieee80211_channel *);
145 static void mwl_draintxq(struct mwl_softc *);
146 static void mwl_cleartxq(struct mwl_softc *, struct ieee80211vap *);
147 static int mwl_recv_action(struct ieee80211_node *,
148 const struct ieee80211_frame *,
149 const uint8_t *, const uint8_t *);
150 static int mwl_addba_request(struct ieee80211_node *,
151 struct ieee80211_tx_ampdu *, int dialogtoken,
152 int baparamset, int batimeout);
153 static int mwl_addba_response(struct ieee80211_node *,
154 struct ieee80211_tx_ampdu *, int status,
155 int baparamset, int batimeout);
156 static void mwl_addba_stop(struct ieee80211_node *,
157 struct ieee80211_tx_ampdu *);
158 static int mwl_startrecv(struct mwl_softc *);
159 static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *,
160 struct ieee80211_channel *);
161 static int mwl_setapmode(struct ieee80211vap *, struct ieee80211_channel*);
162 static void mwl_scan_start(struct ieee80211com *);
163 static void mwl_scan_end(struct ieee80211com *);
164 static void mwl_set_channel(struct ieee80211com *);
165 static int mwl_peerstadb(struct ieee80211_node *,
166 int aid, int staid, MWL_HAL_PEERINFO *pi);
167 static int mwl_localstadb(struct ieee80211vap *);
168 static int mwl_newstate(struct ieee80211vap *, enum ieee80211_state, int);
169 static int allocstaid(struct mwl_softc *sc, int aid);
170 static void delstaid(struct mwl_softc *sc, int staid);
171 static void mwl_newassoc(struct ieee80211_node *, int);
172 static void mwl_agestations(void *);
173 static int mwl_setregdomain(struct ieee80211com *,
174 struct ieee80211_regdomain *, int,
175 struct ieee80211_channel []);
176 static void mwl_getradiocaps(struct ieee80211com *, int, int *,
177 struct ieee80211_channel []);
178 static int mwl_getchannels(struct mwl_softc *);
180 static void mwl_sysctlattach(struct mwl_softc *);
181 static void mwl_announce(struct mwl_softc *);
/*
 * Run-time tunables under hw.mwl.*; most also have loader tunables
 * (TUNABLE_INT) so they can be set before the driver attaches.
 */
183 SYSCTL_NODE(_hw, OID_AUTO, mwl, CTLFLAG_RD, 0, "Marvell driver parameters");
185 static int mwl_rxdesc = MWL_RXDESC; /* # rx desc's to allocate */
186 SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc,
187 0, "rx descriptors allocated");
188 static int mwl_rxbuf = MWL_RXBUF; /* # rx buffers to allocate */
189 SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RW, &mwl_rxbuf,
190 0, "rx buffers allocated");
191 TUNABLE_INT("hw.mwl.rxbuf", &mwl_rxbuf);
192 static int mwl_txbuf = MWL_TXBUF; /* # tx buffers to allocate */
193 SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RW, &mwl_txbuf,
194 0, "tx buffers allocated");
195 TUNABLE_INT("hw.mwl.txbuf", &mwl_txbuf);
196 static int mwl_txcoalesce = 8; /* # tx packets to q before poking f/w*/
197 SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RW, &mwl_txcoalesce,
198 0, "tx buffers to send at once");
199 TUNABLE_INT("hw.mwl.txcoalesce", &mwl_txcoalesce);
200 static int mwl_rxquota = MWL_RXBUF; /* # max buffers to process */
201 SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RW, &mwl_rxquota,
202 0, "max rx buffers to process per interrupt");
203 TUNABLE_INT("hw.mwl.rxquota", &mwl_rxquota);
204 static int mwl_rxdmalow = 3; /* # min buffers for wakeup */
205 SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RW, &mwl_rxdmalow,
206 0, "min free rx buffers before restarting traffic");
207 TUNABLE_INT("hw.mwl.rxdmalow", &mwl_rxdmalow);
/* hw.mwl.debug: bitmask of the MWL_DEBUG_* classes defined below */
210 static int mwl_debug = 0;
211 SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RW, &mwl_debug,
212 0, "control debugging printfs");
213 TUNABLE_INT("hw.mwl.debug", &mwl_debug);
/*
 * Debug message classes, or'd together into sc->sc_debug (see DPRINTF
 * below). NOTE(review): the enum opener is on a line not present in this
 * dump -- verify against the complete source.
 */
215 MWL_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
216 MWL_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */
217 MWL_DEBUG_RECV = 0x00000004, /* basic recv operation */
218 MWL_DEBUG_RECV_DESC = 0x00000008, /* recv descriptors */
219 MWL_DEBUG_RESET = 0x00000010, /* reset processing */
220 MWL_DEBUG_BEACON = 0x00000020, /* beacon handling */
221 MWL_DEBUG_INTR = 0x00000040, /* ISR */
222 MWL_DEBUG_TX_PROC = 0x00000080, /* tx ISR proc */
223 MWL_DEBUG_RX_PROC = 0x00000100, /* rx ISR proc */
224 MWL_DEBUG_KEYCACHE = 0x00000200, /* key cache management */
225 MWL_DEBUG_STATE = 0x00000400, /* 802.11 state transitions */
226 MWL_DEBUG_NODE = 0x00000800, /* node management */
227 MWL_DEBUG_RECV_ALL = 0x00001000, /* trace all frames (beacons) */
228 MWL_DEBUG_TSO = 0x00002000, /* TSO processing */
229 MWL_DEBUG_AMPDU = 0x00004000, /* BA stream handling */
230 MWL_DEBUG_ANY = 0xffffffff
/*
 * Packet-dump / debug-print helpers. Two variants of each macro appear
 * below; NOTE(review): the #ifdef MWL_DEBUG / #else / #endif lines that
 * select between them are missing from this dump -- the second set is
 * presumably the no-op (debug-disabled) versions. Confirm against the
 * full source before editing.
 */
232 #define IS_BEACON(wh) \
233 ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK|IEEE80211_FC0_SUBTYPE_MASK)) == \
234 (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON))
235 #define IFF_DUMPPKTS_RECV(sc, wh) \
236 (((sc->sc_debug & MWL_DEBUG_RECV) && \
237 ((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IS_BEACON(wh))) || \
238 (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
239 #define IFF_DUMPPKTS_XMIT(sc) \
240 ((sc->sc_debug & MWL_DEBUG_XMIT) || \
241 (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
242 #define DPRINTF(sc, m, fmt, ...) do { \
243 if (sc->sc_debug & (m)) \
244 printf(fmt, __VA_ARGS__); \
246 #define KEYPRINTF(sc, hk, mac) do { \
247 if (sc->sc_debug & MWL_DEBUG_KEYCACHE) \
248 mwl_keyprint(sc, __func__, hk, mac); \
250 static void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix);
251 static void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix);
/* debug-disabled variants: dump only when IFF_DEBUG|IFF_LINK2 are both set */
253 #define IFF_DUMPPKTS_RECV(sc, wh) \
254 ((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
255 #define IFF_DUMPPKTS_XMIT(sc) \
256 ((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
257 #define DPRINTF(sc, m, fmt, ...) do { \
260 #define KEYPRINTF(sc, k, mac) do { \
/* malloc(9) tag for DMA buffer allocations made by this driver */
265 MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers");
268 * Each packet has fixed front matter: a 2-byte length
269 * of the payload, followed by a 4-address 802.11 header
270 * (regardless of the actual header and always w/o any
271 * QoS header). The payload then follows.
/* NOTE(review): struct opener and length field lines are missing from this dump */
275 struct ieee80211_frame_addr4 wh;
279 * Read/Write shorthands for accesses to BAR 0. Note
280 * that all BAR 1 operations are done in the "hal" and
281 * there should be no reference to them here.
/* RD4/WR4: 32-bit register read/write through the BAR 0 bus space handle */
283 static __inline uint32_t
284 RD4(struct mwl_softc *sc, bus_size_t off)
286 return bus_space_read_4(sc->sc_io0t, sc->sc_io0h, off);
290 WR4(struct mwl_softc *sc, bus_size_t off, uint32_t val)
292 bus_space_write_4(sc->sc_io0t, sc->sc_io0h, off, val);
/*
 * mwl_attach: one-time device attach. Allocates the ifnet, attaches the
 * HAL, loads initial (station) firmware, sets up DMA and tx queues,
 * advertises capabilities to net80211 and hooks the driver methods.
 * NOTE(review): the embedded original line numbers jump throughout this
 * dump (e.g. 296 -> 299); interior lines -- braces, error paths, 'bad'
 * labels -- are missing here, so verify any change against the full file.
 */
296 mwl_attach(uint16_t devid, struct mwl_softc *sc)
299 struct ieee80211com *ic;
303 DPRINTF(sc, MWL_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);
305 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
307 device_printf(sc->sc_dev, "cannot if_alloc()\n");
312 /* set these up early for if_printf use */
313 if_initname(ifp, device_get_name(sc->sc_dev),
314 device_get_unit(sc->sc_dev));
316 mh = mwl_hal_attach(sc->sc_dev, devid,
317 sc->sc_io1h, sc->sc_io1t, sc->sc_dmat);
319 if_printf(ifp, "unable to attach HAL\n");
325 * Load firmware so we can get setup. We arbitrarily
326 * pick station firmware; we'll re-load firmware as
327 * needed so setting up the wrong mode isn't a big deal.
329 if (mwl_hal_fwload(mh, NULL) != 0) {
330 if_printf(ifp, "unable to setup builtin firmware\n");
334 if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
335 if_printf(ifp, "unable to fetch h/w specs\n");
339 error = mwl_getchannels(sc);
343 sc->sc_txantenna = 0; /* h/w default */
344 sc->sc_rxantenna = 0; /* h/w default */
345 sc->sc_invalid = 0; /* ready to go, enable int handling */
346 sc->sc_ageinterval = MWL_AGEINTERVAL;
349 * Allocate tx+rx descriptors and populate the lists.
350 * We immediately push the information to the firmware
351 * as otherwise it gets upset.
353 error = mwl_dma_setup(sc);
355 if_printf(ifp, "failed to setup descriptors: %d\n", error);
358 error = mwl_setupdma(sc); /* push to firmware */
359 if (error != 0) /* NB: mwl_setupdma prints msg */
362 callout_init(&sc->sc_timer, CALLOUT_MPSAFE);
363 callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);
365 sc->sc_tq = taskqueue_create("mwl_taskq", M_NOWAIT,
366 taskqueue_thread_enqueue, &sc->sc_tq);
367 taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
368 "%s taskq", ifp->if_xname);
/* deferred-work handlers run from the per-device taskqueue */
370 TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
371 TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc);
372 TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc);
373 TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc);
375 /* NB: insure BK queue is the lowest priority h/w queue */
376 if (!mwl_tx_setup(sc, WME_AC_BK, MWL_WME_AC_BK)) {
377 if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
378 ieee80211_wme_acnames[WME_AC_BK]);
382 if (!mwl_tx_setup(sc, WME_AC_BE, MWL_WME_AC_BE) ||
383 !mwl_tx_setup(sc, WME_AC_VI, MWL_WME_AC_VI) ||
384 !mwl_tx_setup(sc, WME_AC_VO, MWL_WME_AC_VO)) {
386 * Not enough hardware tx queues to properly do WME;
387 * just punt and assign them all to the same h/w queue.
388 * We could do a better job of this if, for example,
389 * we allocate queues when we switch from station to
392 if (sc->sc_ac2q[WME_AC_VI] != NULL)
393 mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
394 if (sc->sc_ac2q[WME_AC_BE] != NULL)
395 mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
396 sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
397 sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
398 sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
400 TASK_INIT(&sc->sc_txtask, 0, mwl_tx_proc, sc);
403 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
404 ifp->if_start = mwl_start;
405 ifp->if_ioctl = mwl_ioctl;
406 ifp->if_init = mwl_init;
407 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
408 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
409 IFQ_SET_READY(&ifp->if_snd);
412 /* XXX not right but it's not used anywhere important */
413 ic->ic_phytype = IEEE80211_T_OFDM;
414 ic->ic_opmode = IEEE80211_M_STA;
/* device capabilities advertised to net80211 */
416 IEEE80211_C_STA /* station mode supported */
417 | IEEE80211_C_HOSTAP /* hostap mode */
418 | IEEE80211_C_MONITOR /* monitor mode */
420 | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */
421 | IEEE80211_C_AHDEMO /* adhoc demo mode */
423 | IEEE80211_C_MBSS /* mesh point link mode */
424 | IEEE80211_C_WDS /* WDS supported */
425 | IEEE80211_C_SHPREAMBLE /* short preamble supported */
426 | IEEE80211_C_SHSLOT /* short slot time supported */
427 | IEEE80211_C_WME /* WME/WMM supported */
428 | IEEE80211_C_BURST /* xmit bursting supported */
429 | IEEE80211_C_WPA /* capable of WPA1+WPA2 */
430 | IEEE80211_C_BGSCAN /* capable of bg scanning */
431 | IEEE80211_C_TXFRAG /* handle tx frags */
432 | IEEE80211_C_TXPMGT /* capable of txpow mgt */
433 | IEEE80211_C_DFS /* DFS supported */
/* 802.11n HT capabilities */
437 IEEE80211_HTCAP_SMPS_ENA /* SM PS mode enabled */
438 | IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width */
439 | IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */
440 | IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */
441 | IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
442 #if MWL_AGGR_SIZE == 7935
443 | IEEE80211_HTCAP_MAXAMSDU_7935 /* max A-MSDU length */
445 | IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */
448 | IEEE80211_HTCAP_PSMP /* PSMP supported */
449 | IEEE80211_HTCAP_40INTOLERANT /* 40MHz intolerant */
451 /* s/w capabilities */
452 | IEEE80211_HTC_HT /* HT operation */
453 | IEEE80211_HTC_AMPDU /* tx A-MPDU */
454 | IEEE80211_HTC_AMSDU /* tx A-MSDU */
455 | IEEE80211_HTC_SMPS /* SMPS available */
459 * Mark h/w crypto support.
460 * XXX no way to query h/w support.
462 ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
463 | IEEE80211_CRYPTO_AES_CCM
464 | IEEE80211_CRYPTO_TKIP
465 | IEEE80211_CRYPTO_TKIPMIC
468 * Transmit requires space in the packet for a special
469 * format transmit record and optional padding between
470 * this record and the payload. Ask the net80211 layer
471 * to arrange this when encapsulating packets so we can
472 * add it efficiently.
474 ic->ic_headroom = sizeof(struct mwltxrec) -
475 sizeof(struct ieee80211_frame);
477 /* call MI attach routine. */
478 ieee80211_ifattach(ic, sc->sc_hwspecs.macAddr);
479 ic->ic_setregdomain = mwl_setregdomain;
480 ic->ic_getradiocaps = mwl_getradiocaps;
481 /* override default methods */
482 ic->ic_raw_xmit = mwl_raw_xmit;
483 ic->ic_newassoc = mwl_newassoc;
484 ic->ic_updateslot = mwl_updateslot;
485 ic->ic_update_mcast = mwl_update_mcast;
486 ic->ic_update_promisc = mwl_update_promisc;
487 ic->ic_wme.wme_update = mwl_wme_update;
489 ic->ic_node_alloc = mwl_node_alloc;
/* save net80211 defaults so driver wrappers can chain to them */
490 sc->sc_node_cleanup = ic->ic_node_cleanup;
491 ic->ic_node_cleanup = mwl_node_cleanup;
492 sc->sc_node_drain = ic->ic_node_drain;
493 ic->ic_node_drain = mwl_node_drain;
494 ic->ic_node_getsignal = mwl_node_getsignal;
495 ic->ic_node_getmimoinfo = mwl_node_getmimoinfo;
497 ic->ic_scan_start = mwl_scan_start;
498 ic->ic_scan_end = mwl_scan_end;
499 ic->ic_set_channel = mwl_set_channel;
501 sc->sc_recv_action = ic->ic_recv_action;
502 ic->ic_recv_action = mwl_recv_action;
503 sc->sc_addba_request = ic->ic_addba_request;
504 ic->ic_addba_request = mwl_addba_request;
505 sc->sc_addba_response = ic->ic_addba_response;
506 ic->ic_addba_response = mwl_addba_response;
507 sc->sc_addba_stop = ic->ic_addba_stop;
508 ic->ic_addba_stop = mwl_addba_stop;
510 ic->ic_vap_create = mwl_vap_create;
511 ic->ic_vap_delete = mwl_vap_delete;
513 ieee80211_radiotap_attach(ic,
514 &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
515 MWL_TX_RADIOTAP_PRESENT,
516 &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
517 MWL_RX_RADIOTAP_PRESENT);
519 * Setup dynamic sysctl's now that country code and
520 * regdomain are available from the hal.
522 mwl_sysctlattach(sc);
525 ieee80211_announce(ic);
/*
 * mwl_detach: undo mwl_attach. Ordering matters -- see the comment block
 * below. NOTE(review): interior lines are missing from this dump.
 */
539 mwl_detach(struct mwl_softc *sc)
541 struct ifnet *ifp = sc->sc_ifp;
542 struct ieee80211com *ic = ifp->if_l2com;
544 DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
545 __func__, ifp->if_flags);
549 * NB: the order of these is important:
550 * o call the 802.11 layer before detaching the hal to
551 * insure callbacks into the driver to delete global
552 * key cache entries can be handled
553 * o reclaim the tx queue data structures after calling
554 * the 802.11 layer as we'll get called back to reclaim
555 * node state and potentially want to use them
556 * o to cleanup the tx queues the hal is called, so detach
558 * Other than that, it's straightforward...
560 ieee80211_ifdetach(ic);
561 callout_drain(&sc->sc_watchdog);
564 mwl_hal_detach(sc->sc_mh);
571 * MAC address handling for multiple BSS on the same radio.
572 * The first vap uses the MAC address from the EEPROM. For
573 * subsequent vap's we set the U/L bit (bit 1) in the MAC
574 * address and use the next six bits as an index.
/* assign_address: pick a (possibly cloned) MAC and record it in sc_bssidmask */
577 assign_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
581 if (clone && mwl_hal_ismbsscapable(sc->sc_mh)) {
582 /* NB: we only do this if h/w supports multiple bssid */
583 for (i = 0; i < 32; i++)
584 if ((sc->sc_bssidmask & (1<<i)) == 0)
/* set locally-administered bit and fold the free index into the address */
587 mac[0] |= (i << 2)|0x2;
590 sc->sc_bssidmask |= 1<<i;
/* reclaim_address: release a MAC slot previously taken by assign_address.
 * NOTE(review): the line computing 'i' from mac is missing from this dump. */
596 reclaim_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN])
599 if (i != 0 || --sc->sc_nbssid0 == 0)
600 sc->sc_bssidmask &= ~(1<<i);
/*
 * mwl_vap_create: create a net80211 vap and, for AP/MBSS/STA modes, a
 * matching HAL vap. WDS vaps piggyback on an existing AP vap; monitor/
 * ibss/ahdemo need no HAL vap. NOTE(review): several interior lines
 * (switch statement opener, error returns, counters) are missing from
 * this dump.
 */
603 static struct ieee80211vap *
604 mwl_vap_create(struct ieee80211com *ic,
605 const char name[IFNAMSIZ], int unit, int opmode, int flags,
606 const uint8_t bssid[IEEE80211_ADDR_LEN],
607 const uint8_t mac0[IEEE80211_ADDR_LEN])
609 struct ifnet *ifp = ic->ic_ifp;
610 struct mwl_softc *sc = ifp->if_softc;
611 struct mwl_hal *mh = sc->sc_mh;
612 struct ieee80211vap *vap, *apvap;
613 struct mwl_hal_vap *hvap;
615 uint8_t mac[IEEE80211_ADDR_LEN];
617 IEEE80211_ADDR_COPY(mac, mac0);
619 case IEEE80211_M_HOSTAP:
620 case IEEE80211_M_MBSS:
621 if ((flags & IEEE80211_CLONE_MACADDR) == 0)
622 assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
623 hvap = mwl_hal_newvap(mh, MWL_HAL_AP, mac);
/* on failure give back the address we claimed */
625 if ((flags & IEEE80211_CLONE_MACADDR) == 0)
626 reclaim_address(sc, mac);
630 case IEEE80211_M_STA:
631 if ((flags & IEEE80211_CLONE_MACADDR) == 0)
632 assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
633 hvap = mwl_hal_newvap(mh, MWL_HAL_STA, mac);
635 if ((flags & IEEE80211_CLONE_MACADDR) == 0)
636 reclaim_address(sc, mac);
639 /* no h/w beacon miss support; always use s/w */
640 flags |= IEEE80211_CLONE_NOBEACONS;
642 case IEEE80211_M_WDS:
643 hvap = NULL; /* NB: we use associated AP vap */
644 if (sc->sc_napvaps == 0)
645 return NULL; /* no existing AP vap */
647 case IEEE80211_M_MONITOR:
650 case IEEE80211_M_IBSS:
651 case IEEE80211_M_AHDEMO:
656 mvp = (struct mwl_vap *) malloc(sizeof(struct mwl_vap),
657 M_80211_VAP, M_NOWAIT | M_ZERO);
/* allocation failed: tear down the HAL vap and reclaim the address */
660 mwl_hal_delvap(hvap);
661 if ((flags & IEEE80211_CLONE_MACADDR) == 0)
662 reclaim_address(sc, mac);
668 if (opmode == IEEE80211_M_WDS) {
670 * WDS vaps must have an associated AP vap; find one.
673 TAILQ_FOREACH(apvap, &ic->ic_vaps, iv_next)
674 if (apvap->iv_opmode == IEEE80211_M_HOSTAP) {
675 mvp->mv_ap_hvap = MWL_VAP(apvap)->mv_hvap;
678 KASSERT(mvp->mv_ap_hvap != NULL, ("no ap vap"));
681 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
683 IEEE80211_ADDR_COPY(vap->iv_myaddr, mac);
684 /* override with driver methods */
685 mvp->mv_newstate = vap->iv_newstate;
686 vap->iv_newstate = mwl_newstate;
687 vap->iv_max_keyix = 0; /* XXX */
688 vap->iv_key_alloc = mwl_key_alloc;
689 vap->iv_key_delete = mwl_key_delete;
690 vap->iv_key_set = mwl_key_set;
691 #ifdef MWL_HOST_PS_SUPPORT
692 if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
693 vap->iv_update_ps = mwl_update_ps;
694 mvp->mv_set_tim = vap->iv_set_tim;
695 vap->iv_set_tim = mwl_set_tim;
698 vap->iv_reset = mwl_reset;
699 vap->iv_update_beacon = mwl_beacon_update;
701 /* override max aid so sta's cannot assoc when we're out of sta id's */
702 vap->iv_max_aid = MWL_MAXSTAID;
703 /* override default A-MPDU rx parameters */
704 vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
705 vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;
708 ieee80211_vap_attach(vap, mwl_media_change, ieee80211_media_status);
710 switch (vap->iv_opmode) {
711 case IEEE80211_M_HOSTAP:
712 case IEEE80211_M_MBSS:
713 case IEEE80211_M_STA:
715 * Setup sta db entry for local address.
718 if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
719 vap->iv_opmode == IEEE80211_M_MBSS)
724 case IEEE80211_M_WDS:
731 * Setup overall operating mode.
734 ic->ic_opmode = IEEE80211_M_HOSTAP;
735 else if (sc->sc_nstavaps)
736 ic->ic_opmode = IEEE80211_M_STA;
738 ic->ic_opmode = opmode;
/*
 * mwl_vap_delete: tear down a vap created by mwl_vap_create. Interrupts
 * are quiesced around the removal when the interface is running.
 * NOTE(review): interior lines (switch opener, counter updates, break
 * statements) are missing from this dump.
 */
744 mwl_vap_delete(struct ieee80211vap *vap)
746 struct mwl_vap *mvp = MWL_VAP(vap);
747 struct ifnet *parent = vap->iv_ic->ic_ifp;
748 struct mwl_softc *sc = parent->if_softc;
749 struct mwl_hal *mh = sc->sc_mh;
750 struct mwl_hal_vap *hvap = mvp->mv_hvap;
751 enum ieee80211_opmode opmode = vap->iv_opmode;
753 /* XXX disallow ap vap delete if WDS still present */
754 if (parent->if_drv_flags & IFF_DRV_RUNNING) {
755 /* quiesce h/w while we remove the vap */
756 mwl_hal_intrset(mh, 0); /* disable interrupts */
758 ieee80211_vap_detach(vap);
760 case IEEE80211_M_HOSTAP:
761 case IEEE80211_M_MBSS:
762 case IEEE80211_M_STA:
763 KASSERT(hvap != NULL, ("no hal vap handle"));
764 (void) mwl_hal_delstation(hvap, vap->iv_myaddr);
765 mwl_hal_delvap(hvap);
766 if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS)
770 /* XXX don't do it for IEEE80211_CLONE_MACADDR */
771 reclaim_address(sc, vap->iv_myaddr);
773 case IEEE80211_M_WDS:
779 mwl_cleartxq(sc, vap);
780 free(mvp, M_80211_VAP);
781 if (parent->if_drv_flags & IFF_DRV_RUNNING)
782 mwl_hal_intrset(mh, sc->sc_imask);
/* mwl_suspend: power-management suspend hook (stops the interface). */
786 mwl_suspend(struct mwl_softc *sc)
788 struct ifnet *ifp = sc->sc_ifp;
790 DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
791 __func__, ifp->if_flags);
/* mwl_resume: power-management resume hook; re-init only if admin-up. */
797 mwl_resume(struct mwl_softc *sc)
799 struct ifnet *ifp = sc->sc_ifp;
801 DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
802 __func__, ifp->if_flags);
804 if (ifp->if_flags & IFF_UP)
/* mwl_shutdown: system shutdown hook; just stop the interface. */
809 mwl_shutdown(void *arg)
811 struct mwl_softc *sc = arg;
813 mwl_stop(sc->sc_ifp, 1);
817 * Interrupt handler. Most of the actual processing is deferred.
/* NOTE(review): function signature line is missing from this dump */
822 struct mwl_softc *sc = arg;
823 struct mwl_hal *mh = sc->sc_mh;
826 if (sc->sc_invalid) {
828 * The hardware is not ready/present, don't touch anything.
829 * Note this can happen early on if the IRQ is shared.
831 DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
835 * Figure out the reason(s) for the interrupt.
837 mwl_hal_getisr(mh, &status); /* NB: clears ISR too */
838 if (status == 0) /* must be a shared irq */
841 DPRINTF(sc, MWL_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n",
842 __func__, status, sc->sc_imask);
/* dispatch each interrupt cause to its deferred handler task */
843 if (status & MACREG_A2HRIC_BIT_RX_RDY)
844 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
845 if (status & MACREG_A2HRIC_BIT_TX_DONE)
846 taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
847 if (status & MACREG_A2HRIC_BIT_BA_WATCHDOG)
848 taskqueue_enqueue(sc->sc_tq, &sc->sc_bawatchdogtask);
849 if (status & MACREG_A2HRIC_BIT_OPC_DONE)
851 if (status & MACREG_A2HRIC_BIT_MAC_EVENT) {
854 if (status & MACREG_A2HRIC_BIT_ICV_ERROR) {
856 sc->sc_stats.mst_rx_badtkipicv++;
858 if (status & MACREG_A2HRIC_BIT_QUEUE_EMPTY) {
859 /* 11n aggregation queue is empty, re-fill */
862 if (status & MACREG_A2HRIC_BIT_QUEUE_FULL) {
865 if (status & MACREG_A2HRIC_BIT_RADAR_DETECT) {
866 /* radar detected, process event */
867 taskqueue_enqueue(sc->sc_tq, &sc->sc_radartask);
869 if (status & MACREG_A2HRIC_BIT_CHAN_SWITCH) {
870 /* DFS channel switch */
871 taskqueue_enqueue(sc->sc_tq, &sc->sc_chanswitchtask);
/* mwl_radar_proc: deferred handler for radar-detect interrupts;
 * notifies net80211 DFS machinery under the com lock. */
876 mwl_radar_proc(void *arg, int pending)
878 struct mwl_softc *sc = arg;
879 struct ifnet *ifp = sc->sc_ifp;
880 struct ieee80211com *ic = ifp->if_l2com;
882 DPRINTF(sc, MWL_DEBUG_ANY, "%s: radar detected, pending %u\n",
885 sc->sc_stats.mst_radardetect++;
886 /* XXX stop h/w BA streams? */
889 ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
890 IEEE80211_UNLOCK(ic);
/* mwl_chanswitch_proc: deferred handler for DFS channel-switch complete. */
894 mwl_chanswitch_proc(void *arg, int pending)
896 struct mwl_softc *sc = arg;
897 struct ifnet *ifp = sc->sc_ifp;
898 struct ieee80211com *ic = ifp->if_l2com;
900 DPRINTF(sc, MWL_DEBUG_ANY, "%s: channel switch notice, pending %u\n",
904 sc->sc_csapending = 0;
905 ieee80211_csa_completeswitch(ic);
906 IEEE80211_UNLOCK(ic);
/* mwl_bawatchdog: tear down a stalled BA stream by sending DELBA.
 * sp->data[0] holds the node, sp->data[1] the tx ampdu state. */
910 mwl_bawatchdog(const MWL_HAL_BASTREAM *sp)
912 struct ieee80211_node *ni = sp->data[0];
914 /* send DELBA and drop the stream */
915 ieee80211_ampdu_stop(ni, sp->data[1], IEEE80211_REASON_UNSPECIFIED);
/*
 * mwl_bawatchdog_proc: deferred handler for the f/w BA watchdog
 * interrupt. Bitmap 0xff means "all streams"; any other value (except
 * the 0xaa sentinel) names a single stream to reset.
 */
919 mwl_bawatchdog_proc(void *arg, int pending)
921 struct mwl_softc *sc = arg;
922 struct mwl_hal *mh = sc->sc_mh;
923 const MWL_HAL_BASTREAM *sp;
926 sc->sc_stats.mst_bawatchdog++;
928 if (mwl_hal_getwatchdogbitmap(mh, &bitmap) != 0) {
929 DPRINTF(sc, MWL_DEBUG_AMPDU,
930 "%s: could not get bitmap\n", __func__);
931 sc->sc_stats.mst_bawatchdog_failed++;
934 DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bitmap 0x%x\n", __func__, bitmap);
935 if (bitmap == 0xff) {
937 /* disable all ba streams */
938 for (bitmap = 0; bitmap < 8; bitmap++) {
939 sp = mwl_hal_bastream_lookup(mh, bitmap);
946 DPRINTF(sc, MWL_DEBUG_AMPDU,
947 "%s: no BA streams found\n", __func__);
948 sc->sc_stats.mst_bawatchdog_empty++;
950 } else if (bitmap != 0xaa) {
951 /* disable a single ba stream */
952 sp = mwl_hal_bastream_lookup(mh, bitmap);
956 DPRINTF(sc, MWL_DEBUG_AMPDU,
957 "%s: no BA stream %d\n", __func__, bitmap);
958 sc->sc_stats.mst_bawatchdog_notfound++;
964 * Convert net80211 channel to a HAL channel.
967 mwl_mapchan(MWL_HAL_CHANNEL *hc, const struct ieee80211_channel *chan)
969 hc->channel = chan->ic_ieee;
/* clear the whole flags bitfield struct in one store before filling it in */
971 *(uint32_t *)&hc->channelFlags = 0;
972 if (IEEE80211_IS_CHAN_2GHZ(chan))
973 hc->channelFlags.FreqBand = MWL_FREQ_BAND_2DOT4GHZ;
974 else if (IEEE80211_IS_CHAN_5GHZ(chan))
975 hc->channelFlags.FreqBand = MWL_FREQ_BAND_5GHZ;
976 if (IEEE80211_IS_CHAN_HT40(chan)) {
977 hc->channelFlags.ChnlWidth = MWL_CH_40_MHz_WIDTH;
978 if (IEEE80211_IS_CHAN_HT40U(chan))
979 hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_ABOVE_CTRL_CH;
981 hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_BELOW_CTRL_CH;
983 hc->channelFlags.ChnlWidth = MWL_CH_20_MHz_WIDTH;
984 /* XXX 10MHz channels */
988 * Inform firmware of our tx/rx dma setup. The BAR 0
989 * writes below are for compatibility with older firmware.
990 * For current firmware we send this information with a
991 * cmd block via mwl_hal_sethwdma.
994 mwl_setupdma(struct mwl_softc *sc)
/* publish the rx descriptor ring base; read == write means ring empty */
998 sc->sc_hwdma.rxDescRead = sc->sc_rxdma.dd_desc_paddr;
999 WR4(sc, sc->sc_hwspecs.rxDescRead, sc->sc_hwdma.rxDescRead);
1000 WR4(sc, sc->sc_hwspecs.rxDescWrite, sc->sc_hwdma.rxDescRead);
1002 for (i = 0; i < MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; i++) {
1003 struct mwl_txq *txq = &sc->sc_txq[i];
1004 sc->sc_hwdma.wcbBase[i] = txq->dma.dd_desc_paddr;
1005 WR4(sc, sc->sc_hwspecs.wcbBase[i], sc->sc_hwdma.wcbBase[i]);
1007 sc->sc_hwdma.maxNumTxWcb = mwl_txbuf;
1008 sc->sc_hwdma.maxNumWCB = MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES;
1010 error = mwl_hal_sethwdma(sc->sc_mh, &sc->sc_hwdma);
1012 device_printf(sc->sc_dev,
1013 "unable to setup tx/rx dma; hal status %u\n", error);
1020 * Inform firmware of tx rate parameters.
1021 * Called after a channel change.
1024 mwl_setcurchanrates(struct mwl_softc *sc)
1026 struct ifnet *ifp = sc->sc_ifp;
1027 struct ieee80211com *ic = ifp->if_l2com;
1028 const struct ieee80211_rateset *rs;
1029 MWL_HAL_TXRATE rates;
1031 memset(&rates, 0, sizeof(rates));
1032 rs = ieee80211_get_suprates(ic, ic->ic_curchan);
1033 /* rate used to send management frames */
1034 rates.MgtRate = rs->rs_rates[0] & IEEE80211_RATE_VAL;
1035 /* rate used to send multicast frames */
1036 rates.McastRate = rates.MgtRate;
1038 return mwl_hal_settxrate_auto(sc->sc_mh, &rates);
1042 * Inform firmware of tx rate parameters. Called whenever
1043 * user-settable params change and after a channel change.
1046 mwl_setrates(struct ieee80211vap *vap)
1048 struct mwl_vap *mvp = MWL_VAP(vap);
1049 struct ieee80211_node *ni = vap->iv_bss;
1050 const struct ieee80211_txparam *tp = ni->ni_txparms;
1051 MWL_HAL_TXRATE rates;
1053 KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1056 * Update the h/w rate map.
1057 * NB: 0x80 for MCS is passed through unchanged
1059 memset(&rates, 0, sizeof(rates));
1060 /* rate used to send management frames */
1061 rates.MgtRate = tp->mgmtrate;
1062 /* rate used to send multicast frames */
1063 rates.McastRate = tp->mcastrate;
1065 /* while here calculate EAPOL fixed rate cookie */
1066 mvp->mv_eapolformat = htole16(mwl_calcformat(rates.MgtRate, ni));
/* fixed unicast rate requested -> RATE_FIXED, otherwise let f/w pick */
1068 return mwl_hal_settxrate(mvp->mv_hvap,
1069 tp->ucastrate != IEEE80211_FIXED_RATE_NONE ?
1070 RATE_FIXED : RATE_AUTO, &rates);
1074 * Setup a fixed xmit rate cookie for EAPOL frames.
/*
 * Recompute mv_eapolformat for the vap.  When operating an 11n channel
 * that is not a "pure N" bss, fall back to the corresponding legacy
 * mode's management rate so non-HT stations can decode EAPOL frames;
 * otherwise use the current mode's configured management rate.
 * Requires RUN state (iv_bss/ni_chan must be valid).
 */
1077 mwl_seteapolformat(struct ieee80211vap *vap)
1079 	struct mwl_vap *mvp = MWL_VAP(vap);
1080 	struct ieee80211_node *ni = vap->iv_bss;
1081 	enum ieee80211_phymode mode;
1084 	KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
1086 	mode = ieee80211_chan2mode(ni->ni_chan);
1088 	 * Use legacy rates when operating a mixed HT+non-HT bss.
1089 	 * NB: this may violate POLA for sta and wds vap's.
1091 	if (mode == IEEE80211_MODE_11NA &&
1092 	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1093 		rate = vap->iv_txparms[IEEE80211_MODE_11A].mgmtrate;
1094 	else if (mode == IEEE80211_MODE_11NG &&
1095 	    (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
1096 		rate = vap->iv_txparms[IEEE80211_MODE_11G].mgmtrate;
1098 		rate = vap->iv_txparms[mode].mgmtrate;
1100 	mvp->mv_eapolformat = htole16(mwl_calcformat(rate, ni));
1104 * Map SKU+country code to region code for radar bin'ing.
/*
 * Translate a net80211 regulatory domain (SKU + country) into the
 * firmware's DOMAIN_CODE_* region identifier.  ETSI is special-cased
 * by country (Spain, France) and otherwise forced to the 1.3.1 radar
 * type; unknown domains default to the FCC code.
 */
1107 mwl_map2regioncode(const struct ieee80211_regdomain *rd)
1109 	switch (rd->regdomain) {
1112 		return DOMAIN_CODE_FCC;
1114 		return DOMAIN_CODE_IC;
1118 		if (rd->country == CTRY_SPAIN)
1119 			return DOMAIN_CODE_SPAIN;
1120 		if (rd->country == CTRY_FRANCE || rd->country == CTRY_FRANCE2)
1121 			return DOMAIN_CODE_FRANCE;
1122 		/* XXX force 1.3.1 radar type */
1123 		return DOMAIN_CODE_ETSI_131;
1125 		return DOMAIN_CODE_MKK;
1127 		return DOMAIN_CODE_DGT;	/* Taiwan */
1131 		return DOMAIN_CODE_AUS;	/* Australia */
1134 	return DOMAIN_CODE_FCC;	/* XXX? */
/*
 * Push vap-independent operating state to the firmware after a reset:
 * antenna configuration, radio/preamble, WMM, current channel, rate
 * adaptation mode, burst optimization, regulatory region and A-MPDU
 * rate-mode settings.
 */
1138 mwl_hal_reset(struct mwl_softc *sc)
1140 	struct ifnet *ifp = sc->sc_ifp;
1141 	struct ieee80211com *ic = ifp->if_l2com;
1142 	struct mwl_hal *mh = sc->sc_mh;
1144 	mwl_hal_setantenna(mh, WL_ANTENNATYPE_RX, sc->sc_rxantenna);
1145 	mwl_hal_setantenna(mh, WL_ANTENNATYPE_TX, sc->sc_txantenna);
1146 	mwl_hal_setradio(mh, 1, WL_AUTO_PREAMBLE);
1147 	mwl_hal_setwmm(sc->sc_mh, (ic->ic_flags & IEEE80211_F_WME) != 0);
1148 	mwl_chan_set(sc, ic->ic_curchan);
1149 	/* NB: RF/RA performance tuned for indoor mode */
1150 	mwl_hal_setrateadaptmode(mh, 0);
1151 	mwl_hal_setoptimizationlevel(mh,
1152 	    (ic->ic_flags & IEEE80211_F_BURST) != 0);
1154 	mwl_hal_setregioncode(mh, mwl_map2regioncode(&ic->ic_regdomain));
1156 	mwl_hal_setaggampduratemode(mh, 1, 80);		/* XXX */
1157 	mwl_hal_setcfend(mh, 0);			/* XXX */
/*
 * Bring the interface up with the softc lock held: stop any previous
 * activity, reset the hardware to a known state, (re)start receive,
 * enable interrupts, mark the interface running and arm the watchdog.
 */
1163 mwl_init_locked(struct mwl_softc *sc)
1165 	struct ifnet *ifp = sc->sc_ifp;
1166 	struct mwl_hal *mh = sc->sc_mh;
1169 	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
1170 		__func__, ifp->if_flags);
1172 	MWL_LOCK_ASSERT(sc);
1175 	 * Stop anything previously setup.  This is safe
1176 	 * whether this is the first time through or not.
1178 	mwl_stop_locked(ifp, 0);
1181 	 * Push vap-independent state to the firmware.
1183 	if (!mwl_hal_reset(sc)) {
1184 		if_printf(ifp, "unable to reset hardware\n");
1189 	 * Setup recv (once); transmit is already good to go.
1191 	error = mwl_startrecv(sc);
1193 		if_printf(ifp, "unable to start recv logic\n");
1198 	 * Enable interrupts.
 	/* interrupt cause bits the driver wants delivered */
1200 	sc->sc_imask = MACREG_A2HRIC_BIT_RX_RDY
1201 		     | MACREG_A2HRIC_BIT_TX_DONE
1202 		     | MACREG_A2HRIC_BIT_OPC_DONE
1204 		     | MACREG_A2HRIC_BIT_MAC_EVENT
1206 		     | MACREG_A2HRIC_BIT_ICV_ERROR
1207 		     | MACREG_A2HRIC_BIT_RADAR_DETECT
1208 		     | MACREG_A2HRIC_BIT_CHAN_SWITCH
1210 		     | MACREG_A2HRIC_BIT_QUEUE_EMPTY
1212 		     | MACREG_A2HRIC_BIT_BA_WATCHDOG
1213 		     | MACREQ_A2HRIC_BIT_TX_ACK
1216 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1217 	mwl_hal_intrset(mh, sc->sc_imask);
1218 	callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);
/*
 * ifnet if_init entry point: initialize the device under the softc
 * lock and, on success, kick all vaps into operation.
 * NOTE(review): the function signature line is not visible in this
 * view; arg is presumably the softc — confirm against the full file.
 */
1226 	struct mwl_softc *sc = arg;
1227 	struct ifnet *ifp = sc->sc_ifp;
1228 	struct ieee80211com *ic = ifp->if_l2com;
1231 	DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
1232 		__func__, ifp->if_flags);
1235 	error = mwl_init_locked(sc);
1239 		ieee80211_start_all(ic);	/* start all vap's */
/*
 * Stop the interface with the softc lock held: clear the RUNNING
 * flag, cancel the watchdog and reset the tx timer.  Safe to call
 * when the interface is already stopped.
 */
1243 mwl_stop_locked(struct ifnet *ifp, int disable)
1245 	struct mwl_softc *sc = ifp->if_softc;
1247 	DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
1248 		__func__, sc->sc_invalid, ifp->if_flags);
1250 	MWL_LOCK_ASSERT(sc);
1251 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1253 		 * Shutdown the hardware and driver.
1255 		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1256 		callout_stop(&sc->sc_watchdog);
1257 		sc->sc_tx_timer = 0;
/*
 * Locking wrapper around mwl_stop_locked()
 * (the lock acquire/release lines are elided in this view).
 */
1263 mwl_stop(struct ifnet *ifp, int disable)
1265 	struct mwl_softc *sc = ifp->if_softc;
1268 	mwl_stop_locked(ifp, disable);
/*
 * Re-push per-vap state to the firmware: RTS threshold, HT short-GI,
 * HT protection mode, and — when running as hostap/mesh/ibss — ap mode,
 * N-protection and the beacon frame.
 */
1273 mwl_reset_vap(struct ieee80211vap *vap, int state)
1275 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1276 	struct ieee80211com *ic = vap->iv_ic;
1278 	if (state == IEEE80211_S_RUN)
1281 	mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
1282 	/* XXX auto? 20/40 split? */
1283 	mwl_hal_sethtgi(hvap, (vap->iv_flags_ht &
1284 	    (IEEE80211_FHT_SHORTGI20|IEEE80211_FHT_SHORTGI40)) ? 1 : 0);
1285 	mwl_hal_setnprot(hvap, ic->ic_htprotmode == IEEE80211_PROT_NONE ?
1286 	    HTPROTECT_NONE : HTPROTECT_AUTO);
1287 	/* XXX txpower cap */
1289 	/* re-setup beacons */
1290 	if (state == IEEE80211_S_RUN &&
1291 	    (vap->iv_opmode == IEEE80211_M_HOSTAP ||
1292 	     vap->iv_opmode == IEEE80211_M_MBSS ||
1293 	     vap->iv_opmode == IEEE80211_M_IBSS)) {
1294 		mwl_setapmode(vap, vap->iv_bss->ni_chan);
1295 		mwl_hal_setnprotmode(hvap,
1296 		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
1297 		return mwl_beacon_setup(vap);
1303 * Reset the hardware w/o losing operational state.
1304 * Used to to reset or reload hardware state for a vap.
/*
 * net80211 iv_reset callback.  For vaps that have a h/w vap (not WDS
 * or monitor) disable interrupts, re-plumb the vap state via
 * mwl_reset_vap() and re-enable the interrupt mask.
 */
1307 mwl_reset(struct ieee80211vap *vap, u_long cmd)
1309 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1312 	if (hvap != NULL) {		/* WDS, MONITOR, etc. */
1313 		struct ieee80211com *ic = vap->iv_ic;
1314 		struct ifnet *ifp = ic->ic_ifp;
1315 		struct mwl_softc *sc = ifp->if_softc;
1316 		struct mwl_hal *mh = sc->sc_mh;
1318 		/* XXX handle DWDS sta vap change */
1319 		/* XXX do we need to disable interrupts? */
1320 		mwl_hal_intrset(mh, 0);		/* disable interrupts */
1321 		error = mwl_reset_vap(vap, vap->iv_state);
1322 		mwl_hal_intrset(mh, sc->sc_imask);
1328 * Allocate a tx buffer for sending a frame.  The
1329 * packet is assumed to have the WME AC stored so
1330 * we can use it to select the appropriate h/w queue.
/*
 * Pop the head of the queue's free list (under the txq lock) and
 * return it; returns NULL when the queue is exhausted, after logging
 * the shortage.  Caller owns the returned buffer.
 */
1332 static struct mwl_txbuf *
1333 mwl_gettxbuf(struct mwl_softc *sc, struct mwl_txq *txq)
1335 	struct mwl_txbuf *bf;
1338 	 * Grab a TX buffer and associated resources.
1341 	bf = STAILQ_FIRST(&txq->free);
1343 		STAILQ_REMOVE_HEAD(&txq->free, bf_list);
1346 	MWL_TXQ_UNLOCK(txq);
1348 		DPRINTF(sc, MWL_DEBUG_XMIT,
1349 		    "%s: out of xmit buffers on q %d\n", __func__, txq->qnum);
1354 * Return a tx buffer to the queue it came from.  Note there
1355 * are two cases because we must preserve the order of buffers
1356 * as it reflects the fixed order of descriptors in memory
1357 * (the firmware pre-fetches descriptors so we cannot reorder).
/* Head-insert variant: used when a buffer was taken but not handed to
 * the firmware, so descriptor order is preserved. */
1360 mwl_puttxbuf_head(struct mwl_txq *txq, struct mwl_txbuf *bf)
1365 	STAILQ_INSERT_HEAD(&txq->free, bf, bf_list);
1367 	MWL_TXQ_UNLOCK(txq);
/* Tail-insert variant: used after the firmware has consumed the
 * descriptor, keeping the free list in physical descriptor order. */
1371 mwl_puttxbuf_tail(struct mwl_txq *txq, struct mwl_txbuf *bf)
1376 	STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
1378 	MWL_TXQ_UNLOCK(txq);
/*
 * ifnet if_start entry point: drain the interface send queue, mapping
 * each frame to the h/w queue chosen by its WME access category,
 * handing it to mwl_tx_start() and periodically poking the firmware
 * (every mwl_txcoalesce frames, and once at the end) to fetch the new
 * descriptors.  On buffer exhaustion either blocks the queue
 * (MWL_TX_NODROP) or tail-drops the frame.
 */
1382 mwl_start(struct ifnet *ifp)
1384 	struct mwl_softc *sc = ifp->if_softc;
1385 	struct ieee80211_node *ni;
1386 	struct mwl_txbuf *bf;
1388 	struct mwl_txq *txq = NULL;	/* XXX silence gcc */
1391 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
1396 		IFQ_DEQUEUE(&ifp->if_snd, m);
1400 		 * Grab the node for the destination.
 		/* net80211 stashes the node reference in rcvif on transmit */
1402 		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
1403 		KASSERT(ni != NULL, ("no node"));
1404 		m->m_pkthdr.rcvif = NULL;	/* committed, clear ref */
1406 		 * Grab a TX buffer and associated resources.
1407 		 * We honor the classification by the 802.11 layer.
1409 		txq = sc->sc_ac2q[M_WME_GETAC(m)];
1410 		bf = mwl_gettxbuf(sc, txq);
1413 			ieee80211_free_node(ni);
1414 #ifdef MWL_TX_NODROP
1415 			sc->sc_stats.mst_tx_qstop++;
1416 			/* XXX blocks other traffic */
1417 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1420 			DPRINTF(sc, MWL_DEBUG_XMIT,
1421 			    "%s: tail drop on q %d\n", __func__, txq->qnum);
1422 			sc->sc_stats.mst_tx_qdrop++;
1424 #endif /* MWL_TX_NODROP */
1428 		 * Pass the frame to the h/w for transmission.
1430 		if (mwl_tx_start(sc, ni, bf, m)) {
 			/* tx setup failed; reclaim buffer and node ref */
1432 			mwl_puttxbuf_head(txq, bf);
1433 			ieee80211_free_node(ni);
1437 		if (nqueued >= mwl_txcoalesce) {
1439 			 * Poke the firmware to process queued frames;
1440 			 * see below about (lack of) locking.
1443 			mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
1448 	 * NB: We don't need to lock against tx done because
1449 	 * this just prods the firmware to check the transmit
1450 	 * descriptors.  The firmware will also start fetching
1451 	 * descriptors by itself if it notices new ones are
1452 	 * present when it goes to deliver a tx done interrupt
1453 	 * to the host.  So if we race with tx done processing
1454 	 * it's ok.  Delivering the kick here rather than in
1455 	 * mwl_tx_start is an optimization to avoid poking the
1456 	 * firmware for each packet.
1458 	 * NB: the queue id isn't used so 0 is ok.
1460 	mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
/*
 * net80211 raw transmit entry point (e.g. for management/injected
 * frames).  Mirrors mwl_start() for a single pre-classified mbuf:
 * select the h/w queue from the WME AC, grab a tx buffer, hand the
 * frame to mwl_tx_start() and kick the firmware.  The caller's node
 * reference is released on every failure path.
 */
1465 mwl_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
1466 	const struct ieee80211_bpf_params *params)
1468 	struct ieee80211com *ic = ni->ni_ic;
1469 	struct ifnet *ifp = ic->ic_ifp;
1470 	struct mwl_softc *sc = ifp->if_softc;
1471 	struct mwl_txbuf *bf;
1472 	struct mwl_txq *txq;
1474 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) {
1475 		ieee80211_free_node(ni);
1480 	 * Grab a TX buffer and associated resources.
1481 	 * Note that we depend on the classification
1482 	 * by the 802.11 layer to get to the right h/w
1483 	 * queue.  Management frames must ALWAYS go on
1484 	 * queue 1 but we cannot just force that here
1485 	 * because we may receive non-mgt frames.
1487 	txq = sc->sc_ac2q[M_WME_GETAC(m)];
1488 	bf = mwl_gettxbuf(sc, txq);
1490 		sc->sc_stats.mst_tx_qstop++;
1491 		/* XXX blocks other traffic */
1492 		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1493 		ieee80211_free_node(ni);
1498 	 * Pass the frame to the h/w for transmission.
1500 	if (mwl_tx_start(sc, ni, bf, m)) {
1502 		mwl_puttxbuf_head(txq, bf);
1504 		ieee80211_free_node(ni);
1505 		return EIO;		/* XXX */
1508 	 * NB: We don't need to lock against tx done because
1509 	 * this just prods the firmware to check the transmit
1510 	 * descriptors.  The firmware will also start fetching
1511 	 * descriptors by itself if it notices new ones are
1512 	 * present when it goes to deliver a tx done interrupt
1513 	 * to the host.  So if we race with tx done processing
1514 	 * it's ok.  Delivering the kick here rather than in
1515 	 * mwl_tx_start is an optimization to avoid poking the
1516 	 * firmware for each packet.
1518 	 * NB: the queue id isn't used so 0 is ok.
1520 	mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
/*
 * ifmedia change callback: defer to net80211 and swallow ENETRESET
 * since a fixed-rate change does not require a full device reset.
 */
1525 mwl_media_change(struct ifnet *ifp)
1527 	struct ieee80211vap *vap = ifp->if_softc;
1530 	error = ieee80211_media_change(ifp);
1531 	/* NB: only the fixed rate can change and that doesn't need a reset */
1532 	if (error == ENETRESET) {
/*
 * Debug helper: dump a firmware key record (index, cipher name, key
 * bytes, peer mac, TKIP rx/tx MICs and flags) to the console.
 */
1541 mwl_keyprint(struct mwl_softc *sc, const char *tag,
1542 	const MWL_HAL_KEYVAL *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
1544 	static const char *ciphers[] = {
1551 	printf("%s: [%u] %-7s", tag, hk->keyIndex, ciphers[hk->keyTypeId]);
1552 	for (i = 0, n = hk->keyLen; i < n; i++)
1553 		printf(" %02x", hk->key.aes[i]);
1554 	printf(" mac %s", ether_sprintf(mac));
1555 	if (hk->keyTypeId == KEY_TYPE_ID_TKIP) {
1556 		printf(" %s", "rxmic");
1557 		for (i = 0; i < sizeof(hk->key.tkip.rxMic); i++)
1558 			printf(" %02x", hk->key.tkip.rxMic[i]);
1560 		for (i = 0; i < sizeof(hk->key.tkip.txMic); i++)
1561 			printf(" %02x", hk->key.tkip.txMic[i]);
1563 	printf(" flags 0x%x\n", hk->keyFlags);
1568 * Allocate a key cache slot for a unicast key.  The
1569 * firmware handles key allocation and every station is
1570 * guaranteed key space so we are always successful.
/*
 * net80211 key-alloc callback.  Global/static keys must live in the
 * vap's iv_nw_keys[] array and keep their array index; anything else
 * gets index 0 since the firmware manages real slot assignment.
 */
1573 mwl_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
1574 	ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
1576 	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
1578 	if (k->wk_keyix != IEEE80211_KEYIX_NONE ||
1579 	    (k->wk_flags & IEEE80211_KEY_GROUP)) {
1580 		if (!(&vap->iv_nw_keys[0] <= k &&
1581 		      k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
1582 			/* should not happen */
1583 			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1584 				"%s: bogus group key\n", __func__);
1587 		/* give the caller what they requested */
1588 		*keyix = *rxkeyix = k - vap->iv_nw_keys;
1591 		 * Firmware handles key allocation.
1593 		*keyix = *rxkeyix = 0;
1599 * Delete a key entry allocated by mwl_key_alloc.
/*
 * net80211 key-delete callback.  Builds a MWL_HAL_KEYVAL for the
 * key's cipher and asks the firmware to reset it against the
 * broadcast address.  WDS vaps use the associated ap's h/w vap.
 * Returns 1 on success, 0 on failure (net80211 convention).
 */
1602 mwl_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
1604 	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
1605 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1607 	const uint8_t bcastaddr[IEEE80211_ADDR_LEN] =
1608 	    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1611 		if (vap->iv_opmode != IEEE80211_M_WDS) {
1612 			/* XXX monitor mode? */
1613 			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1614 			    "%s: no hvap for opmode %d\n", __func__,
1618 		hvap = MWL_VAP(vap)->mv_ap_hvap;
1621 	DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: delete key %u\n",
1622 	    __func__, k->wk_keyix);
1624 	memset(&hk, 0, sizeof(hk));
1625 	hk.keyIndex = k->wk_keyix;
1626 	switch (k->wk_cipher->ic_cipher) {
1627 	case IEEE80211_CIPHER_WEP:
1628 		hk.keyTypeId = KEY_TYPE_ID_WEP;
1630 	case IEEE80211_CIPHER_TKIP:
1631 		hk.keyTypeId = KEY_TYPE_ID_TKIP;
1633 	case IEEE80211_CIPHER_AES_CCM:
1634 		hk.keyTypeId = KEY_TYPE_ID_AES;
1637 		/* XXX should not happen */
1638 		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
1639 		    __func__, k->wk_cipher->ic_cipher);
1642 	return (mwl_hal_keyreset(hvap, &hk, bcastaddr) == 0);	/*XXX*/
/*
 * If the key is a group key, translate its XMIT/RECV flags into the
 * firmware's TX/RX group-key flags on hk.
 * NOTE(review): the return statement(s) are elided in this view;
 * callers use the result to distinguish group from pairwise keys.
 */
1646 addgroupflags(MWL_HAL_KEYVAL *hk, const struct ieee80211_key *k)
1648 	if (k->wk_flags & IEEE80211_KEY_GROUP) {
1649 		if (k->wk_flags & IEEE80211_KEY_XMIT)
1650 			hk->keyFlags |= KEY_FLAG_TXGROUPKEY;
1651 		if (k->wk_flags & IEEE80211_KEY_RECV)
1652 			hk->keyFlags |= KEY_FLAG_RXGROUPKEY;
1659 * Set the key cache contents for the specified key.  Key cache
1660 * slot(s) must already have been allocated by mwl_key_alloc.
/*
 * net80211 key-set callback: translate an ieee80211_key into a
 * MWL_HAL_KEYVAL (per-cipher type/length/flags), pick the station-db
 * mac address the firmware expects for this opmode, and plumb the key.
 * Returns 1 on success, 0 on failure (net80211 convention).
 */
1663 mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
1664 	const uint8_t mac[IEEE80211_ADDR_LEN])
1666 #define	GRPXMIT	(IEEE80211_KEY_XMIT | IEEE80211_KEY_GROUP)
1667 /* NB: static wep keys are marked GROUP+tx/rx; GTK will be tx or rx */
1668 #define	IEEE80211_IS_STATICKEY(k) \
1669 	(((k)->wk_flags & (GRPXMIT|IEEE80211_KEY_RECV)) == \
1670 	 (GRPXMIT|IEEE80211_KEY_RECV))
1671 	struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
1672 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1673 	const struct ieee80211_cipher *cip = k->wk_cipher;
1674 	const uint8_t *macaddr;
 	/* h/w crypto only; s/w crypto keys must not reach the driver */
1677 	KASSERT((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0,
1678 	     ("s/w crypto set?"));
1681 		if (vap->iv_opmode != IEEE80211_M_WDS) {
1682 			/* XXX monitor mode? */
1683 			DPRINTF(sc, MWL_DEBUG_KEYCACHE,
1684 			    "%s: no hvap for opmode %d\n", __func__,
1688 		hvap = MWL_VAP(vap)->mv_ap_hvap;
1690 	memset(&hk, 0, sizeof(hk));
1691 	hk.keyIndex = k->wk_keyix;
1692 	switch (cip->ic_cipher) {
1693 	case IEEE80211_CIPHER_WEP:
1694 		hk.keyTypeId = KEY_TYPE_ID_WEP;
1695 		hk.keyLen = k->wk_keylen;
1696 		if (k->wk_keyix == vap->iv_def_txkey)
1697 			hk.keyFlags = KEY_FLAG_WEP_TXKEY;
1698 		if (!IEEE80211_IS_STATICKEY(k)) {
1699 			/* NB: WEP is never used for the PTK */
1700 			(void) addgroupflags(&hk, k);
1703 	case IEEE80211_CIPHER_TKIP:
1704 		hk.keyTypeId = KEY_TYPE_ID_TKIP;
 		/* seed the h/w TSC from the key's current sequence counter */
1705 		hk.key.tkip.tsc.high = (uint32_t)(k->wk_keytsc >> 16);
1706 		hk.key.tkip.tsc.low = (uint16_t)k->wk_keytsc;
1707 		hk.keyFlags = KEY_FLAG_TSC_VALID | KEY_FLAG_MICKEY_VALID;
 		/* key length includes the tkip MIC keys appended below */
1708 		hk.keyLen = k->wk_keylen + IEEE80211_MICBUF_SIZE;
1709 		if (!addgroupflags(&hk, k))
1710 			hk.keyFlags |= KEY_FLAG_PAIRWISE;
1712 	case IEEE80211_CIPHER_AES_CCM:
1713 		hk.keyTypeId = KEY_TYPE_ID_AES;
1714 		hk.keyLen = k->wk_keylen;
1715 		if (!addgroupflags(&hk, k))
1716 			hk.keyFlags |= KEY_FLAG_PAIRWISE;
1719 		/* XXX should not happen */
1720 		DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
1721 		    __func__, k->wk_cipher->ic_cipher);
1725 	 * NB: tkip mic keys get copied here too; the layout
1726 	 *     just happens to match that in ieee80211_key.
1728 	memcpy(hk.key.aes, k->wk_key, hk.keyLen);
1731 	 * Locate address of sta db entry for writing key;
1732 	 * the convention unfortunately is somewhat different
1733 	 * than how net80211, hostapd, and wpa_supplicant think.
1735 	if (vap->iv_opmode == IEEE80211_M_STA) {
1737 		 * NB: keys plumbed before the sta reaches AUTH state
1738 		 * will be discarded or written to the wrong sta db
1739 		 * entry because iv_bss is meaningless.  This is ok
1740 		 * (right now) because we handle deferred plumbing of
1741 		 * WEP keys when the sta reaches AUTH state.
1743 		macaddr = vap->iv_bss->ni_bssid;
1744 		if ((k->wk_flags & IEEE80211_KEY_GROUP) == 0) {
1745 			/* XXX plumb to local sta db too for static key wep */
1746 			mwl_hal_keyset(hvap, &hk, vap->iv_myaddr);
1748 	} else if (vap->iv_opmode == IEEE80211_M_WDS &&
1749 	    vap->iv_state != IEEE80211_S_RUN) {
1751 		 * Prior to RUN state a WDS vap will not it's BSS node
1752 		 * setup so we will plumb the key to the wrong mac
1753 		 * address (it'll be our local address).  Workaround
1754 		 * this for the moment by grabbing the correct address.
1756 		macaddr = vap->iv_des_bssid;
1757 	} else if ((k->wk_flags & GRPXMIT) == GRPXMIT)
1758 		macaddr = vap->iv_myaddr;
1761 	KEYPRINTF(sc, &hk, macaddr);
1762 	return (mwl_hal_keyset(hvap, &hk, macaddr) == 0);
1763 #undef IEEE80211_IS_STATICKEY
1767 /* unaligned little endian access */
/* Byte-wise reads so they are safe on strict-alignment platforms. */
1768 #define LE_READ_2(p)							\
1770 	((((const uint8_t *)(p))[0]      ) |				\
1771 	 (((const uint8_t *)(p))[1] <<  8)))
1772 #define LE_READ_4(p)							\
1774 	((((const uint8_t *)(p))[0]      ) |				\
1775 	 (((const uint8_t *)(p))[1] <<  8) |				\
1776 	 (((const uint8_t *)(p))[2] << 16) |				\
1777 	 (((const uint8_t *)(p))[3] << 24)))
1780 * Set the multicast filter contents into the hardware.
1781 * XXX f/w has no support; just defer to the os.
/*
 * Walk the interface's multicast list into a flat mac-address array
 * and hand it to the firmware.  Falls back to ALLMULTI when the list
 * overflows MWL_HAL_MCAST_MAX entries or contains address ranges.
 * NOTE(review): the ETHER_FIRST_MULTI/enm walk is the older BSD
 * multicast API; part of this function is under a conditional not
 * visible here (see the trailing "no mcast filter support" branch).
 */
1784 mwl_setmcastfilter(struct mwl_softc *sc)
1786 	struct ifnet *ifp = sc->sc_ifp;
1788 	struct ether_multi *enm;
1789 	struct ether_multistep estep;
1790 	uint8_t macs[IEEE80211_ADDR_LEN*MWL_HAL_MCAST_MAX];/* XXX stack use */
1796 	ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm);
1797 	while (enm != NULL) {
1798 		/* XXX Punt on ranges. */
1799 		if (nmc == MWL_HAL_MCAST_MAX ||
1800 		    !IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) {
1801 			ifp->if_flags |= IFF_ALLMULTI;
1804 		IEEE80211_ADDR_COPY(mp, enm->enm_addrlo);
1805 		mp += IEEE80211_ADDR_LEN, nmc++;
1806 		ETHER_NEXT_MULTI(estep, enm);
1808 	ifp->if_flags &= ~IFF_ALLMULTI;
1809 	mwl_hal_setmcast(sc->sc_mh, nmc, macs);
1811 	/* XXX no mcast filter support; we get everything */
1812 	ifp->if_flags |= IFF_ALLMULTI;
/*
 * Push rx filter mode to the firmware: promiscuous setting (ignored
 * in hostap mode, see note) and the current multicast filter.
 */
1817 mwl_mode_init(struct mwl_softc *sc)
1819 	struct ifnet *ifp = sc->sc_ifp;
1820 	struct ieee80211com *ic = ifp->if_l2com;
1821 	struct mwl_hal *mh = sc->sc_mh;
1824 	 * NB: Ignore promisc in hostap mode; it's set by the
1825 	 * bridge.  This is wrong but we have no way to
1826 	 * identify internal requests (from the bridge)
1827 	 * versus external requests such as for tcpdump.
1829 	mwl_hal_setpromisc(mh, (ifp->if_flags & IFF_PROMISC) &&
1830 	    ic->ic_opmode != IEEE80211_M_HOSTAP);
1831 	mwl_setmcastfilter(sc);
1837 * Callback from the 802.11 layer after a multicast state change.
1840 mwl_update_mcast(struct ifnet *ifp)
1842 	struct mwl_softc *sc = ifp->if_softc;
1844 	mwl_setmcastfilter(sc);
1848 * Callback from the 802.11 layer after a promiscuous mode change.
1849 * Note this interface does not check the operating mode as this
1850 * is an internal callback and we are expected to honor the current
1851 * state (e.g. this is used for setting the interface in promiscuous
1852 * mode when operating in hostap mode to do ACS).
1855 mwl_update_promisc(struct ifnet *ifp)
1857 	struct mwl_softc *sc = ifp->if_softc;
1859 	mwl_hal_setpromisc(sc->sc_mh, (ifp->if_flags & IFF_PROMISC) != 0);
1863 * Callback from the 802.11 layer to update the slot time
1864 * based on the current setting.  We use it to notify the
1865 * firmware of ERP changes and the f/w takes care of things
1866 * like slot time and preamble.
/*
 * Build an ERP info bitmask from the current ic flags (only
 * meaningful on 2.4GHz "G" channels) and hand it to the firmware
 * via mwl_hal_setgprot().  No-ops while the interface is down.
 */
1869 mwl_updateslot(struct ifnet *ifp)
1871 	struct mwl_softc *sc = ifp->if_softc;
1872 	struct ieee80211com *ic = ifp->if_l2com;
1873 	struct mwl_hal *mh = sc->sc_mh;
1876 	/* NB: can be called early; suppress needless cmds */
1877 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1881 	 * Calculate the ERP flags.  The firwmare will use
1882 	 * this to carry out the appropriate measures.
1885 	if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
1886 		if ((ic->ic_flags & IEEE80211_F_SHSLOT) == 0)
1887 			prot |= IEEE80211_ERP_NON_ERP_PRESENT;
1888 		if (ic->ic_flags & IEEE80211_F_USEPROT)
1889 			prot |= IEEE80211_ERP_USE_PROTECTION;
1890 		if (ic->ic_flags & IEEE80211_F_USEBARKER)
1891 			prot |= IEEE80211_ERP_LONG_PREAMBLE;
1894 	DPRINTF(sc, MWL_DEBUG_RESET,
1895 	    "%s: chan %u MHz/flags 0x%x %s slot, (prot 0x%x ic_flags 0x%x)\n",
1896 	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
1897 	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", prot,
1900 	mwl_hal_setgprot(mh, prot);
1904 * Setup the beacon frame.
/*
 * Build a beacon frame for the vap's bss node with net80211 and hand
 * its contents to the firmware, which takes over beacon transmission.
 */
1907 mwl_beacon_setup(struct ieee80211vap *vap)
1909 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1910 	struct ieee80211_node *ni = vap->iv_bss;
1911 	struct ieee80211_beacon_offsets bo;
1914 	m = ieee80211_beacon_alloc(ni, &bo);
1917 	mwl_hal_setbeacon(hvap, mtod(m, const void *), m->m_len);
1924 * Update the beacon frame in response to a change.
/*
 * net80211 beacon-update callback, dispatched on the changed item:
 * ERP changes re-push slot/protection state; HTINFO re-pushes the
 * N-protection mode; TIM is handled entirely by the firmware; the
 * remaining items fall through to a full beacon rebuild.
 */
1927 mwl_beacon_update(struct ieee80211vap *vap, int item)
1929 	struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
1930 	struct ieee80211com *ic = vap->iv_ic;
1932 	KASSERT(hvap != NULL, ("no beacon"));
1934 	case IEEE80211_BEACON_ERP:
1935 		mwl_updateslot(ic->ic_ifp);
1937 	case IEEE80211_BEACON_HTINFO:
1938 		mwl_hal_setnprotmode(hvap,
1939 		    MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
1941 	case IEEE80211_BEACON_CAPS:
1942 	case IEEE80211_BEACON_WME:
1943 	case IEEE80211_BEACON_APPIE:
1944 	case IEEE80211_BEACON_CSA:
1946 	case IEEE80211_BEACON_TIM:
1947 		/* NB: firmware always forms TIM */
1950 	/* XXX retain beacon frame and update */
1951 	mwl_beacon_setup(vap);
/*
 * bus_dmamap_load callback: record the (single) segment's bus address
 * into the caller-supplied bus_addr_t.
 */
1955 mwl_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1957 	bus_addr_t *paddr = (bus_addr_t*) arg;
1958 	KASSERT(error == 0, ("error %u on bus_dma callback", error));
1959 	*paddr = segs->ds_addr;
1962 #ifdef MWL_HOST_PS_SUPPORT
1964 * Handle power save station occupancy changes.
/*
 * Only notify the firmware on the 0 <-> non-zero transitions of the
 * power-save station count; intermediate count changes are cached in
 * mv_last_ps_sta.
 */
1967 mwl_update_ps(struct ieee80211vap *vap, int nsta)
1969 	struct mwl_vap *mvp = MWL_VAP(vap);
1971 	if (nsta == 0 || mvp->mv_last_ps_sta == 0)
1972 		mwl_hal_setpowersave_bss(mvp->mv_hvap, nsta);
1973 	mvp->mv_last_ps_sta = nsta;
1977 * Handle associated station power save state changes.
/*
 * TIM callback: chain to net80211's saved handler (mv_set_tim) and,
 * only when it reports a real state change, tell the firmware about
 * the station's new power-save state by AID.
 */
1980 mwl_set_tim(struct ieee80211_node *ni, int set)
1982 	struct ieee80211vap *vap = ni->ni_vap;
1983 	struct mwl_vap *mvp = MWL_VAP(vap);
1985 	if (mvp->mv_set_tim(ni, set)) {		/* NB: state change */
1986 		mwl_hal_setpowersave_sta(mvp->mv_hvap,
1987 		    IEEE80211_AID(ni->ni_associd), set);
1992 #endif /* MWL_HOST_PS_SUPPORT */
/*
 * Allocate and map a DMA descriptor area for tx or rx:
 * creates a dma tag sized nbuf*ndesc*descsize, allocates coherent
 * memory below 4GB, loads it to obtain the bus address, and zeroes it.
 * On any failure the partially-created resources are torn down (the
 * goto-cleanup labels) and dd is reset to all-zero.
 */
1995 mwl_desc_setup(struct mwl_softc *sc, const char *name,
1996 	struct mwl_descdma *dd,
1997 	int nbuf, size_t bufsize, int ndesc, size_t descsize)
1999 	struct ifnet *ifp = sc->sc_ifp;
2003 	DPRINTF(sc, MWL_DEBUG_RESET,
2004 	    "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n",
2005 	    __func__, name, nbuf, (uintmax_t) bufsize,
2006 	    ndesc, (uintmax_t) descsize);
2009 	dd->dd_desc_len = nbuf * ndesc * descsize;
2012 	 * Setup DMA descriptor area.
2014 	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
2015 		       PAGE_SIZE, 0,		/* alignment, bounds */
2016 		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2017 		       BUS_SPACE_MAXADDR,	/* highaddr */
2018 		       NULL, NULL,		/* filter, filterarg */
2019 		       dd->dd_desc_len,		/* maxsize */
2021 		       dd->dd_desc_len,		/* maxsegsize */
2022 		       BUS_DMA_ALLOCNOW,	/* flags */
2023 		       NULL,			/* lockfunc */
2027 		if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
2031 	/* allocate descriptors */
2032 	error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap);
2034 		if_printf(ifp, "unable to create dmamap for %s descriptors, "
2035 			"error %u\n", dd->dd_name, error);
2039 	error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
2040 				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2043 		if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
2044 			"error %u\n", nbuf * ndesc, dd->dd_name, error);
2048 	error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
2049 				dd->dd_desc, dd->dd_desc_len,
2050 				mwl_load_cb, &dd->dd_desc_paddr,
2053 		if_printf(ifp, "unable to map %s descriptors, error %u\n",
2054 			dd->dd_name, error);
2059 	memset(ds, 0, dd->dd_desc_len);
2060 	DPRINTF(sc, MWL_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
2061 	    __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
2062 	    (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
/* error unwind: free in reverse order of acquisition */
2066 	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
2068 	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
2070 	bus_dma_tag_destroy(dd->dd_dmat);
2071 	memset(dd, 0, sizeof(*dd));
/*
 * Release everything mwl_desc_setup() created (unload, free, destroy
 * map and tag) and zero the descdma record.
 */
2077 mwl_desc_cleanup(struct mwl_softc *sc, struct mwl_descdma *dd)
2079 	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
2080 	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
2081 	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
2082 	bus_dma_tag_destroy(dd->dd_dmat);
2084 	memset(dd, 0, sizeof(*dd));
2088 * Construct a tx q's free list.  The order of entries on
2089 * the list must reflect the physical layout of tx descriptors
2090 * because the firmware pre-fetches descriptors.
2092 * XXX might be better to use indices into the buffer array.
2095 mwl_txq_reset(struct mwl_softc *sc, struct mwl_txq *txq)
2097 	struct mwl_txbuf *bf;
2100 	bf = txq->dma.dd_bufptr;
2101 	STAILQ_INIT(&txq->free);
2102 	for (i = 0; i < mwl_txbuf; i++, bf++)
2103 		STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
/* Convert a descriptor's kernel VA into its bus address via the
 * offset from the start of the mapped descriptor area. */
2107 #define	DS2PHYS(_dd, _ds) \
2108 	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
/*
 * Set up DMA state for one tx queue: descriptor area (MWL_TXDESC
 * descriptors per buffer), the mwl_txbuf array with each buffer's
 * descriptor bus address and per-buffer dmamap, then build the free
 * list via mwl_txq_reset().
 */
2111 mwl_txdma_setup(struct mwl_softc *sc, struct mwl_txq *txq)
2113 	struct ifnet *ifp = sc->sc_ifp;
2114 	int error, bsize, i;
2115 	struct mwl_txbuf *bf;
2116 	struct mwl_txdesc *ds;
2118 	error = mwl_desc_setup(sc, "tx", &txq->dma,
2119 			mwl_txbuf, sizeof(struct mwl_txbuf),
2120 			MWL_TXDESC, sizeof(struct mwl_txdesc));
2124 	/* allocate and setup tx buffers */
2125 	bsize = mwl_txbuf * sizeof(struct mwl_txbuf);
2126 	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2128 		if_printf(ifp, "malloc of %u tx buffers failed\n",
2132 	txq->dma.dd_bufptr = bf;
2134 	ds = txq->dma.dd_desc;
2135 	for (i = 0; i < mwl_txbuf; i++, bf++, ds += MWL_TXDESC) {
2137 		bf->bf_daddr = DS2PHYS(&txq->dma, ds);
2138 		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
2141 			if_printf(ifp, "unable to create dmamap for tx "
2142 				"buffer %u, error %u\n", i, error);
2146 	mwl_txq_reset(sc, txq);
2151 mwl_txdma_cleanup(struct mwl_softc *sc, struct mwl_txq *txq)
2153 struct mwl_txbuf *bf;
2156 bf = txq->dma.dd_bufptr;
2157 for (i = 0; i < mwl_txbuf; i++, bf++) {
2158 KASSERT(bf->bf_m == NULL, ("mbuf on free list"));
2159 KASSERT(bf->bf_node == NULL, ("node on free list"));
2160 if (bf->bf_dmamap != NULL)
2161 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
2163 STAILQ_INIT(&txq->free);
2165 if (txq->dma.dd_bufptr != NULL) {
2166 free(txq->dma.dd_bufptr, M_MWLDEV);
2167 txq->dma.dd_bufptr = NULL;
2169 if (txq->dma.dd_desc_len != 0)
2170 mwl_desc_cleanup(sc, &txq->dma);
/*
 * Set up receive DMA: a descriptor area (one rx descriptor per
 * mwl_rxbuf), plus a private pool of jumbo data buffers carved from a
 * single large coherent allocation.  The first mwl_rxdesc buffers are
 * bound to rx descriptors; the remainder seed the free pool used to
 * replenish as frames are passed up the stack.
 */
2174 mwl_rxdma_setup(struct mwl_softc *sc)
2176 	struct ifnet *ifp = sc->sc_ifp;
2177 	int error, jumbosize, bsize, i;
2178 	struct mwl_rxbuf *bf;
2179 	struct mwl_jumbo *rbuf;
2180 	struct mwl_rxdesc *ds;
2183 	error = mwl_desc_setup(sc, "rx", &sc->sc_rxdma,
2184 			mwl_rxdesc, sizeof(struct mwl_rxbuf),
2185 			1, sizeof(struct mwl_rxdesc));
2190 	 * Receive is done to a private pool of jumbo buffers.
2191 	 * This allows us to attach to mbuf's and avoid re-mapping
2192 	 * memory on each rx we post.  We allocate a large chunk
2193 	 * of memory and manage it in the driver.  The mbuf free
2194 	 * callback method is used to reclaim frames after sending
2195 	 * them up the stack.  By default we allocate 2x the number of
2196 	 * rx descriptors configured so we have some slop to hold
2197 	 * us while frames are processed.
2199 	if (mwl_rxbuf < 2*mwl_rxdesc) {
2201 		    "too few rx dma buffers (%d); increasing to %d\n",
2202 		    mwl_rxbuf, 2*mwl_rxdesc);
2203 		mwl_rxbuf = 2*mwl_rxdesc;
2205 	jumbosize = roundup(MWL_AGGR_SIZE, PAGE_SIZE);
2206 	sc->sc_rxmemsize = mwl_rxbuf*jumbosize;
2208 	error = bus_dma_tag_create(sc->sc_dmat,	/* parent */
2209 		       PAGE_SIZE, 0,		/* alignment, bounds */
2210 		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
2211 		       BUS_SPACE_MAXADDR,	/* highaddr */
2212 		       NULL, NULL,		/* filter, filterarg */
2213 		       sc->sc_rxmemsize,	/* maxsize */
2215 		       sc->sc_rxmemsize,	/* maxsegsize */
2216 		       BUS_DMA_ALLOCNOW,	/* flags */
2217 		       NULL,			/* lockfunc */
2220 	error = bus_dmamap_create(sc->sc_rxdmat, BUS_DMA_NOWAIT, &sc->sc_rxmap);
2222 		if_printf(ifp, "could not create rx DMA map\n");
2226 	error = bus_dmamem_alloc(sc->sc_rxdmat, (void**) &sc->sc_rxmem,
2227 				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
2230 		if_printf(ifp, "could not alloc %ju bytes of rx DMA memory\n",
2231 		    (uintmax_t) sc->sc_rxmemsize);
2235 	error = bus_dmamap_load(sc->sc_rxdmat, sc->sc_rxmap,
2236 				sc->sc_rxmem, sc->sc_rxmemsize,
2237 				mwl_load_cb, &sc->sc_rxmem_paddr,
2240 		if_printf(ifp, "could not load rx DMA map\n");
2245 	 * Allocate rx buffers and set them up.
2247 	bsize = mwl_rxdesc * sizeof(struct mwl_rxbuf);
2248 	bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
2250 		if_printf(ifp, "malloc of %u rx buffers failed\n", bsize);
2253 	sc->sc_rxdma.dd_bufptr = bf;
2255 	STAILQ_INIT(&sc->sc_rxbuf);
2256 	ds = sc->sc_rxdma.dd_desc;
2257 	for (i = 0; i < mwl_rxdesc; i++, bf++, ds++) {
2259 		bf->bf_daddr = DS2PHYS(&sc->sc_rxdma, ds);
2260 		/* pre-assign dma buffer */
2261 		bf->bf_data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2262 		/* NB: tail is intentional to preserve descriptor order */
2263 		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
2267 	 * Place remainder of dma memory buffers on the free list.
2269 	SLIST_INIT(&sc->sc_rxfree);
2270 	for (; i < mwl_rxbuf; i++) {
2271 		data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
2272 		rbuf = MWL_JUMBO_DATA2BUF(data);
2273 		SLIST_INSERT_HEAD(&sc->sc_rxfree, rbuf, next);
2276 	MWL_RXFREE_INIT(sc);
/*
 * Undo mwl_rxdma_setup(): unload and free the jumbo pool, destroy its
 * map, free the rx buffer array, release the descriptor area and the
 * rx free-pool lock.  Each step is guarded so partial setups clean up
 * safely.
 */
2282 mwl_rxdma_cleanup(struct mwl_softc *sc)
2284 	if (sc->sc_rxmap != NULL)
2285 		bus_dmamap_unload(sc->sc_rxdmat, sc->sc_rxmap);
2286 	if (sc->sc_rxmem != NULL) {
2287 		bus_dmamem_free(sc->sc_rxdmat, sc->sc_rxmem, sc->sc_rxmap);
2288 		sc->sc_rxmem = NULL;
2290 	if (sc->sc_rxmap != NULL) {
2291 		bus_dmamap_destroy(sc->sc_rxdmat, sc->sc_rxmap);
2292 		sc->sc_rxmap = NULL;
2294 	if (sc->sc_rxdma.dd_bufptr != NULL) {
2295 		free(sc->sc_rxdma.dd_bufptr, M_MWLDEV);
2296 		sc->sc_rxdma.dd_bufptr = NULL;
2298 	if (sc->sc_rxdma.dd_desc_len != 0)
2299 		mwl_desc_cleanup(sc, &sc->sc_rxdma);
2300 	MWL_RXFREE_DESTROY(sc);
/*
 * Top-level DMA setup: rx first, then every tx queue.  A failure in
 * either path tears down whatever was already created.
 */
2304 mwl_dma_setup(struct mwl_softc *sc)
2308 	error = mwl_rxdma_setup(sc);
2310 		mwl_rxdma_cleanup(sc);
2314 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
2315 		error = mwl_txdma_setup(sc, &sc->sc_txq[i]);
2317 			mwl_dma_cleanup(sc);
/* Release DMA state for all tx queues and the rx path. */
2325 mwl_dma_cleanup(struct mwl_softc *sc)
2329 	for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
2330 		mwl_txdma_cleanup(sc, &sc->sc_txq[i]);
2331 	mwl_rxdma_cleanup(sc);
/*
 * net80211 node-alloc callback: allocate a driver-private mwl_node
 * (which embeds the ieee80211_node) and return the embedded node.
 */
2334 static struct ieee80211_node *
2335 mwl_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2337 	struct ieee80211com *ic = vap->iv_ic;
2338 	struct mwl_softc *sc = ic->ic_ifp->if_softc;
2339 	const size_t space = sizeof(struct mwl_node);
2340 	struct mwl_node *mn;
2342 	mn = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
2347 	DPRINTF(sc, MWL_DEBUG_NODE, "%s: mn %p\n", __func__, mn);
2348 	return &mn->mn_node;
/*
 * net80211 node-cleanup callback: if the node holds a firmware
 * station id, remove the station from the firmware's sta db (using
 * the appropriate h/w vap — sta vaps key by our own mac, WDS peers by
 * the associated ap's hvap), release the staid, then chain to the
 * saved net80211 cleanup handler.
 */
2352 mwl_node_cleanup(struct ieee80211_node *ni)
2354 	struct ieee80211com *ic = ni->ni_ic;
2355         struct mwl_softc *sc = ic->ic_ifp->if_softc;
2356 	struct mwl_node *mn = MWL_NODE(ni);
2358 	DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p ic %p staid %d\n",
2359 	    __func__, ni, ni->ni_ic, mn->mn_staid);
2361 	if (mn->mn_staid != 0) {
2362 		struct ieee80211vap *vap = ni->ni_vap;
2364 		if (mn->mn_hvap != NULL) {
2365 			if (vap->iv_opmode == IEEE80211_M_STA)
2366 				mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr);
2368 				mwl_hal_delstation(mn->mn_hvap, ni->ni_macaddr);
2371 		 * NB: legacy WDS peer sta db entry is installed using
2372 		 *     the associate ap's hvap; use it again to delete it.
2373 		 *     XXX can vap be NULL?
2375 		else if (vap->iv_opmode == IEEE80211_M_WDS &&
2376 		    MWL_VAP(vap)->mv_ap_hvap != NULL)
2377 			mwl_hal_delstation(MWL_VAP(vap)->mv_ap_hvap,
2379 		delstaid(sc, mn->mn_staid);
2382 	sc->sc_node_cleanup(ni);
2386 * Reclaim rx dma buffers from packets sitting on the ampdu
2387 * reorder queue for a station. We replace buffers with a
2388 * system cluster (if available).
/*
 * NOTE(review): this path uses pool_cache_get_paddr/M_CLUSTER which
 * look NetBSD-derived; confirm against the platform mbuf API before
 * relying on the details below.
 */
2391 mwl_ampdu_rxdma_reclaim(struct ieee80211_rx_ampdu *rap)
2398 n = rap->rxa_qframes;
2399 for (i = 0; i < rap->rxa_wnd && n > 0; i++) {
2404 /* our dma buffers have a well-known free routine */
2405 if ((m->m_flags & M_EXT) == 0 ||
2406 m->m_ext.ext_free != mwl_ext_free)
2409 * Try to allocate a cluster and move the data.
/* preserve the data offset so a copied frame lays out identically */
2411 off = m->m_data - m->m_ext.ext_buf;
2412 if (off + m->m_pkthdr.len > MCLBYTES) {
2413 /* XXX no AMSDU for now */
2416 cl = pool_cache_get_paddr(&mclpool_cache, 0,
2417 &m->m_ext.ext_paddr);
2420 * Copy the existing data to the cluster, remove
2421 * the rx dma buffer, and attach the cluster in
2422 * its place. Note we preserve the offset to the
2423 * data so frames being bridged can still prepend
2424 * their headers without adding another mbuf.
2426 memcpy((caddr_t) cl + off, m->m_data, m->m_pkthdr.len);
2428 MEXTADD(m, cl, MCLBYTES, 0, NULL, &mclpool_cache);
2429 /* setup mbuf like _MCLGET does */
2430 m->m_flags |= M_CLUSTER | M_EXT_RW;
2431 _MOWNERREF(m, M_EXT | M_CLUSTER);
2432 /* NB: m_data is clobbered by MEXTADDR, adjust */
2440 * Callback to reclaim resources. We first let the
2441 * net80211 layer do it's thing, then if we are still
2442 * blocked by a lack of rx dma buffers we walk the ampdu
2443 * reorder q's to reclaim buffers by copying to a system
2447 mwl_node_drain(struct ieee80211_node *ni)
2449 struct ieee80211com *ic = ni->ni_ic;
2450 struct mwl_softc *sc = ic->ic_ifp->if_softc;
2451 struct mwl_node *mn = MWL_NODE(ni);
2453 DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p vap %p staid %d\n",
2454 __func__, ni, ni->ni_vap, mn->mn_staid);
2456 /* NB: call up first to age out ampdu q's */
2457 sc->sc_node_drain(ni);
2459 /* XXX better to not check low water mark? */
/* only HT stations with a f/w station id can hold reorder-q buffers */
2460 if (sc->sc_rxblocked && mn->mn_staid != 0 &&
2461 (ni->ni_flags & IEEE80211_NODE_HT)) {
2464 * Walk the reorder q and reclaim rx dma buffers by copying
2465 * the packet contents into clusters.
2467 for (tid = 0; tid < WME_NUM_TID; tid++) {
2468 struct ieee80211_rx_ampdu *rap;
2470 rap = &ni->ni_rx_ampdu[tid];
/* skip TIDs without an active block-ack exchange */
2471 if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0)
2473 if (rap->rxa_qframes)
2474 mwl_ampdu_rxdma_reclaim(rap);
/*
 * Return rssi/noise for a node.  Rssi comes from net80211's
 * smoothed per-node value; the noise floor is taken from the
 * per-antenna info when MWL_ANT_INFO_SUPPORT is compiled in,
 * otherwise a fixed -95 dBm placeholder is reported.
 */
2480 mwl_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
2482 *rssi = ni->ni_ic->ic_node_getrssi(ni);
2483 #ifdef MWL_ANT_INFO_SUPPORT
2485 /* XXX need to smooth data */
2486 *noise = -MWL_NODE_CONST(ni)->mn_ai.nf;
2488 *noise = -95; /* XXX */
2491 *noise = -95; /* XXX */
2496 * Convert Hardware per-antenna rssi info to common format:
2497 * Let a1, a2, a3 represent the amplitudes per chain
2498 * Let amax represent max[a1, a2, a3]
2499 * Rssi1_dBm = RSSI_dBm + 20*log10(a1/amax)
2500 * Rssi1_dBm = RSSI_dBm + 20*log10(a1) - 20*log10(amax)
2501 * We store a table that is 4*20*log10(idx) - the extra 4 is to store or
2502 * maintain some extra precision.
2504 * Values are stored in .5 db format capped at 127.
2507 mwl_node_getmimoinfo(const struct ieee80211_node *ni,
2508 struct ieee80211_mimo_info *mi)
/*
 * CVT: scale chain _src relative to the strongest chain and convert
 * to .5 dB units; >>2 removes the 4x precision in logdbtbl, <<1
 * converts dB to .5 dB, values above 64 dB saturate at 127.
 */
2510 #define CVT(_dst, _src) do { \
2511 (_dst) = rssi + ((logdbtbl[_src] - logdbtbl[rssi_max]) >> 2); \
2512 (_dst) = (_dst) > 64 ? 127 : ((_dst) << 1); \
2514 static const int8_t logdbtbl[32] = {
2515 0, 0, 24, 38, 48, 56, 62, 68,
2516 72, 76, 80, 83, 86, 89, 92, 94,
2517 96, 98, 100, 102, 104, 106, 107, 109,
2518 110, 112, 113, 115, 116, 117, 118, 119
2520 const struct mwl_node *mn = MWL_NODE_CONST(ni);
2521 uint8_t rssi = mn->mn_ai.rsvd1/2; /* XXX */
/* find the strongest of the three receive chains */
2524 rssi_max = mn->mn_ai.rssi_a;
2525 if (mn->mn_ai.rssi_b > rssi_max)
2526 rssi_max = mn->mn_ai.rssi_b;
2527 if (mn->mn_ai.rssi_c > rssi_max)
2528 rssi_max = mn->mn_ai.rssi_c;
2530 CVT(mi->rssi[0], mn->mn_ai.rssi_a);
2531 CVT(mi->rssi[1], mn->mn_ai.rssi_b);
2532 CVT(mi->rssi[2], mn->mn_ai.rssi_c);
2534 mi->noise[0] = mn->mn_ai.nf_a;
2535 mi->noise[1] = mn->mn_ai.nf_b;
2536 mi->noise[2] = mn->mn_ai.nf_c;
/*
 * Pop an rx DMA buffer from the jumbo free list and return a
 * pointer to its data area, or NULL (with a stat bump) when the
 * pool is exhausted.  Caller gets exclusive use of the buffer;
 * it is returned via mwl_putrxdma/mwl_ext_free.
 */
2540 static __inline void *
2541 mwl_getrxdma(struct mwl_softc *sc)
2543 struct mwl_jumbo *buf;
2547 * Allocate from jumbo pool.
2549 MWL_RXFREE_LOCK(sc);
2550 buf = SLIST_FIRST(&sc->sc_rxfree);
2552 DPRINTF(sc, MWL_DEBUG_ANY,
2553 "%s: out of rx dma buffers\n", __func__);
2554 sc->sc_stats.mst_rx_nodmabuf++;
2557 SLIST_REMOVE_HEAD(&sc->sc_rxfree, next);
2559 data = MWL_JUMBO_BUF2DATA(buf);
2561 MWL_RXFREE_UNLOCK(sc);
/*
 * Return an rx DMA buffer (by its data pointer) to the jumbo
 * free list.  Inverse of mwl_getrxdma.
 */
2565 static __inline void
2566 mwl_putrxdma(struct mwl_softc *sc, void *data)
2568 struct mwl_jumbo *buf;
2570 /* XXX bounds check data */
2571 MWL_RXFREE_LOCK(sc);
2572 buf = MWL_JUMBO_DATA2BUF(data);
2573 SLIST_INSERT_HEAD(&sc->sc_rxfree, buf, next);
2575 MWL_RXFREE_UNLOCK(sc);
/*
 * (Re)initialize an rx descriptor for a buffer.  If the buffer
 * has no DMA memory attached, try to get one from the free pool;
 * on failure the descriptor is marked OS-owned so the firmware
 * skips it and we stay in sync with the hardware ring.
 */
2579 mwl_rxbuf_init(struct mwl_softc *sc, struct mwl_rxbuf *bf)
2581 struct mwl_rxdesc *ds;
2584 if (bf->bf_data == NULL) {
2585 bf->bf_data = mwl_getrxdma(sc);
2586 if (bf->bf_data == NULL) {
2587 /* mark descriptor to be skipped */
2588 ds->RxControl = EAGLE_RXD_CTRL_OS_OWN;
2589 /* NB: don't need PREREAD */
2590 MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE);
2591 sc->sc_stats.mst_rxbuf_failed++;
2596 * NB: DMA buffer contents is known to be unmodified
2597 * so there's no need to flush the data cache.
/* hand the (refilled) descriptor back to the firmware */
2605 ds->Status = EAGLE_RXD_STATUS_IDLE;
2607 ds->PktLen = htole16(MWL_AGGR_SIZE);
2609 ds->pPhysBuffData = htole32(MWL_JUMBO_DMA_ADDR(sc, bf->bf_data));
2610 /* NB: don't touch pPhysNext, set once */
2611 ds->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
2612 MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/*
 * External mbuf storage free callback for rx DMA buffers that
 * were loaned to the network stack.  Returns the buffer to the
 * free pool and, if rx was blocked for lack of buffers, re-enables
 * the rx interrupt once the pool refills past mwl_rxdmalow.
 */
2618 mwl_ext_free(void *data, void *arg)
2620 struct mwl_softc *sc = arg;
2622 /* XXX bounds check data */
2623 mwl_putrxdma(sc, data);
2625 * If we were previously blocked by a lack of rx dma buffers
2626 * check if we now have enough to restart rx interrupt handling.
2627 * NB: we know we are called at splvm which is above splnet.
2629 if (sc->sc_rxblocked && sc->sc_nrxfree > mwl_rxdmalow) {
2630 sc->sc_rxblocked = 0;
2631 mwl_hal_intrset(sc->sc_mh, sc->sc_imask);
/*
 * 802.11 Block Ack Request (BAR) control frame layout; used by
 * mwl_anyhdrsize below to size BAR headers, which net80211's
 * generic ieee80211_hdrsize does not handle.
 */
2635 struct mwl_frame_bar {
2638 u_int8_t i_ra[IEEE80211_ADDR_LEN];
2639 u_int8_t i_ta[IEEE80211_ADDR_LEN];
2644 * Like ieee80211_anyhdrsize, but handles BAR frames
2645 * specially so the logic below to piece the 802.11
2646 * header together works.
2649 mwl_anyhdrsize(const void *data)
2651 const struct ieee80211_frame *wh = data;
/* control frames carry abbreviated headers; size by subtype */
2653 if ((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) {
2654 switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
2655 case IEEE80211_FC0_SUBTYPE_CTS:
2656 case IEEE80211_FC0_SUBTYPE_ACK:
2657 return sizeof(struct ieee80211_frame_ack);
2658 case IEEE80211_FC0_SUBTYPE_BAR:
2659 return sizeof(struct mwl_frame_bar);
2661 return sizeof(struct ieee80211_frame_min);
/* everything else is handled by the generic net80211 routine */
2663 return ieee80211_hdrsize(data);
/*
 * Report a TKIP Michael MIC failure to net80211.  The frame data
 * begins with the 2-byte firmware length field, so the 802.11
 * header starts sizeof(uint16_t) bytes in.
 */
2667 mwl_handlemicerror(struct ieee80211com *ic, const uint8_t *data)
2669 const struct ieee80211_frame *wh;
2670 struct ieee80211_node *ni;
2672 wh = (const struct ieee80211_frame *)(data + sizeof(uint16_t));
2673 ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
2675 ieee80211_notify_michael_failure(ni->ni_vap, wh, 0);
2676 ieee80211_free_node(ni);
2681 * Convert hardware signal strength to rssi. The value
2682 * provided by the device has the noise floor added in;
2683 * we need to compensate for this but we don't have that
2684 * so we use a fixed value.
2686 * The offset of 8 is good for both 2.4 and 5GHz. The LNA
2687 * offset is already set as part of the initial gain. This
2688 * will give at least +/- 3dB for 2.4GHz and +/- 5dB for 5GHz.
2691 cvtrssi(uint8_t ssi)
2693 int rssi = (int) ssi + 8;
2694 /* XXX hack guess until we have a real noise floor */
2695 rssi = 2*(87 - rssi); /* NB: .5 dBm units */
/* clamp result into the 0..127 range expected by net80211 */
2696 return (rssi < 0 ? 0 : rssi > 127 ? 127 : rssi);
/*
 * Deferred rx processing (taskqueue handler).  Walk the rx
 * descriptor ring up to mwl_rxquota frames: for each completed
 * descriptor, replace its DMA buffer, reconstruct a proper 802.11
 * header in front of the payload, attach the buffer to an mbuf as
 * external storage and dispatch it to net80211.  If the DMA buffer
 * pool runs dry, rx interrupts are disabled until mwl_ext_free
 * refills the pool (see sc_rxblocked handling).
 */
2700 mwl_rx_proc(void *arg, int npending)
2702 #define IEEE80211_DIR_DSTODS(wh) \
2703 ((((const struct ieee80211_frame *)wh)->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
2704 struct mwl_softc *sc = arg;
2705 struct ifnet *ifp = sc->sc_ifp;
2706 struct ieee80211com *ic = ifp->if_l2com;
2707 struct mwl_rxbuf *bf;
2708 struct mwl_rxdesc *ds;
2710 struct ieee80211_qosframe *wh;
2711 struct ieee80211_qosframe_addr4 *wh4;
2712 struct ieee80211_node *ni;
2713 struct mwl_node *mn;
2714 int off, len, hdrlen, pktlen, rssi, ntodo;
2715 uint8_t *data, status;
2719 DPRINTF(sc, MWL_DEBUG_RX_PROC, "%s: pending %u rdptr 0x%x wrptr 0x%x\n",
2720 __func__, npending, RD4(sc, sc->sc_hwspecs.rxDescRead),
2721 RD4(sc, sc->sc_hwspecs.rxDescWrite));
/* bound the work per invocation so we don't livelock under load */
2724 for (ntodo = mwl_rxquota; ntodo > 0; ntodo--) {
2726 bf = STAILQ_FIRST(&sc->sc_rxbuf);
2731 * If data allocation failed previously there
2732 * will be no buffer; try again to re-populate it.
2733 * Note the firmware will not advance to the next
2734 * descriptor with a dma buffer so we must mimic
2735 * this or we'll get out of sync.
2737 DPRINTF(sc, MWL_DEBUG_ANY,
2738 "%s: rx buf w/o dma memory\n", __func__);
2739 (void) mwl_rxbuf_init(sc, bf);
2740 sc->sc_stats.mst_rx_dmabufmissing++;
2743 MWL_RXDESC_SYNC(sc, ds,
2744 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
/* stop at the first descriptor still owned by the DMA engine */
2745 if (ds->RxControl != EAGLE_RXD_CTRL_DMA_OWN)
2748 if (sc->sc_debug & MWL_DEBUG_RECV_DESC)
2749 mwl_printrxbuf(bf, 0);
2751 status = ds->Status;
2752 if (status & EAGLE_RXD_STATUS_DECRYPT_ERR_MASK) {
2754 sc->sc_stats.mst_rx_crypto++;
2756 * NB: Check EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR
2757 * for backwards compatibility.
2759 if (status != EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR &&
2760 (status & EAGLE_RXD_STATUS_TKIP_MIC_DECRYPT_ERR)) {
2762 * MIC error, notify upper layers.
2764 bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap,
2765 BUS_DMASYNC_POSTREAD);
2766 mwl_handlemicerror(ic, data);
2767 sc->sc_stats.mst_rx_tkipmic++;
2769 /* XXX too painful to tap packets */
2773 * Sync the data buffer.
2775 len = le16toh(ds->PktLen);
2776 bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD);
2778 * The 802.11 header is provided all or in part at the front;
2779 * use it to calculate the true size of the header that we'll
2780 * construct below. We use this to figure out where to copy
2781 * payload prior to constructing the header.
2783 hdrlen = mwl_anyhdrsize(data + sizeof(uint16_t));
2784 off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4);
2786 /* calculate rssi early so we can re-use for each aggregate */
2787 rssi = cvtrssi(ds->RSSI);
2789 pktlen = hdrlen + (len - off);
2791 * NB: we know our frame is at least as large as
2792 * IEEE80211_MIN_LEN because there is a 4-address
2793 * frame at the front. Hence there's no need to
2794 * vet the packet length. If the frame in fact
2795 * is too small it should be discarded at the
2800 * Attach dma buffer to an mbuf. We tried
2801 * doing this based on the packet size (i.e.
2802 * copying small packets) but it turns out to
2803 * be a net loss. The tradeoff might be system
2804 * dependent (cache architecture is important).
2806 MGETHDR(m, M_DONTWAIT, MT_DATA);
2808 DPRINTF(sc, MWL_DEBUG_ANY,
2809 "%s: no rx mbuf\n", __func__);
2810 sc->sc_stats.mst_rx_nombuf++;
2814 * Acquire the replacement dma buffer before
2815 * processing the frame. If we're out of dma
2816 * buffers we disable rx interrupts and wait
2817 * for the free pool to reach mlw_rxdmalow buffers
2818 * before starting to do work again. If the firmware
2819 * runs out of descriptors then it will toss frames
2820 * which is better than our doing it as that can
2821 * starve our processing. It is also important that
2822 * we always process rx'd frames in case they are
2823 * A-MPDU as otherwise the host's view of the BA
2824 * window may get out of sync with the firmware.
2826 newdata = mwl_getrxdma(sc);
2827 if (newdata == NULL) {
2828 /* NB: stat+msg in mwl_getrxdma */
2830 /* disable RX interrupt and mark state */
2831 mwl_hal_intrset(sc->sc_mh,
2832 sc->sc_imask &~ MACREG_A2HRIC_BIT_RX_RDY);
2833 sc->sc_rxblocked = 1;
2834 ieee80211_drain(ic);
2835 /* XXX check rxblocked and immediately start again? */
2838 bf->bf_data = newdata;
2840 * Attach the dma buffer to the mbuf;
2841 * mwl_rxbuf_init will re-setup the rx
2842 * descriptor using the replacement dma
2843 * buffer we just installed above.
2845 MEXTADD(m, data, MWL_AGGR_SIZE, mwl_ext_free,
2846 data, sc, 0, EXT_NET_DRV);
2847 m->m_data += off - hdrlen;
2848 m->m_pkthdr.len = m->m_len = pktlen;
2849 m->m_pkthdr.rcvif = ifp;
2850 /* NB: dma buffer assumed read-only */
2853 * Piece 802.11 header together.
2855 wh = mtod(m, struct ieee80211_qosframe *);
2856 /* NB: don't need to do this sometimes but ... */
2857 /* XXX special case so we can memcpy after m_devget? */
2858 ovbcopy(data + sizeof(uint16_t), wh, hdrlen);
/* re-insert the QoS control field the firmware delivers separately */
2859 if (IEEE80211_QOS_HAS_SEQ(wh)) {
2860 if (IEEE80211_DIR_DSTODS(wh)) {
2862 struct ieee80211_qosframe_addr4*);
2863 *(uint16_t *)wh4->i_qos = ds->QosCtrl;
2865 *(uint16_t *)wh->i_qos = ds->QosCtrl;
2869 * The f/w strips WEP header but doesn't clear
2870 * the WEP bit; mark the packet with M_WEP so
2871 * net80211 will treat the data as decrypted.
2872 * While here also clear the PWR_MGT bit since
2873 * power save is handled by the firmware and
2874 * passing this up will potentially cause the
2875 * upper layer to put a station in power save
2876 * (except when configured with MWL_HOST_PS_SUPPORT).
2878 if (wh->i_fc[1] & IEEE80211_FC1_WEP)
2879 m->m_flags |= M_WEP;
2880 #ifdef MWL_HOST_PS_SUPPORT
2881 wh->i_fc[1] &= ~IEEE80211_FC1_WEP;
2883 wh->i_fc[1] &= ~(IEEE80211_FC1_WEP | IEEE80211_FC1_PWR_MGT);
2886 if (ieee80211_radiotap_active(ic)) {
2887 struct mwl_rx_radiotap_header *tap = &sc->sc_rx_th;
2890 tap->wr_rate = ds->Rate;
2891 tap->wr_antsignal = rssi + nf;
2892 tap->wr_antnoise = nf;
2894 if (IFF_DUMPPKTS_RECV(sc, wh)) {
2895 ieee80211_dump_pkt(ic, mtod(m, caddr_t),
2896 len, ds->Rate, rssi);
/* dispatch to net80211; node-specific path updates antenna info */
2901 ni = ieee80211_find_rxnode(ic,
2902 (const struct ieee80211_frame_min *) wh);
2905 #ifdef MWL_ANT_INFO_SUPPORT
2906 mn->mn_ai.rssi_a = ds->ai.rssi_a;
2907 mn->mn_ai.rssi_b = ds->ai.rssi_b;
2908 mn->mn_ai.rssi_c = ds->ai.rssi_c;
2909 mn->mn_ai.rsvd1 = rssi;
2911 /* tag AMPDU aggregates for reorder processing */
2912 if (ni->ni_flags & IEEE80211_NODE_HT)
2913 m->m_flags |= M_AMPDU;
2914 (void) ieee80211_input(ni, m, rssi, nf);
2915 ieee80211_free_node(ni);
2917 (void) ieee80211_input_all(ic, m, rssi, nf);
2919 /* NB: ignore ENOMEM so we process more descriptors */
2920 (void) mwl_rxbuf_init(sc, bf);
2921 bf = STAILQ_NEXT(bf, bf_list);
2926 if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 &&
2927 !IFQ_IS_EMPTY(&ifp->if_snd)) {
2928 /* NB: kick fw; the tx thread may have been preempted */
2929 mwl_hal_txstart(sc->sc_mh, 0);
2932 #undef IEEE80211_DIR_DSTODS
/*
 * Initialize a tx queue: lock, free/active lists, and chain each
 * buffer's descriptor to the next (wrapping the last back to the
 * first) so the hardware sees a circular descriptor list.
 */
2936 mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *txq, int qnum)
2938 struct mwl_txbuf *bf, *bn;
2939 struct mwl_txdesc *ds;
2941 MWL_TXQ_LOCK_INIT(sc, txq);
2943 txq->txpri = 0; /* XXX */
2945 /* NB: q setup by mwl_txdma_setup XXX */
2946 STAILQ_INIT(&txq->free);
2948 STAILQ_FOREACH(bf, &txq->free, bf_list) {
2952 bn = STAILQ_NEXT(bf, bf_list);
/* last descriptor wraps to the head of the list */
2954 bn = STAILQ_FIRST(&txq->free);
2955 ds->pPhysNext = htole32(bn->bf_daddr);
2957 STAILQ_INIT(&txq->active);
2961 * Setup a hardware data transmit queue for the specified
2962 * access control. We record the mapping from ac's
2963 * to h/w queues for use by mwl_tx_start.
2966 mwl_tx_setup(struct mwl_softc *sc, int ac, int mvtype)
2968 #define N(a) (sizeof(a)/sizeof(a[0]))
2969 struct mwl_txq *txq;
/* validate both indices before touching the tables */
2971 if (ac >= N(sc->sc_ac2q)) {
2972 device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
2973 ac, N(sc->sc_ac2q));
2976 if (mvtype >= MWL_NUM_TX_QUEUES) {
2977 device_printf(sc->sc_dev, "mvtype %u out of range, max %u!\n",
2978 mvtype, MWL_NUM_TX_QUEUES);
2981 txq = &sc->sc_txq[mvtype];
2982 mwl_txq_init(sc, txq, mvtype);
/* record AC -> h/w queue mapping used by mwl_tx_start */
2983 sc->sc_ac2q[ac] = txq;
2989 * Update WME parameters for a transmit queue.
2992 mwl_txq_update(struct mwl_softc *sc, int ac)
/* expand a WME log2 contention-window exponent to its value */
2994 #define MWL_EXPONENT_TO_VALUE(v) ((1<<v)-1)
2995 struct ifnet *ifp = sc->sc_ifp;
2996 struct ieee80211com *ic = ifp->if_l2com;
2997 struct mwl_txq *txq = sc->sc_ac2q[ac];
2998 struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
2999 struct mwl_hal *mh = sc->sc_mh;
3000 int aifs, cwmin, cwmax, txoplim;
3002 aifs = wmep->wmep_aifsn;
3003 /* XXX in sta mode need to pass log values for cwmin/max */
3004 cwmin = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
3005 cwmax = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
3006 txoplim = wmep->wmep_txopLimit; /* NB: units of 32us */
3008 if (mwl_hal_setedcaparams(mh, txq->qnum, cwmin, cwmax, aifs, txoplim)) {
3009 device_printf(sc->sc_dev, "unable to update hardware queue "
3010 "parameters for %s traffic!\n",
3011 ieee80211_wme_acnames[ac]);
3015 #undef MWL_EXPONENT_TO_VALUE
3019 * Callback from the 802.11 layer to update WME parameters.
3022 mwl_wme_update(struct ieee80211com *ic)
3024 struct mwl_softc *sc = ic->ic_ifp->if_softc;
/* push all four ACs; any single failure yields EIO */
3026 return !mwl_txq_update(sc, WME_AC_BE) ||
3027 !mwl_txq_update(sc, WME_AC_BK) ||
3028 !mwl_txq_update(sc, WME_AC_VI) ||
3029 !mwl_txq_update(sc, WME_AC_VO) ? EIO : 0;
3033 * Reclaim resources for a setup queue.
3036 mwl_tx_cleanupq(struct mwl_softc *sc, struct mwl_txq *txq)
3039 MWL_TXQ_LOCK_DESTROY(txq);
3043 * Reclaim all tx queue resources.
3046 mwl_tx_cleanup(struct mwl_softc *sc)
3050 for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3051 mwl_tx_cleanupq(sc, &sc->sc_txq[i]);
/*
 * DMA-map an outbound mbuf chain into bf->bf_segs.  If the chain
 * needs more than MWL_TXDESC segments it is collapsed/defragged
 * into fewer mbufs and re-loaded; zero-length packets and busdma
 * failures are counted and rejected.  On success the map is
 * pre-write synced and ready for the hardware.
 */
3055 mwl_tx_dmasetup(struct mwl_softc *sc, struct mwl_txbuf *bf, struct mbuf *m0)
3061 * Load the DMA map so any coalescing is done. This
3062 * also calculates the number of descriptors we need.
3064 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
3065 bf->bf_segs, &bf->bf_nseg,
3067 if (error == EFBIG) {
3068 /* XXX packet requires too many descriptors */
3069 bf->bf_nseg = MWL_TXDESC+1;
3070 } else if (error != 0) {
3071 sc->sc_stats.mst_tx_busdma++;
3076 * Discard null packets and check for packets that
3077 * require too many TX descriptors. We try to convert
3078 * the latter to a cluster.
3080 if (error == EFBIG) { /* too many desc's, linearize */
3081 sc->sc_stats.mst_tx_linear++;
3083 m = m_collapse(m0, M_DONTWAIT, MWL_TXDESC);
/* fall back to a full defrag if collapse can't make it fit */
3085 m = m_defrag(m0, M_DONTWAIT);
3089 sc->sc_stats.mst_tx_nombuf++;
3093 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
3094 bf->bf_segs, &bf->bf_nseg,
3097 sc->sc_stats.mst_tx_busdma++;
3101 KASSERT(bf->bf_nseg <= MWL_TXDESC,
3102 ("too many segments after defrag; nseg %u", bf->bf_nseg));
3103 } else if (bf->bf_nseg == 0) { /* null packet, discard */
3104 sc->sc_stats.mst_tx_nodata++;
3108 DPRINTF(sc, MWL_DEBUG_XMIT, "%s: m %p len %u\n",
3109 __func__, m0, m0->m_pkthdr.len);
3110 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
/*
 * Map a legacy (non-HT) net80211 rate to the hardware rate code.
 * NOTE(review): body not visible in this extract — see mwl_calcformat
 * below for the caller; confirm the mapping table in the full source.
 */
3117 mwl_cvtlegacyrate(int rate)
3138 * Calculate fixed tx rate information per client state;
3139 * this value is suitable for writing to the Format field
3140 * of a tx descriptor.
3143 mwl_calcformat(uint8_t rate, const struct ieee80211_node *ni)
3147 fmt = SM(3, EAGLE_TXD_ANTENNA)
3148 | (IEEE80211_IS_CHAN_HT40D(ni->ni_chan) ?
3149 EAGLE_TXD_EXTCHAN_LO : EAGLE_TXD_EXTCHAN_HI);
3150 if (rate & IEEE80211_RATE_MCS) { /* HT MCS */
3151 fmt |= EAGLE_TXD_FORMAT_HT
3152 /* NB: 0x80 implicitly stripped from ucastrate */
3153 | SM(rate, EAGLE_TXD_RATE);
3154 /* XXX short/long GI may be wrong; re-check */
/* choose 20/40MHz width and guard interval from the peer's HT caps */
3155 if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
3156 fmt |= EAGLE_TXD_CHW_40
3157 | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40 ?
3158 EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
3160 fmt |= EAGLE_TXD_CHW_20
3161 | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20 ?
3162 EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
3164 } else { /* legacy rate */
3165 fmt |= EAGLE_TXD_FORMAT_LEGACY
3166 | SM(mwl_cvtlegacyrate(rate), EAGLE_TXD_RATE)
3168 /* XXX iv_flags & IEEE80211_F_SHPREAMBLE? */
3169 | (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE ?
3170 EAGLE_TXD_PREAMBLE_SHORT : EAGLE_TXD_PREAMBLE_LONG);
/*
 * Prepare and hand a frame to the hardware: encrypt if needed,
 * prepend the firmware's mwltxrec header (2-byte payload length +
 * 4-address 802.11 header), DMA-map the chain, fill in the tx
 * descriptor (rate/format, queue priority, A-MPDU stream match)
 * and queue it to the selected hardware queue.  Caller holds the
 * node reference which is recorded in bf->bf_node.
 */
3176 mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *bf,
3179 #define IEEE80211_DIR_DSTODS(wh) \
3180 ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
3181 struct ifnet *ifp = sc->sc_ifp;
3182 struct ieee80211com *ic = ifp->if_l2com;
3183 struct ieee80211vap *vap = ni->ni_vap;
3184 int error, iswep, ismcast;
3185 int hdrlen, copyhdrlen, pktlen;
3186 struct mwl_txdesc *ds;
3187 struct mwl_txq *txq;
3188 struct ieee80211_frame *wh;
3189 struct mwltxrec *tr;
3190 struct mwl_node *mn;
3196 wh = mtod(m0, struct ieee80211_frame *);
3197 iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
3198 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
3199 hdrlen = ieee80211_anyhdrsize(wh);
3200 copyhdrlen = hdrlen;
3201 pktlen = m0->m_pkthdr.len;
/* extract the QoS control word; it is carried in the descriptor */
3202 if (IEEE80211_QOS_HAS_SEQ(wh)) {
3203 if (IEEE80211_DIR_DSTODS(wh)) {
3205 (((struct ieee80211_qosframe_addr4 *) wh)->i_qos);
3206 copyhdrlen -= sizeof(qos);
3209 (((struct ieee80211_qosframe *) wh)->i_qos);
3214 const struct ieee80211_cipher *cip;
3215 struct ieee80211_key *k;
3218 * Construct the 802.11 header+trailer for an encrypted
3219 * frame. The only reason this can fail is because of an
3220 * unknown or unsupported cipher/key type.
3222 * NB: we do this even though the firmware will ignore
3223 * what we've done for WEP and TKIP as we need the
3224 * ExtIV filled in for CCMP and this also adjusts
3225 * the headers which simplifies our work below.
3227 k = ieee80211_crypto_encap(ni, m0);
3230 * This can happen when the key is yanked after the
3231 * frame was queued. Just discard the frame; the
3232 * 802.11 layer counts failures and provides
3233 * debugging/diagnostics.
3239 * Adjust the packet length for the crypto additions
3240 * done during encap and any other bits that the f/w
3241 * will add later on.
3244 pktlen += cip->ic_header + cip->ic_miclen + cip->ic_trailer;
3246 /* packet header may have moved, reset our local pointer */
3247 wh = mtod(m0, struct ieee80211_frame *);
3250 if (ieee80211_radiotap_active_vap(vap)) {
3251 sc->sc_tx_th.wt_flags = 0; /* XXX */
3253 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3255 sc->sc_tx_th.wt_rate = ds->DataRate;
3257 sc->sc_tx_th.wt_txpower = ni->ni_txpower;
3258 sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
3260 ieee80211_radiotap_tx(vap, m0);
3263 * Copy up/down the 802.11 header; the firmware requires
3264 * we present a 2-byte payload length followed by a
3265 * 4-address header (w/o QoS), followed (optionally) by
3266 * any WEP/ExtIV header (but only filled in for CCMP).
3267 * We are assured the mbuf has sufficient headroom to
3268 * prepend in-place by the setup of ic_headroom in
3271 if (hdrlen < sizeof(struct mwltxrec)) {
3272 const int space = sizeof(struct mwltxrec) - hdrlen;
3273 if (M_LEADINGSPACE(m0) < space) {
3274 /* NB: should never happen */
3275 device_printf(sc->sc_dev,
3276 "not enough headroom, need %d found %zd, "
3277 "m_flags 0x%x m_len %d\n",
3278 space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len);
3279 ieee80211_dump_pkt(ic,
3280 mtod(m0, const uint8_t *), m0->m_len, 0, -1);
3282 sc->sc_stats.mst_tx_noheadroom++;
3285 M_PREPEND(m0, space, M_NOWAIT);
3287 tr = mtod(m0, struct mwltxrec *);
3288 if (wh != (struct ieee80211_frame *) &tr->wh)
3289 ovbcopy(wh, &tr->wh, hdrlen);
3291 * Note: the "firmware length" is actually the length
3292 * of the fully formed "802.11 payload". That is, it's
3293 * everything except for the 802.11 header. In particular
3294 * this includes all crypto material including the MIC!
3296 tr->fwlen = htole16(pktlen - hdrlen);
3299 * Load the DMA map so any coalescing is done. This
3300 * also calculates the number of descriptors we need.
3302 error = mwl_tx_dmasetup(sc, bf, m0);
3304 /* NB: stat collected in mwl_tx_dmasetup */
3305 DPRINTF(sc, MWL_DEBUG_XMIT,
3306 "%s: unable to setup dma\n", __func__);
3309 bf->bf_node = ni; /* NB: held reference */
3310 m0 = bf->bf_m; /* NB: may have changed */
3311 tr = mtod(m0, struct mwltxrec *);
3312 wh = (struct ieee80211_frame *)&tr->wh;
3315 * Formulate tx descriptor.
3320 ds->QosCtrl = qos; /* NB: already little-endian */
3323 * NB: multiframes should be zero because the descriptors
3324 * are initialized to zero. This should handle the case
3325 * where the driver is built with MWL_TXDESC=1 but we are
3326 * using firmware with multi-segment support.
3328 ds->PktPtr = htole32(bf->bf_segs[0].ds_addr);
3329 ds->PktLen = htole16(bf->bf_segs[0].ds_len);
/* multi-segment frame: describe each segment to the firmware */
3331 ds->multiframes = htole32(bf->bf_nseg);
3332 ds->PktLen = htole16(m0->m_pkthdr.len);
3333 for (i = 0; i < bf->bf_nseg; i++) {
3334 ds->PktPtrArray[i] = htole32(bf->bf_segs[i].ds_addr);
3335 ds->PktLenArray[i] = htole16(bf->bf_segs[i].ds_len);
3338 /* NB: pPhysNext, DataRate, and SapPktInfo setup once, don't touch */
3341 ds->ack_wcb_addr = 0;
3345 * Select transmit rate.
3347 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
3348 case IEEE80211_FC0_TYPE_MGT:
3349 sc->sc_stats.mst_tx_mgmt++;
3351 case IEEE80211_FC0_TYPE_CTL:
3352 /* NB: assign to BE q to avoid bursting */
3353 ds->TxPriority = MWL_WME_AC_BE;
3355 case IEEE80211_FC0_TYPE_DATA:
3357 const struct ieee80211_txparam *tp = ni->ni_txparms;
3359 * EAPOL frames get forced to a fixed rate and w/o
3360 * aggregation; otherwise check for any fixed rate
3361 * for the client (may depend on association state).
3363 if (m0->m_flags & M_EAPOL) {
3364 const struct mwl_vap *mvp = MWL_VAP_CONST(vap);
3365 ds->Format = mvp->mv_eapolformat;
3367 EAGLE_TXD_FIXED_RATE | EAGLE_TXD_DONT_AGGR);
3368 } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3369 /* XXX pre-calculate per node */
3370 ds->Format = htole16(
3371 mwl_calcformat(tp->ucastrate, ni));
3372 ds->pad = htole16(EAGLE_TXD_FIXED_RATE);
3374 /* NB: EAPOL frames will never have qos set */
3376 ds->TxPriority = txq->qnum;
/*
 * Match the frame's TID against the node's active block-ack
 * streams; frames on a BA stream are sent on the stream's
 * dedicated hardware queue so aggregation can occur.
 */
3378 else if (mwl_bastream_match(&mn->mn_ba[3], qos))
3379 ds->TxPriority = mn->mn_ba[3].txq;
3382 else if (mwl_bastream_match(&mn->mn_ba[2], qos))
3383 ds->TxPriority = mn->mn_ba[2].txq;
3386 else if (mwl_bastream_match(&mn->mn_ba[1], qos))
3387 ds->TxPriority = mn->mn_ba[1].txq;
3390 else if (mwl_bastream_match(&mn->mn_ba[0], qos))
3391 ds->TxPriority = mn->mn_ba[0].txq;
3394 ds->TxPriority = txq->qnum;
3396 ds->TxPriority = txq->qnum;
3399 if_printf(ifp, "bogus frame type 0x%x (%s)\n",
3400 wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
3401 sc->sc_stats.mst_tx_badframetype++;
3406 if (IFF_DUMPPKTS_XMIT(sc))
3407 ieee80211_dump_pkt(ic,
3408 mtod(m0, const uint8_t *)+sizeof(uint16_t),
3409 m0->m_len - sizeof(uint16_t), ds->DataRate, -1);
/* hand the descriptor to the firmware and arm the watchdog */
3412 ds->Status = htole32(EAGLE_TXD_STATUS_FW_OWNED);
3413 STAILQ_INSERT_TAIL(&txq->active, bf, bf_list);
3414 MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3417 sc->sc_tx_timer = 5;
3418 MWL_TXQ_UNLOCK(txq);
3421 #undef IEEE80211_DIR_DSTODS
/*
 * Convert a hardware legacy rate index back to a net80211 rate
 * (in 0.5 Mb/s units); out-of-range indices map to 0.
 */
3425 mwl_cvtlegacyrix(int rix)
3427 #define N(x) (sizeof(x)/sizeof(x[0]))
3428 static const int ieeerates[] =
3429 { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
3430 return (rix < N(ieeerates) ? ieeerates[rix] : 0);
3435 * Process completed xmit descriptors from the specified queue.
3438 mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq)
3440 #define EAGLE_TXD_STATUS_MCAST \
3441 (EAGLE_TXD_STATUS_MULTICAST_TX | EAGLE_TXD_STATUS_BROADCAST_TX)
3442 struct ifnet *ifp = sc->sc_ifp;
3443 struct ieee80211com *ic = ifp->if_l2com;
3444 struct mwl_txbuf *bf;
3445 struct mwl_txdesc *ds;
3446 struct ieee80211_node *ni;
3447 struct mwl_node *an;
3451 DPRINTF(sc, MWL_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum);
/* reap buffers until we hit one still owned by the firmware */
3452 for (nreaped = 0;; nreaped++) {
3454 bf = STAILQ_FIRST(&txq->active);
3456 MWL_TXQ_UNLOCK(txq);
3460 MWL_TXDESC_SYNC(txq, ds,
3461 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3462 if (ds->Status & htole32(EAGLE_TXD_STATUS_FW_OWNED)) {
3463 MWL_TXQ_UNLOCK(txq);
3466 STAILQ_REMOVE_HEAD(&txq->active, bf_list);
3467 MWL_TXQ_UNLOCK(txq);
3470 if (sc->sc_debug & MWL_DEBUG_XMIT_DESC)
3471 mwl_printtxbuf(bf, txq->qnum, nreaped);
/* account tx status against the destination node, if any */
3476 status = le32toh(ds->Status);
3477 if (status & EAGLE_TXD_STATUS_OK) {
3478 uint16_t Format = le16toh(ds->Format);
3479 uint8_t txant = MS(Format, EAGLE_TXD_ANTENNA);
3481 sc->sc_stats.mst_ant_tx[txant]++;
3482 if (status & EAGLE_TXD_STATUS_OK_RETRY)
3483 sc->sc_stats.mst_tx_retries++;
3484 if (status & EAGLE_TXD_STATUS_OK_MORE_RETRY)
3485 sc->sc_stats.mst_tx_mretries++;
3486 if (txq->qnum >= MWL_WME_AC_VO)
3487 ic->ic_wme.wme_hipri_traffic++;
/* record the rate the f/w actually used for this frame */
3488 ni->ni_txrate = MS(Format, EAGLE_TXD_RATE);
3489 if ((Format & EAGLE_TXD_FORMAT_HT) == 0) {
3490 ni->ni_txrate = mwl_cvtlegacyrix(
3493 ni->ni_txrate |= IEEE80211_RATE_MCS;
3494 sc->sc_stats.mst_tx_rate = ni->ni_txrate;
3496 if (status & EAGLE_TXD_STATUS_FAILED_LINK_ERROR)
3497 sc->sc_stats.mst_tx_linkerror++;
3498 if (status & EAGLE_TXD_STATUS_FAILED_XRETRY)
3499 sc->sc_stats.mst_tx_xretries++;
3500 if (status & EAGLE_TXD_STATUS_FAILED_AGING)
3501 sc->sc_stats.mst_tx_aging++;
3502 if (bf->bf_m->m_flags & M_FF)
3503 sc->sc_stats.mst_ff_txerr++;
3506 * Do any tx complete callback. Note this must
3507 * be done before releasing the node reference.
3508 * XXX no way to figure out if frame was ACK'd
3510 if (bf->bf_m->m_flags & M_TXCB) {
3511 /* XXX strip fw len in case header inspected */
3512 m_adj(bf->bf_m, sizeof(uint16_t));
3513 ieee80211_process_callback(ni, bf->bf_m,
3514 (status & EAGLE_TXD_STATUS_OK) == 0);
3517 * Reclaim reference to node.
3519 * NB: the node may be reclaimed here if, for example
3520 * this is a DEAUTH message that was sent and the
3521 * node was timed out due to inactivity.
3523 ieee80211_free_node(ni);
3525 ds->Status = htole32(EAGLE_TXD_STATUS_IDLE);
3527 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
3528 BUS_DMASYNC_POSTWRITE);
3529 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
/* return the buffer to the free list for reuse */
3532 mwl_puttxbuf_tail(txq, bf);
3535 #undef EAGLE_TXD_STATUS_MCAST
3539 * Deferred processing of transmit interrupt; special-cased
3540 * for four hardware queues, 0-3.
3543 mwl_tx_proc(void *arg, int npending)
3545 struct mwl_softc *sc = arg;
3546 struct ifnet *ifp = sc->sc_ifp;
3550 * Process each active queue.
3553 if (!STAILQ_EMPTY(&sc->sc_txq[0].active))
3554 nreaped += mwl_tx_processq(sc, &sc->sc_txq[0]);
3555 if (!STAILQ_EMPTY(&sc->sc_txq[1].active))
3556 nreaped += mwl_tx_processq(sc, &sc->sc_txq[1]);
3557 if (!STAILQ_EMPTY(&sc->sc_txq[2].active))
3558 nreaped += mwl_tx_processq(sc, &sc->sc_txq[2]);
3559 if (!STAILQ_EMPTY(&sc->sc_txq[3].active))
3560 nreaped += mwl_tx_processq(sc, &sc->sc_txq[3]);
/* if we reaped anything, clear OACTIVE/watchdog and restart output */
3563 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3564 sc->sc_tx_timer = 0;
3565 if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
3566 /* NB: kick fw; the tx thread may have been preempted */
3567 mwl_hal_txstart(sc->sc_mh, 0);
/*
 * Drain all frames from a tx queue regardless of completion
 * status: unload DMA maps, release node references, and return
 * the buffers to the free list.  Used on stop/reset.
 */
3574 mwl_tx_draintxq(struct mwl_softc *sc, struct mwl_txq *txq)
3576 struct ieee80211_node *ni;
3577 struct mwl_txbuf *bf;
3581 * NB: this assumes output has been stopped and
3582 * we do not need to block mwl_tx_tasklet
3584 for (ix = 0;; ix++) {
3586 bf = STAILQ_FIRST(&txq->active);
3588 MWL_TXQ_UNLOCK(txq);
3591 STAILQ_REMOVE_HEAD(&txq->active, bf_list);
3592 MWL_TXQ_UNLOCK(txq);
3594 if (sc->sc_debug & MWL_DEBUG_RESET) {
3595 struct ifnet *ifp = sc->sc_ifp;
3596 struct ieee80211com *ic = ifp->if_l2com;
3597 const struct mwltxrec *tr =
3598 mtod(bf->bf_m, const struct mwltxrec *);
3599 mwl_printtxbuf(bf, txq->qnum, ix);
3600 ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh,
3601 bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1);
3603 #endif /* MWL_DEBUG */
3604 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3608 * Reclaim node reference.
3610 ieee80211_free_node(ni);
3614 mwl_puttxbuf_tail(txq, bf);
3619 * Drain the transmit queues and reclaim resources.
3622 mwl_draintxq(struct mwl_softc *sc)
3624 struct ifnet *ifp = sc->sc_ifp;
3627 for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3628 mwl_tx_draintxq(sc, &sc->sc_txq[i]);
/* all queues empty: allow output again and cancel the watchdog */
3629 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3630 sc->sc_tx_timer = 0;
3635 * Reset the transmit queues to a pristine state after a fw download.
3638 mwl_resettxq(struct mwl_softc *sc)
/* Rebuild each tx queue's descriptor/list state via mwl_txq_reset. */
3642 for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
3643 mwl_txq_reset(sc, &sc->sc_txq[i]);
3645 #endif /* MWL_DIAGAPI */
3648 * Clear the transmit queues of any frames submitted for the
3649 * specified vap. This is done when the vap is deleted so we
3650 * don't potentially reference the vap after it is gone.
3651 * Note we cannot remove the frames; we only reclaim the node
/* (continuation elided: reclaim the node reference, frames stay queued) */
3655 mwl_cleartxq(struct mwl_softc *sc, struct ieee80211vap *vap)
3657 struct mwl_txq *txq;
3658 struct mwl_txbuf *bf;
/* Walk every queued buffer; drop node refs that belong to this vap. */
3661 for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
3662 txq = &sc->sc_txq[i];
3664 STAILQ_FOREACH(bf, &txq->active, bf_list) {
3665 struct ieee80211_node *ni = bf->bf_node;
3666 if (ni != NULL && ni->ni_vap == vap) {
/* (elided: presumably bf->bf_node = NULL before freeing — TODO confirm) */
3668 ieee80211_free_node(ni);
3671 MWL_TXQ_UNLOCK(txq);
/*
 * Intercept received 802.11 action frames: handle HT MIMO power-save
 * actions by pushing the new MIMO PS state to the firmware, then hand
 * the frame to the saved net80211 handler (sc_recv_action).
 */
3676 mwl_recv_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh,
3677 const uint8_t *frm, const uint8_t *efrm)
3679 struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3680 const struct ieee80211_action *ia;
3682 ia = (const struct ieee80211_action *) frm;
3683 if (ia->ia_category == IEEE80211_ACTION_CAT_HT &&
3684 ia->ia_action == IEEE80211_ACTION_HT_MIMOPWRSAVE) {
3685 const struct ieee80211_action_ht_mimopowersave *mps =
3686 (const struct ieee80211_action_ht_mimopowersave *) ia;
/* Forward enable bit and mode field from the action frame to the hal. */
3688 mwl_hal_setmimops(sc->sc_mh, ni->ni_macaddr,
3689 mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA,
3690 MS(mps->am_control, IEEE80211_A_HT_MIMOPWRSAVE_MODE));
/* Always chain to the original net80211 action handler. */
3693 return sc->sc_recv_action(ni, wh, frm, efrm);
/*
 * A-MPDU TX: intercept an ADDBA request.  Reserve one of the node's
 * (up to four) BA stream slots and pre-allocate a firmware bastream
 * before letting net80211 send the request.  Returns 0 (no aggregation)
 * when no slot or firmware stream is available.
 * NOTE(review): slot-selection control flow (the if/else chain linking
 * the four slot checks) is partially elided in this excerpt.
 */
3697 mwl_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
3698 int dialogtoken, int baparamset, int batimeout)
3700 struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3701 struct ieee80211vap *vap = ni->ni_vap;
3702 struct mwl_node *mn = MWL_NODE(ni);
3703 struct mwl_bastate *bas;
3705 bas = tap->txa_private;
3707 const MWL_HAL_BASTREAM *sp;
3709 * Check for a free BA stream slot.
/* Pick the first free slot out of mn_ba[3..0]. */
3712 if (mn->mn_ba[3].bastream == NULL)
3713 bas = &mn->mn_ba[3];
3717 if (mn->mn_ba[2].bastream == NULL)
3718 bas = &mn->mn_ba[2];
3722 if (mn->mn_ba[1].bastream == NULL)
3723 bas = &mn->mn_ba[1];
3727 if (mn->mn_ba[0].bastream == NULL)
3728 bas = &mn->mn_ba[0];
3732 /* sta already has max BA streams */
3733 /* XXX assign BA stream to highest priority tid */
3734 DPRINTF(sc, MWL_DEBUG_AMPDU,
3735 "%s: already has max bastreams\n", __func__);
3736 sc->sc_stats.mst_ampdu_reject++;
3739 /* NB: no held reference to ni */
3740 sp = mwl_hal_bastream_alloc(MWL_VAP(vap)->mv_hvap,
3741 (baparamset & IEEE80211_BAPS_POLICY_IMMEDIATE) != 0,
3742 ni->ni_macaddr, WME_AC_TO_TID(tap->txa_ac), ni->ni_htparam,
/* (elided: trailing alloc args and NULL check on sp) */
3746 * No available stream, return 0 so no
3747 * a-mpdu aggregation will be done.
3749 DPRINTF(sc, MWL_DEBUG_AMPDU,
3750 "%s: no bastream available\n", __func__);
3751 sc->sc_stats.mst_ampdu_nostream++;
3754 DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: alloc bastream %p\n",
3756 /* NB: qos is left zero so we won't match in mwl_tx_start */
/* Stash the reserved slot on the ampdu state for addba_response/stop. */
3758 tap->txa_private = bas;
3760 /* fetch current seq# from the firmware; if available */
3761 if (mwl_hal_bastream_get_seqno(sc->sc_mh, bas->bastream,
3762 vap->iv_opmode == IEEE80211_M_STA ? vap->iv_myaddr : ni->ni_macaddr,
3763 &tap->txa_start) != 0)
/* (elided: fallback when seqno fetch fails — TODO confirm behavior) */
3765 return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout);
/*
 * A-MPDU TX: intercept the peer's ADDBA response.  On success, commit
 * the pre-allocated firmware bastream (mwl_hal_bastream_create) with a
 * buffer size bounded by the peer's BA parameter set; on failure or NAK,
 * destroy the stream and free the slot.  Always chains to the saved
 * net80211 handler (sc_addba_response).
 */
3769 mwl_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
3770 int code, int baparamset, int batimeout)
3772 struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3773 struct mwl_bastate *bas;
3775 bas = tap->txa_private;
/* No stream was reserved by mwl_addba_request (elided NULL check). */
3777 /* XXX should not happen */
3778 DPRINTF(sc, MWL_DEBUG_AMPDU,
3779 "%s: no BA stream allocated, AC %d\n",
3780 __func__, tap->txa_ac);
3781 sc->sc_stats.mst_addba_nostream++;
3784 if (code == IEEE80211_STATUS_SUCCESS) {
3785 struct ieee80211vap *vap = ni->ni_vap;
3789 * Tell the firmware to setup the BA stream;
3790 * we know resources are available because we
3791 * pre-allocated one before forming the request.
3793 bufsiz = MS(baparamset, IEEE80211_BAPS_BUFSIZ);
/* bufsiz == 0 means "unspecified" (elided check): use the BAW max. */
3795 bufsiz = IEEE80211_AGGR_BAWMAX;
3796 error = mwl_hal_bastream_create(MWL_VAP(vap)->mv_hvap,
3797 bas->bastream, bufsiz, bufsiz, tap->txa_start);
3800 * Setup failed, return immediately so no a-mpdu
3801 * aggregation will be done.
3803 mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
3804 mwl_bastream_free(bas);
3805 tap->txa_private = NULL;
3807 DPRINTF(sc, MWL_DEBUG_AMPDU,
3808 "%s: create failed, error %d, bufsiz %d AC %d "
3809 "htparam 0x%x\n", __func__, error, bufsiz,
3810 tap->txa_ac, ni->ni_htparam);
3811 sc->sc_stats.mst_bacreate_failed++;
3814 /* NB: cache txq to avoid ptr indirect */
3815 mwl_bastream_setup(bas, tap->txa_ac, bas->bastream->txq);
3816 DPRINTF(sc, MWL_DEBUG_AMPDU,
3817 "%s: bastream %p assigned to txq %d AC %d bufsiz %d "
3818 "htparam 0x%x\n", __func__, bas->bastream,
3819 bas->txq, tap->txa_ac, bufsiz, ni->ni_htparam);
3822 * Other side NAK'd us; return the resources.
3824 DPRINTF(sc, MWL_DEBUG_AMPDU,
3825 "%s: request failed with code %d, destroy bastream %p\n",
3826 __func__, code, bas->bastream);
3827 mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
3828 mwl_bastream_free(bas);
3829 tap->txa_private = NULL;
3831 /* NB: firmware sends BAR so we don't need to */
3832 return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
/*
 * A-MPDU TX teardown: destroy the firmware bastream (if one was set up)
 * and release the driver-side slot, then chain to the saved net80211
 * handler (sc_addba_stop).
 */
3836 mwl_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
3838 struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
3839 struct mwl_bastate *bas;
3841 bas = tap->txa_private;
/* (elided NULL check before this debug/teardown path) */
3843 DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: destroy bastream %p\n",
3844 __func__, bas->bastream);
3845 mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
3846 mwl_bastream_free(bas);
3847 tap->txa_private = NULL;
3849 sc->sc_addba_stop(ni, tap);
3853 * Setup the rx data structures. This should only be
3854 * done once or we may get out of sync with the firmware.
3857 mwl_startrecv(struct mwl_softc *sc)
/* One-time setup, guarded by sc_recvsetup. */
3859 if (!sc->sc_recvsetup) {
3860 struct mwl_rxbuf *bf, *prev;
3861 struct mwl_rxdesc *ds;
/* Init each rx buffer and chain descriptors via pPhysNext. */
3864 STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
3865 int error = mwl_rxbuf_init(sc, bf);
3867 DPRINTF(sc, MWL_DEBUG_RECV,
3868 "%s: mwl_rxbuf_init failed %d\n",
/* (elided: error arg, break/return, and prev-tracking logic) */
3874 ds->pPhysNext = htole32(bf->bf_daddr);
/* Close the ring: last descriptor points back at the first buffer. */
3881 htole32(STAILQ_FIRST(&sc->sc_rxbuf)->bf_daddr);
3883 sc->sc_recvsetup = 1;
3885 mwl_mode_init(sc); /* set filters, etc. */
/*
 * Map the vap configuration and channel flags to the firmware AP mode
 * (pure-N / A+N / G+N / B+G+N for HT channels; G-only/mixed/B-only/
 * A-only otherwise).
 */
3889 static MWL_HAL_APMODE
3890 mwl_getapmode(const struct ieee80211vap *vap, struct ieee80211_channel *chan)
3892 MWL_HAL_APMODE mode;
3894 if (IEEE80211_IS_CHAN_HT(chan)) {
3895 if (vap->iv_flags_ht & IEEE80211_FHT_PUREN)
3896 mode = AP_MODE_N_ONLY;
3897 else if (IEEE80211_IS_CHAN_5GHZ(chan))
3898 mode = AP_MODE_AandN;
3899 else if (vap->iv_flags & IEEE80211_F_PUREG)
3900 mode = AP_MODE_GandN;
3902 mode = AP_MODE_BandGandN;
3903 } else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3904 if (vap->iv_flags & IEEE80211_F_PUREG)
3905 mode = AP_MODE_G_ONLY;
3907 mode = AP_MODE_MIXED;
3908 } else if (IEEE80211_IS_CHAN_B(chan))
3909 mode = AP_MODE_B_ONLY;
3911 else if (IEEE80211_IS_CHAN_A(chan))
3913 mode = AP_MODE_MIXED; /* XXX should not happen? */
/* Push the computed AP mode for this vap/channel to the firmware. */
3918 mwl_setapmode(struct ieee80211vap *vap, struct ieee80211_channel *chan)
3920 struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
3921 return mwl_hal_setapmode(hvap, mwl_getapmode(vap, chan));
3925 * Set/change channels.
3928 mwl_chan_set(struct mwl_softc *sc, struct ieee80211_channel *chan)
3930 struct mwl_hal *mh = sc->sc_mh;
3931 struct ifnet *ifp = sc->sc_ifp;
3932 struct ieee80211com *ic = ifp->if_l2com;
3933 MWL_HAL_CHANNEL hchan;
3936 DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n",
3937 __func__, chan->ic_freq, chan->ic_flags);
3940 * Convert to a HAL channel description with
3941 * the flags constrained to reflect the current
/* (continuation elided: presumably "...operating mode") */
3944 mwl_mapchan(&hchan, chan);
3945 mwl_hal_intrset(mh, 0); /* disable interrupts */
3947 mwl_draintxq(sc); /* clear pending tx frames */
3949 mwl_hal_setchannel(mh, &hchan);
3951 * Tx power is cap'd by the regulatory setting and
3952 * possibly a user-set limit. We pass the min of
3953 * these to the hal to apply them to the cal data
/* NB: power bookkeeping is in half-dBm units, hence the *2 and /2. */
3957 maxtxpow = 2*chan->ic_maxregpower;
3958 if (maxtxpow > ic->ic_txpowlimit)
3959 maxtxpow = ic->ic_txpowlimit;
3960 mwl_hal_settxpower(mh, &hchan, maxtxpow / 2);
3961 /* NB: potentially change mcast/mgt rates */
3962 mwl_setcurchanrates(sc);
3965 * Update internal state.
/* Keep radiotap tx/rx headers in sync with the new channel. */
3967 sc->sc_tx_th.wt_chan_freq = htole16(chan->ic_freq);
3968 sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq);
3969 if (IEEE80211_IS_CHAN_A(chan)) {
3970 sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_A);
3971 sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_A);
3972 } else if (IEEE80211_IS_CHAN_ANYG(chan)) {
3973 sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G);
3974 sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G);
3976 sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B);
3977 sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B);
3979 sc->sc_curchan = hchan;
/* Re-enable interrupts with the saved mask. */
3980 mwl_hal_intrset(mh, sc->sc_imask);
/* net80211 scan-start callback; currently only logs (visible code). */
3986 mwl_scan_start(struct ieee80211com *ic)
3988 struct ifnet *ifp = ic->ic_ifp;
3989 struct mwl_softc *sc = ifp->if_softc;
3991 DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
/* net80211 scan-end callback; currently only logs (visible code). */
3995 mwl_scan_end(struct ieee80211com *ic)
3997 struct ifnet *ifp = ic->ic_ifp;
3998 struct mwl_softc *sc = ifp->if_softc;
4000 DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
/* net80211 set-channel callback: program the current channel. */
4004 mwl_set_channel(struct ieee80211com *ic)
4006 struct ifnet *ifp = ic->ic_ifp;
4007 struct mwl_softc *sc = ifp->if_softc;
4009 (void) mwl_chan_set(sc, ic->ic_curchan);
4013 * Handle a channel switch request. We inform the firmware
4014 * and mark the global state to suppress various actions.
4015 * NB: we issue only one request to the fw; we may be called
4016 * multiple times if there are multiple vap's.
4019 mwl_startcsa(struct ieee80211vap *vap)
4021 struct ieee80211com *ic = vap->iv_ic;
4022 struct mwl_softc *sc = ic->ic_ifp->if_softc;
4023 MWL_HAL_CHANNEL hchan;
/* Already announced a CSA; do nothing (elided return). */
4025 if (sc->sc_csapending)
4028 mwl_mapchan(&hchan, ic->ic_csa_newchan);
4029 /* 1 =>'s quiet channel */
4030 mwl_hal_setchannelswitchie(sc->sc_mh, &hchan, 1, ic->ic_csa_count);
4031 sc->sc_csapending = 1;
4035 * Plumb any static WEP key for the station. This is
4036 * necessary as we must propagate the key from the
4037 * global key table of the vap to each sta db entry.
4040 mwl_setanywepkey(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
/* Only plumb when privacy is on without WPA and a default key exists. */
4042 if ((vap->iv_flags & (IEEE80211_F_PRIVACY|IEEE80211_F_WPA)) ==
4043 IEEE80211_F_PRIVACY &&
4044 vap->iv_def_txkey != IEEE80211_KEYIX_NONE &&
4045 vap->iv_nw_keys[vap->iv_def_txkey].wk_keyix != IEEE80211_KEYIX_NONE)
4046 (void) mwl_key_set(vap, &vap->iv_nw_keys[vap->iv_def_txkey], mac);
/*
 * Create/refresh the firmware station-db entry for a peer node and
 * (re)plumb any static WEP key, since mwl_hal_newstation clobbers the
 * crypto state previously installed for the station.
 */
4050 mwl_peerstadb(struct ieee80211_node *ni, int aid, int staid, MWL_HAL_PEERINFO *pi)
4052 #define WME(ie) ((const struct ieee80211_wme_info *) ie)
4053 struct ieee80211vap *vap = ni->ni_vap;
4054 struct mwl_hal_vap *hvap;
4057 if (vap->iv_opmode == IEEE80211_M_WDS) {
4059 * WDS vap's do not have a f/w vap; instead they piggyback
4060 * on an AP vap and we must install the sta db entry and
4061 * crypto state using that AP's handle (the WDS vap has none).
4063 hvap = MWL_VAP(vap)->mv_ap_hvap;
4065 hvap = MWL_VAP(vap)->mv_hvap;
4066 error = mwl_hal_newstation(hvap, ni->ni_macaddr,
/* (elided: aid/staid/pi arguments between these lines) */
4068 ni->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT),
4069 ni->ni_ies.wme_ie != NULL ? WME(ni->ni_ies.wme_ie)->wme_info : 0);
4072 * Setup security for this station. For sta mode this is
4073 * needed even though do the same thing on transition to
4074 * AUTH state because the call to mwl_hal_newstation
4075 * clobbers the crypto state we setup.
4077 mwl_setanywepkey(vap, ni->ni_macaddr);
/* Re-plumb every configured global (group) key for the vap. */
4084 mwl_setglobalkeys(struct ieee80211vap *vap)
4086 struct ieee80211_key *wk;
4088 wk = &vap->iv_nw_keys[0];
4089 for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; wk++)
4090 if (wk->wk_keyix != IEEE80211_KEYIX_NONE)
4091 (void) mwl_key_set(vap, wk, vap->iv_myaddr);
4095 * Convert a legacy rate set to a firmware bitmask.
4098 get_rate_bitmap(const struct ieee80211_rateset *rs)
/* Rate values are in 500Kb/s units (2 = 1Mb/s ... 108 = 54Mb/s). */
4104 for (i = 0; i < rs->rs_nrates; i++)
4105 switch (rs->rs_rates[i] & IEEE80211_RATE_VAL) {
4106 case 2: rates |= 0x001; break;
4107 case 4: rates |= 0x002; break;
4108 case 11: rates |= 0x004; break;
4109 case 22: rates |= 0x008; break;
4110 case 44: rates |= 0x010; break;
4111 case 12: rates |= 0x020; break;
4112 case 18: rates |= 0x040; break;
4113 case 24: rates |= 0x080; break;
4114 case 36: rates |= 0x100; break;
4115 case 48: rates |= 0x200; break;
4116 case 72: rates |= 0x400; break;
4117 case 96: rates |= 0x800; break;
4118 case 108: rates |= 0x1000; break;
4124 * Construct an HT firmware bitmask from an HT rate set.
4127 get_htrate_bitmap(const struct ieee80211_htrateset *rs)
/* One bit per MCS index; only MCS 0-15 are representable. */
4133 for (i = 0; i < rs->rs_nrates; i++) {
4134 if (rs->rs_rates[i] < 16)
4135 rates |= 1<<rs->rs_rates[i];
4141 * Craft station database entry for station.
4142 * NB: use host byte order here, the hal handles byte swapping.
4144 static MWL_HAL_PEERINFO *
4145 mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni)
4147 const struct ieee80211vap *vap = ni->ni_vap;
4149 memset(pi, 0, sizeof(*pi));
4150 pi->LegacyRateBitMap = get_rate_bitmap(&ni->ni_rates);
4151 pi->CapInfo = ni->ni_capinfo;
4152 if (ni->ni_flags & IEEE80211_NODE_HT) {
4153 /* HT capabilities, etc */
4154 pi->HTCapabilitiesInfo = ni->ni_htcap;
4155 /* XXX pi.HTCapabilitiesInfo */
4156 pi->MacHTParamInfo = ni->ni_htparam;
4157 pi->HTRateBitMap = get_htrate_bitmap(&ni->ni_htrates);
4158 pi->AddHtInfo.ControlChan = ni->ni_htctlchan;
4159 pi->AddHtInfo.AddChan = ni->ni_ht2ndchan;
4160 pi->AddHtInfo.OpMode = ni->ni_htopmode;
4161 pi->AddHtInfo.stbc = ni->ni_htstbc;
4163 /* constrain according to local configuration */
/* Strip peer HT caps we have disabled locally (SGI20/40, 40MHz). */
4164 if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0)
4165 pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40;
4166 if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0)
4167 pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20;
4168 if (ni->ni_chw != 40)
4169 pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40;
4175 * Re-create the local sta db entry for a vap to ensure
4176 * up to date WME state is pushed to the firmware. Because
4177 * this resets crypto state this must be followed by a
4178 * reload of any keys in the global key table.
4181 mwl_localstadb(struct ieee80211vap *vap)
4183 #define WME(ie) ((const struct ieee80211_wme_info *) ie)
4184 struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
4185 struct ieee80211_node *bss;
4186 MWL_HAL_PEERINFO pi;
4189 switch (vap->iv_opmode) {
4190 case IEEE80211_M_STA:
/* (elided: bss = vap->iv_bss assignment — TODO confirm) */
4192 error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0,
/* Only pass full peerinfo once associated (RUN state). */
4193 vap->iv_state == IEEE80211_S_RUN ?
4194 mkpeerinfo(&pi, bss) : NULL,
4195 (bss->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)),
4196 bss->ni_ies.wme_ie != NULL ?
4197 WME(bss->ni_ies.wme_ie)->wme_info : 0);
/* On success, re-plumb global keys clobbered by newstation. */
4199 mwl_setglobalkeys(vap);
4201 case IEEE80211_M_HOSTAP:
4202 case IEEE80211_M_MBSS:
4203 error = mwl_hal_newstation(hvap, vap->iv_myaddr,
4204 0, 0, NULL, vap->iv_flags & IEEE80211_F_WME, 0);
4206 mwl_setglobalkeys(vap);
/*
 * net80211 state-machine hook.  Performs driver work before and after
 * invoking the parent (net80211) newstate method: radar-detection
 * bookkeeping, firmware vap start/stop, sta-db setup, CSA handling,
 * and (on entering RUN) beacon setup, association state, CS mode and
 * the station-aging callout.
 * NOTE(review): excerpt is elided; several guards/braces are missing.
 */
4217 mwl_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4219 struct mwl_vap *mvp = MWL_VAP(vap);
4220 struct mwl_hal_vap *hvap = mvp->mv_hvap;
4221 struct ieee80211com *ic = vap->iv_ic;
4222 struct ieee80211_node *ni = NULL;
4223 struct ifnet *ifp = ic->ic_ifp;
4224 struct mwl_softc *sc = ifp->if_softc;
4225 struct mwl_hal *mh = sc->sc_mh;
4226 enum ieee80211_state ostate = vap->iv_state;
4229 DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: %s -> %s\n",
4230 vap->iv_ifp->if_xname, __func__,
4231 ieee80211_state_name[ostate], ieee80211_state_name[nstate]);
/* Stop the station-aging callout while changing state. */
4233 callout_stop(&sc->sc_timer);
4235 * Clear current radar detection state.
4237 if (ostate == IEEE80211_S_CAC) {
4238 /* stop quiet mode radar detection */
4239 mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_STOP);
4240 } else if (sc->sc_radarena) {
4241 /* stop in-service radar detection */
4242 mwl_hal_setradardetection(mh, DR_DFS_DISABLE);
4243 sc->sc_radarena = 0;
4246 * Carry out per-state actions before doing net80211 work.
4248 if (nstate == IEEE80211_S_INIT) {
4249 /* NB: only ap+sta vap's have a fw entity */
/* (elided: presumably mwl_hal_stop(hvap) when hvap != NULL) */
4252 } else if (nstate == IEEE80211_S_SCAN) {
4253 mwl_hal_start(hvap);
4254 /* NB: this disables beacon frames */
4255 mwl_hal_setinframode(hvap);
4256 } else if (nstate == IEEE80211_S_AUTH) {
4258 * Must create a sta db entry in case a WEP key needs to
4259 * be plumbed. This entry will be overwritten if we
4260 * associate; otherwise it will be reclaimed on node free.
/* (elided: ni = vap->iv_bss assignment — TODO confirm) */
4263 MWL_NODE(ni)->mn_hvap = hvap;
4264 (void) mwl_peerstadb(ni, 0, 0, NULL);
4265 } else if (nstate == IEEE80211_S_CSA) {
4266 /* XXX move to below? */
4267 if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
4268 vap->iv_opmode == IEEE80211_M_MBSS)
/* (elided: presumably mwl_startcsa(vap)) */
4270 } else if (nstate == IEEE80211_S_CAC) {
4271 /* XXX move to below? */
4272 /* stop ap xmit and enable quiet mode radar detection */
4273 mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_START);
4277 * Invoke the parent method to do net80211 work.
4279 error = mvp->mv_newstate(vap, nstate, arg);
4282 * Carry out work that must be done after net80211 runs;
4283 * this work requires up to date state (e.g. iv_bss).
4285 if (error == 0 && nstate == IEEE80211_S_RUN) {
4286 /* NB: collect bss node again, it may have changed */
4289 DPRINTF(sc, MWL_DEBUG_STATE,
4290 "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
4291 "capinfo 0x%04x chan %d\n",
4292 vap->iv_ifp->if_xname, __func__, vap->iv_flags,
4293 ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo,
4294 ieee80211_chan2ieee(ic, ic->ic_curchan));
4297 * Recreate local sta db entry to update WME/HT state.
4299 mwl_localstadb(vap);
4300 switch (vap->iv_opmode) {
4301 case IEEE80211_M_HOSTAP:
4302 case IEEE80211_M_MBSS:
4303 if (ostate == IEEE80211_S_CAC) {
4304 /* enable in-service radar detection */
4305 mwl_hal_setradardetection(mh,
4306 DR_IN_SERVICE_MONITOR_START);
4307 sc->sc_radarena = 1;
4310 * Allocate and setup the beacon frame
4311 * (and related state).
4313 error = mwl_reset_vap(vap, IEEE80211_S_RUN);
4315 DPRINTF(sc, MWL_DEBUG_STATE,
4316 "%s: beacon setup failed, error %d\n",
/* (elided: error arg and bail-out on failure) */
4320 /* NB: must be after setting up beacon */
4321 mwl_hal_start(hvap);
4323 case IEEE80211_M_STA:
4324 DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: aid 0x%x\n",
4325 vap->iv_ifp->if_xname, __func__, ni->ni_associd);
4327 * Set state now that we're associated.
4329 mwl_hal_setassocid(hvap, ni->ni_bssid, ni->ni_associd);
4331 mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
/* Enable firmware DWDS support when the first DWDS vap runs. */
4332 if ((vap->iv_flags & IEEE80211_F_DWDS) &&
4333 sc->sc_ndwdsvaps++ == 0)
4334 mwl_hal_setdwds(mh, 1);
4336 case IEEE80211_M_WDS:
4337 DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: bssid %s\n",
4338 vap->iv_ifp->if_xname, __func__,
4339 ether_sprintf(ni->ni_bssid));
4340 mwl_seteapolformat(vap);
4346 * Set CS mode according to operating channel;
4347 * this mostly an optimization for 5GHz.
4349 * NB: must follow mwl_hal_start which resets csmode
4351 if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
4352 mwl_hal_setcsmode(mh, CSMODE_AGGRESSIVE);
4354 mwl_hal_setcsmode(mh, CSMODE_AUTO_ENA);
4356 * Start timer to prod firmware.
4358 if (sc->sc_ageinterval != 0)
4359 callout_reset(&sc->sc_timer, sc->sc_ageinterval*hz,
4360 mwl_agestations, sc);
4361 } else if (nstate == IEEE80211_S_SLEEP) {
4362 /* XXX set chip in power save */
/* Leaving RUN on a DWDS vap: disable fw DWDS when the last one stops. */
4363 } else if ((vap->iv_flags & IEEE80211_F_DWDS) &&
4364 --sc->sc_ndwdsvaps == 0)
4365 mwl_hal_setdwds(mh, 0);
4371 * Manage station id's; these are separate from AID's
4372 * as AID's may have values out of the range of possible
4373 * station id's acceptable to the firmware.
4376 allocstaid(struct mwl_softc *sc, int aid)
/* Use the AID directly when it is a valid, unused staid; else scan. */
4380 if (!(0 < aid && aid < MWL_MAXSTAID) || isset(sc->sc_staid, aid)) {
4381 /* NB: don't use 0 */
4382 for (staid = 1; staid < MWL_MAXSTAID; staid++)
4383 if (isclr(sc->sc_staid, staid))
/* (elided: break out of scan and staid = aid fallback path) */
4387 setbit(sc->sc_staid, staid);
/* Release a previously allocated station id. */
4392 delstaid(struct mwl_softc *sc, int staid)
4394 clrbit(sc->sc_staid, staid);
4398 * Setup driver-specific state for a newly associated node.
4399 * Note that we're called also on a re-associate, the isnew
4400 * param tells us if this is the first time or not.
4403 mwl_newassoc(struct ieee80211_node *ni, int isnew)
4405 struct ieee80211vap *vap = ni->ni_vap;
4406 struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
4407 struct mwl_node *mn = MWL_NODE(ni);
4408 MWL_HAL_PEERINFO pi;
4412 aid = IEEE80211_AID(ni->ni_associd);
/* First association (elided isnew check): allocate a firmware staid. */
4414 mn->mn_staid = allocstaid(sc, aid);
4415 mn->mn_hvap = MWL_VAP(vap)->mv_hvap;
4418 /* XXX reset BA stream? */
4420 DPRINTF(sc, MWL_DEBUG_NODE, "%s: mac %s isnew %d aid %d staid %d\n",
4421 __func__, ether_sprintf(ni->ni_macaddr), isnew, aid, mn->mn_staid);
4422 error = mwl_peerstadb(ni, aid, mn->mn_staid, mkpeerinfo(&pi, ni));
4424 DPRINTF(sc, MWL_DEBUG_NODE,
4425 "%s: error %d creating sta db entry\n",
4427 /* XXX how to deal with error? */
4432 * Periodically poke the firmware to age out station state
4433 * (power save queues, pending tx aggregates).
4436 mwl_agestations(void *arg)
4438 struct mwl_softc *sc = arg;
4440 mwl_hal_setkeepalive(sc->sc_mh);
/* Self-rearm; sc_ageinterval == 0 disables the callout. */
4441 if (sc->sc_ageinterval != 0) /* NB: catch dynamic changes */
4442 callout_schedule(&sc->sc_timer, sc->sc_ageinterval*hz);
/* Linear search of the hal channel table for an IEEE channel number. */
4445 static const struct mwl_hal_channel *
4446 findhalchannel(const MWL_HAL_CHANNELINFO *ci, int ieee)
4450 for (i = 0; i < ci->nchannels; i++) {
4451 const struct mwl_hal_channel *hc = &ci->channels[i];
4452 if (hc->ieee == ieee)
/*
 * Regdomain hook: for each proposed channel, verify the hal has cal
 * data for it (2.4/5GHz, 20/40MHz) and cap ic_maxpower at the hal's
 * per-channel limit (hal power is in dBm; ic_maxpower is half-dBm,
 * hence the 2* scaling).
 */
4459 mwl_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
4460 int nchan, struct ieee80211_channel chans[])
4462 struct mwl_softc *sc = ic->ic_ifp->if_softc;
4463 struct mwl_hal *mh = sc->sc_mh;
4464 const MWL_HAL_CHANNELINFO *ci;
4467 for (i = 0; i < nchan; i++) {
4468 struct ieee80211_channel *c = &chans[i];
4469 const struct mwl_hal_channel *hc;
4471 if (IEEE80211_IS_CHAN_2GHZ(c)) {
4472 mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_2DOT4GHZ,
4473 IEEE80211_IS_CHAN_HT40(c) ?
4474 MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4475 } else if (IEEE80211_IS_CHAN_5GHZ(c)) {
4476 mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_5GHZ,
4477 IEEE80211_IS_CHAN_HT40(c) ?
4478 MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
4480 if_printf(ic->ic_ifp,
4481 "%s: channel %u freq %u/0x%x not 2.4/5GHz\n",
4482 __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
4486 * Verify channel has cal data and cap tx power.
4488 hc = findhalchannel(ci, c->ic_ieee);
/* (elided: NULL check on hc before the power cap) */
4490 if (c->ic_maxpower > 2*hc->maxTxPow)
4491 c->ic_maxpower = 2*hc->maxTxPow;
4494 if (IEEE80211_IS_CHAN_HT40(c)) {
4496 * Look for the extension channel since the
4497 * hal table only has the primary channel.
4499 hc = findhalchannel(ci, c->ic_extieee);
4501 if (c->ic_maxpower > 2*hc->maxTxPow)
4502 c->ic_maxpower = 2*hc->maxTxPow;
/* No cal data found: report and (elided) reject the channel set. */
4506 if_printf(ic->ic_ifp,
4507 "%s: no cal data for channel %u ext %u freq %u/0x%x\n",
4508 __func__, c->ic_ieee, c->ic_extieee,
4509 c->ic_freq, c->ic_flags);
4517 #define IEEE80211_CHAN_HTG (IEEE80211_CHAN_HT|IEEE80211_CHAN_G)
4518 #define IEEE80211_CHAN_HTA (IEEE80211_CHAN_HT|IEEE80211_CHAN_A)
/* Fill in one net80211 channel entry (txpow dBm -> half-dBm maxpower). */
4521 addchan(struct ieee80211_channel *c, int freq, int flags, int ieee, int txpow)
4524 c->ic_flags = flags;
4527 c->ic_maxpower = 2*txpow;
4528 c->ic_maxregpower = txpow;
/* Find a channel entry matching freq and exact flags, if present. */
4531 static const struct ieee80211_channel *
4532 findchannel(const struct ieee80211_channel chans[], int nchans,
4533 int freq, int flags)
4535 const struct ieee80211_channel *c;
4538 for (i = 0; i < nchans; i++) {
4540 if (c->ic_freq == freq && c->ic_flags == flags)
/*
 * Add HT40 channel pairs: for each hal 40MHz entry, locate the already
 * present HT20 extension channel 20MHz above and emit the HT40U (on
 * the primary) and HT40D (on the extension) entries.
 */
4547 addht40channels(struct ieee80211_channel chans[], int maxchans, int *nchans,
4548 const MWL_HAL_CHANNELINFO *ci, int flags)
4550 struct ieee80211_channel *c;
4551 const struct ieee80211_channel *extc;
4552 const struct mwl_hal_channel *hc;
4555 c = &chans[*nchans];
/* Work with the base band flags; HT variants are added per entry. */
4557 flags &= ~IEEE80211_CHAN_HT;
4558 for (i = 0; i < ci->nchannels; i++) {
4560 * Each entry defines an HT40 channel pair; find the
4561 * extension channel above and the insert the pair.
4563 hc = &ci->channels[i];
4564 extc = findchannel(chans, *nchans, hc->freq+20,
4565 flags | IEEE80211_CHAN_HT20);
/* (elided: skip entry when extc == NULL) */
4567 if (*nchans >= maxchans)
4569 addchan(c, hc->freq, flags | IEEE80211_CHAN_HT40U,
4570 hc->ieee, hc->maxTxPow);
4571 c->ic_extieee = extc->ic_ieee;
4573 if (*nchans >= maxchans)
4575 addchan(c, extc->ic_freq, flags | IEEE80211_CHAN_HT40D,
4576 extc->ic_ieee, hc->maxTxPow);
4577 c->ic_extieee = hc->ieee;
/*
 * Populate 20MHz channels from the hal table.  For 2.4GHz HT/G bands
 * additional duplicate entries are emitted so each channel appears as
 * B, G, and HT20 (and A + HT20 for 5GHz) as net80211 expects.
 */
4584 addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans,
4585 const MWL_HAL_CHANNELINFO *ci, int flags)
4587 struct ieee80211_channel *c;
4590 c = &chans[*nchans];
4592 for (i = 0; i < ci->nchannels; i++) {
4593 const struct mwl_hal_channel *hc;
4595 hc = &ci->channels[i];
4596 if (*nchans >= maxchans)
4598 addchan(c, hc->freq, flags, hc->ieee, hc->maxTxPow);
4600 if (flags == IEEE80211_CHAN_G || flags == IEEE80211_CHAN_HTG) {
4601 /* g channel have a separate b-only entry */
4602 if (*nchans >= maxchans)
/* (elided: copy previous entry before retagging as B) */
4605 c[-1].ic_flags = IEEE80211_CHAN_B;
4608 if (flags == IEEE80211_CHAN_HTG) {
4609 /* HT g channel have a separate g-only entry */
4610 if (*nchans >= maxchans)
4612 c[-1].ic_flags = IEEE80211_CHAN_G;
/* Retag the HT entry as HT20 (plain HT bit cleared). */
4614 c[0].ic_flags &= ~IEEE80211_CHAN_HT;
4615 c[0].ic_flags |= IEEE80211_CHAN_HT20; /* HT20 */
4618 if (flags == IEEE80211_CHAN_HTA) {
4619 /* HT a channel have a separate a-only entry */
4620 if (*nchans >= maxchans)
4622 c[-1].ic_flags = IEEE80211_CHAN_A;
4624 c[0].ic_flags &= ~IEEE80211_CHAN_HT;
4625 c[0].ic_flags |= IEEE80211_CHAN_HT20; /* HT20 */
/* Build the full channel list (20MHz then HT40, both bands) from hal data. */
4632 getchannels(struct mwl_softc *sc, int maxchans, int *nchans,
4633 struct ieee80211_channel chans[])
4635 const MWL_HAL_CHANNELINFO *ci;
4638 * Use the channel info from the hal to craft the
4639 * channel list. Note that we pass back an unsorted
4640 * list; the caller is required to sort it for us
/* (continuation elided) */
4644 if (mwl_hal_getchannelinfo(sc->sc_mh,
4645 MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
4646 addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
4647 if (mwl_hal_getchannelinfo(sc->sc_mh,
4648 MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
4649 addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
/* HT40 entries must come after the HT20 list they cross-reference. */
4650 if (mwl_hal_getchannelinfo(sc->sc_mh,
4651 MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
4652 addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG)
4653 if (mwl_hal_getchannelinfo(sc->sc_mh,
4654 MWL_FREQ_BAND_5GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
4655 addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
/* net80211 getradiocaps hook: report the hal-derived channel list. */
4659 mwl_getradiocaps(struct ieee80211com *ic,
4660 int maxchans, int *nchans, struct ieee80211_channel chans[])
4662 struct mwl_softc *sc = ic->ic_ifp->if_softc;
4664 getchannels(sc, maxchans, nchans, chans);
/*
 * Attach-time channel setup: fill ic_channels from the hal and install
 * a debug/default regdomain.  Returns EIO when no channels were found.
 */
4668 mwl_getchannels(struct mwl_softc *sc)
4670 struct ifnet *ifp = sc->sc_ifp;
4671 struct ieee80211com *ic = ifp->if_l2com;
4674 * Use the channel info from the hal to craft the
4675 * channel list for net80211. Note that we pass up
4676 * an unsorted list; net80211 will sort it for us.
4678 memset(ic->ic_channels, 0, sizeof(ic->ic_channels));
4680 getchannels(sc, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels);
4682 ic->ic_regdomain.regdomain = SKU_DEBUG;
4683 ic->ic_regdomain.country = CTRY_DEFAULT;
4684 ic->ic_regdomain.location = 'I';
4685 ic->ic_regdomain.isocc[0] = ' '; /* XXX? */
4686 ic->ic_regdomain.isocc[1] = ' ';
4687 return (ic->ic_nchans == 0 ? EIO : 0);
4689 #undef IEEE80211_CHAN_HTA
4690 #undef IEEE80211_CHAN_HTG
/* Debug helper: dump one rx descriptor (fields are little-endian). */
4694 mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix)
4696 const struct mwl_rxdesc *ds = bf->bf_desc;
4697 uint32_t status = le32toh(ds->Status);
4699 printf("R[%2u] (DS.V:%p DS.P:%p) NEXT:%08x DATA:%08x RC:%02x%s\n"
4700 " STAT:%02x LEN:%04x RSSI:%02x CHAN:%02x RATE:%02x QOS:%04x HT:%04x\n",
4701 ix, ds, (const struct mwl_desc *)bf->bf_daddr,
4702 le32toh(ds->pPhysNext), le32toh(ds->pPhysBuffData),
/* Marker: "*" = done+ok, "!" = done+error, "" = still fw-owned. */
4704 ds->RxControl != EAGLE_RXD_CTRL_DRIVER_OWN ?
4705 "" : (status & EAGLE_RXD_STATUS_OK) ? " *" : " !",
4706 ds->Status, le16toh(ds->PktLen), ds->RSSI, ds->Channel,
4707 ds->Rate, le16toh(ds->QosCtrl), le16toh(ds->HtSig2));
/* Debug helper: dump one tx descriptor, including the multi-frame arrays. */
4711 mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix)
4713 const struct mwl_txdesc *ds = bf->bf_desc;
4714 uint32_t status = le32toh(ds->Status);
4716 printf("Q%u[%3u]", qnum, ix);
4717 printf(" (DS.V:%p DS.P:%p)\n",
4718 ds, (const struct mwl_txdesc *)bf->bf_daddr);
4719 printf(" NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n",
4720 le32toh(ds->pPhysNext),
4721 le32toh(ds->PktPtr), le16toh(ds->PktLen), status,
4722 status & EAGLE_TXD_STATUS_USED ?
4723 "" : (status & 3) != 0 ? " *" : " !",
4724 printf(" RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n",
4725 ds->DataRate, ds->TxPriority, le16toh(ds->QosCtrl),
4726 le32toh(ds->SapPktInfo), le16toh(ds->Format));
4728 printf(" MULTIFRAMES:%u LEN:%04x %04x %04x %04x %04x %04x\n"
4729 , le32toh(ds->multiframes)
4730 , le16toh(ds->PktLenArray[0]), le16toh(ds->PktLenArray[1])
4731 , le16toh(ds->PktLenArray[2]), le16toh(ds->PktLenArray[3])
4732 , le16toh(ds->PktLenArray[4]), le16toh(ds->PktLenArray[5])
4734 printf(" DATA:%08x %08x %08x %08x %08x %08x\n"
4735 , le32toh(ds->PktPtrArray[0]), le32toh(ds->PktPtrArray[1])
4736 , le32toh(ds->PktPtrArray[2]), le32toh(ds->PktPtrArray[3])
4737 , le32toh(ds->PktPtrArray[4]), le32toh(ds->PktPtrArray[5])
/* Raw hex dump of the whole descriptor, 16 bytes per line. */
4741 { const uint8_t *cp = (const uint8_t *) ds;
4743 for (i = 0; i < sizeof(struct mwl_txdesc); i++) {
4744 printf("%02x ", cp[i]);
4745 if (((i+1) % 16) == 0)
4752 #endif /* MWL_DEBUG */
/* Debug helper: sync and print every active descriptor on a tx queue. */
4756 mwl_txq_dump(struct mwl_txq *txq)
4758 struct mwl_txbuf *bf;
4762 STAILQ_FOREACH(bf, &txq->active, bf_list) {
4763 struct mwl_txdesc *ds = bf->bf_desc;
4764 MWL_TXDESC_SYNC(txq, ds,
4765 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4767 mwl_printtxbuf(bf, txq->qnum, i);
4771 MWL_TXQ_UNLOCK(txq);
/*
 * Per-second watchdog callout: self-rearms, counts down sc_tx_timer,
 * and on expiry probes the firmware with a keepalive to distinguish a
 * hung firmware from a plain transmit timeout.
 */
4776 mwl_watchdog(void *arg)
4778 struct mwl_softc *sc;
4782 callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);
/* No timeout pending, or not yet expired: nothing to do (elided return). */
4783 if (sc->sc_tx_timer == 0 || --sc->sc_tx_timer > 0)
4787 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && !sc->sc_invalid) {
/* Keepalive failure implies the firmware itself is wedged. */
4788 if (mwl_hal_setkeepalive(sc->sc_mh))
4789 if_printf(ifp, "transmit timeout (firmware hung?)\n");
4791 if_printf(ifp, "transmit timeout\n");
4794 mwl_txq_dump(&sc->sc_txq[0]);/*XXX*/
4797 sc->sc_stats.mst_watchdog++;
4803 * Diagnostic interface to the HAL. This is used by various
4804 * tools to do things like retrieve register contents for
4805 * debugging. The mechanism is intentionally opaque so that
4806 * it can change frequently w/o concern for compatiblity.
4809 mwl_ioctl_diag(struct mwl_softc *sc, struct mwl_diag *md)
4811 struct mwl_hal *mh = sc->sc_mh;
4812 u_int id = md->md_id & MWL_DIAG_ID;
4813 void *indata = NULL;
4814 void *outdata = NULL;
4815 u_int32_t insize = md->md_in_size;
4816 u_int32_t outsize = md->md_out_size;
/* Copy in the caller-supplied request buffer, if any. */
4819 if (md->md_id & MWL_DIAG_IN) {
4823 indata = malloc(insize, M_TEMP, M_NOWAIT);
4824 if (indata == NULL) {
/* (elided: error = ENOMEM; goto bad) */
4828 error = copyin(md->md_in_data, indata, insize);
4832 if (md->md_id & MWL_DIAG_DYN) {
4834 * Allocate a buffer for the results (otherwise the HAL
4835 * returns a pointer to a buffer where we can read the
4836 * results). Note that we depend on the HAL leaving this
4837 * pointer for us to use below in reclaiming the buffer;
4838 * may want to be more defensive.
4840 outdata = malloc(outsize, M_TEMP, M_NOWAIT);
4841 if (outdata == NULL) {
4846 if (mwl_hal_getdiagstate(mh, id, indata, insize, &outdata, &outsize)) {
/* Clamp reported size and copy results back to userland. */
4847 if (outsize < md->md_out_size)
4848 md->md_out_size = outsize;
4849 if (outdata != NULL)
4850 error = copyout(outdata, md->md_out_data,
/* Cleanup path: free whatever was allocated above. */
4856 if ((md->md_id & MWL_DIAG_IN) && indata != NULL)
4857 free(indata, M_TEMP);
4858 if ((md->md_id & MWL_DIAG_DYN) && outdata != NULL)
4859 free(outdata, M_TEMP);
/*
 * Diagnostic reset: optionally reload firmware (md_id == 0), refetch
 * h/w specs, re-setup DMA, then drain and rebuild the tx/rx state so
 * driver and firmware agree on descriptor positions.
 */
4864 mwl_ioctl_reset(struct mwl_softc *sc, struct mwl_diag *md)
4866 struct mwl_hal *mh = sc->sc_mh;
4869 MWL_LOCK_ASSERT(sc);
4871 if (md->md_id == 0 && mwl_hal_fwload(mh, NULL) != 0) {
4872 device_printf(sc->sc_dev, "unable to load firmware\n");
/* (elided: early-failure return) */
4875 if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
4876 device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
4879 error = mwl_setupdma(sc);
4881 /* NB: mwl_setupdma prints a msg */
4885 * Reset tx/rx data structures; after reload we must
4886 * re-start the driver's notion of the next xmit/recv.
4888 mwl_draintxq(sc); /* clear pending frames */
4889 mwl_resettxq(sc); /* rebuild tx q lists */
4890 sc->sc_rxnext = NULL; /* force rx to start at the list head */
4893 #endif /* MWL_DIAGAPI */
/*
 * Network-stack ioctl entry point for the interface.
 * NOTE(review): the switch statement and its case labels
 * (presumably SIOCSIFFLAGS, a stats request, the diag/reset
 * commands, SIOCGIFMEDIA, default) are missing from this listing --
 * confirm the dispatch structure against the full source.
 */
4896 mwl_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
4898 #define IS_RUNNING(ifp) \
4899 ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
4900 struct mwl_softc *sc = ifp->if_softc;
4901 struct ieee80211com *ic = ifp->if_l2com;
4902 struct ifreq *ifr = (struct ifreq *)data;
4903 int error = 0, startall;
/* interface already up and running: only track flag changes */
4909 if (IS_RUNNING(ifp)) {
4911 * To avoid rescanning another access point,
4912 * do not call mwl_init() here. Instead,
4913 * only reflect promisc mode settings.
4916 } else if (ifp->if_flags & IFF_UP) {
4918 * Beware of being called during attach/detach
4919 * to reset promiscuous mode. In that case we
4920 * will still be marked UP but not RUNNING.
4921 * However trying to re-init the interface
4922 * is the wrong thing to do as we've already
4923 * torn down much of our state. There's
4924 * probably a better way to deal with this.
4926 if (!sc->sc_invalid) {
4927 mwl_init_locked(sc); /* XXX lose error */
4931 mwl_stop_locked(ifp, 1);
4934 ieee80211_start_all(ic);
/* stats request: merge the HAL's h/w counters into the driver stats */
4937 mwl_hal_gethwstats(sc->sc_mh, &sc->sc_stats.hw_stats);
4938 /* NB: embed these numbers to get a consistent view */
4939 sc->sc_stats.mst_tx_packets = ifp->if_opackets;
4940 sc->sc_stats.mst_rx_packets = ifp->if_ipackets;
4942 * NB: Drop the softc lock in case of a page fault;
4943 * we'll accept any potential inconsistency in the
4944 * statistics. The alternative is to copy the data
4945 * to a local structure.
4947 return copyout(&sc->sc_stats,
4948 ifr->ifr_data, sizeof (sc->sc_stats));
4951 /* XXX check privs */
4952 return mwl_ioctl_diag(sc, (struct mwl_diag *) ifr);
4954 /* XXX check privs */
4956 error = mwl_ioctl_reset(sc,(struct mwl_diag *) ifr);
4959 #endif /* MWL_DIAGAPI */
4961 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
4964 error = ether_ioctl(ifp, cmd, data);
/*
 * Sysctl handler for the "debug" knob.  The value is a packed 32-bit
 * word: the low 24 bits are the driver debug mask (sc_debug) and the
 * top 8 bits are the HAL debug level.
 * NOTE(review): declarations of 'debug'/'error' and the early return
 * are missing from this listing -- confirm against the full source.
 */
4976 mwl_sysctl_debug(SYSCTL_HANDLER_ARGS)
4978 struct mwl_softc *sc = arg1;
/* present the combined driver+HAL value to userland */
4981 debug = sc->sc_debug | (mwl_hal_getdebug(sc->sc_mh) << 24);
4982 error = sysctl_handle_int(oidp, &debug, 0, req);
/* error, or read-only access (no new value supplied): stop here */
4983 if (error || !req->newptr)
/* split the new value back into its HAL and driver halves */
4985 mwl_hal_setdebug(sc->sc_mh, debug >> 24);
4986 sc->sc_debug = debug & 0x00ffffff;
4989 #endif /* MWL_DEBUG */
/*
 * Attach the driver's sysctl nodes under the device's tree;
 * currently just the read/write "debug" knob backed by
 * mwl_sysctl_debug() (visible under MWL_DEBUG in this listing --
 * confirm conditional compilation against the full source).
 */
4992 mwl_sysctlattach(struct mwl_softc *sc)
4995 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
4996 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
/* seed the per-device mask from the global tunable */
4998 sc->sc_debug = mwl_debug;
4999 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
5000 "debug", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
5001 mwl_sysctl_debug, "I", "control debugging printfs");
5006 * Announce various information on device/driver attach.
5009 mwl_announce(struct mwl_softc *sc)
5011 struct ifnet *ifp = sc->sc_ifp;
5013 if_printf(ifp, "Rev A%d hardware, v%d.%d.%d.%d firmware (regioncode %d)\n",
5014 sc->sc_hwspecs.hwVersion,
5015 (sc->sc_hwspecs.fwReleaseNumber>>24) & 0xff,
5016 (sc->sc_hwspecs.fwReleaseNumber>>16) & 0xff,
5017 (sc->sc_hwspecs.fwReleaseNumber>>8) & 0xff,
5018 (sc->sc_hwspecs.fwReleaseNumber>>0) & 0xff,
5019 sc->sc_hwspecs.regionCode);
5020 sc->sc_fwrelease = sc->sc_hwspecs.fwReleaseNumber;
5024 for (i = 0; i <= WME_AC_VO; i++) {
5025 struct mwl_txq *txq = sc->sc_ac2q[i];
5026 if_printf(ifp, "Use hw queue %u for %s traffic\n",
5027 txq->qnum, ieee80211_wme_acnames[i]);
5030 if (bootverbose || mwl_rxdesc != MWL_RXDESC)
5031 if_printf(ifp, "using %u rx descriptors\n", mwl_rxdesc);
5032 if (bootverbose || mwl_rxbuf != MWL_RXBUF)
5033 if_printf(ifp, "using %u rx buffers\n", mwl_rxbuf);
5034 if (bootverbose || mwl_txbuf != MWL_TXBUF)
5035 if_printf(ifp, "using %u tx buffers\n", mwl_txbuf);
5036 if (bootverbose && mwl_hal_ismbsscapable(sc->sc_mh))
5037 if_printf(ifp, "multi-bss support\n");
5038 #ifdef MWL_TX_NODROP
5040 if_printf(ifp, "no tx drop\n");