4 * Copyright (c) 2004-2006
5 * Damien Bergamini <damien.bergamini@free.fr>. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
34 * Intel(R) PRO/Wireless 2100 MiniPCI driver
35 * http://www.intel.com/network/connectivity/products/wireless/prowireless_mobile.htm
38 #include <sys/param.h>
39 #include <sys/sysctl.h>
40 #include <sys/sockio.h>
42 #include <sys/kernel.h>
43 #include <sys/socket.h>
44 #include <sys/systm.h>
45 #include <sys/malloc.h>
46 #include <sys/queue.h>
47 #include <sys/taskqueue.h>
48 #include <sys/module.h>
50 #include <sys/endian.h>
51 #include <sys/linker.h>
52 #include <sys/firmware.h>
54 #include <machine/bus.h>
55 #include <machine/resource.h>
58 #include <dev/pci/pcireg.h>
59 #include <dev/pci/pcivar.h>
63 #include <net/if_arp.h>
64 #include <net/ethernet.h>
65 #include <net/if_dl.h>
66 #include <net/if_media.h>
67 #include <net/if_types.h>
69 #include <net80211/ieee80211_var.h>
70 #include <net80211/ieee80211_radiotap.h>
72 #include <netinet/in.h>
73 #include <netinet/in_systm.h>
74 #include <netinet/in_var.h>
75 #include <netinet/ip.h>
76 #include <netinet/if_ether.h>
78 #include <dev/ipw/if_ipwreg.h>
79 #include <dev/ipw/if_ipwvar.h>
/*
 * Debug printf helpers gated on the ipw_debug sysctl (debug.ipw).
 * NOTE(review): the surrounding #ifdef IPW_DEBUG / #else / #endif lines
 * appear to be missing from this extract; the second DPRINTFN definition
 * below is the no-debug stub variant from the #else branch.
 */
82 #define DPRINTF(x) do { if (ipw_debug > 0) printf x; } while (0)
83 #define DPRINTFN(n, x) do { if (ipw_debug >= (n)) printf x; } while (0)
85 SYSCTL_INT(_debug, OID_AUTO, ipw, CTLFLAG_RW, &ipw_debug, 0, "ipw debug level");
88 #define DPRINTFN(n, x)
/* Module dependencies: PCI bus, net80211 wlan layer, firmware(9) loader. */
91 MODULE_DEPEND(ipw, pci, 1, 1, 1);
92 MODULE_DEPEND(ipw, wlan, 1, 1, 1);
93 MODULE_DEPEND(ipw, firmware, 1, 1, 1);
/*
 * PCI vendor/device IDs recognized by this driver; walked by ipw_probe()
 * until a NULL-name entry is hit.
 * NOTE(review): the table's terminating sentinel entry and closing brace
 * are not visible in this extract -- confirm against the full source.
 */
101 static const struct ipw_ident ipw_ident_table[] = {
102 { 0x8086, 0x1043, "Intel(R) PRO/Wireless 2100 MiniPCI" },
/* Forward declarations for all file-local (static) routines. */
107 static int ipw_dma_alloc(struct ipw_softc *);
108 static void ipw_release(struct ipw_softc *);
109 static int ipw_media_change(struct ifnet *);
110 static void ipw_media_status(struct ifnet *, struct ifmediareq *);
111 static int ipw_newstate(struct ieee80211com *, enum ieee80211_state, int);
112 static uint16_t ipw_read_prom_word(struct ipw_softc *, uint8_t);
113 static void ipw_command_intr(struct ipw_softc *, struct ipw_soft_buf *);
114 static void ipw_newstate_intr(struct ipw_softc *, struct ipw_soft_buf *);
115 static void ipw_data_intr(struct ipw_softc *, struct ipw_status *,
116 struct ipw_soft_bd *, struct ipw_soft_buf *);
117 static void ipw_rx_intr(struct ipw_softc *);
118 static void ipw_release_sbd(struct ipw_softc *, struct ipw_soft_bd *);
119 static void ipw_tx_intr(struct ipw_softc *);
120 static void ipw_intr(void *);
121 static void ipw_dma_map_addr(void *, bus_dma_segment_t *, int, int);
122 static int ipw_cmd(struct ipw_softc *, uint32_t, void *, uint32_t);
123 static int ipw_tx_start(struct ifnet *, struct mbuf *,
124 struct ieee80211_node *);
125 static void ipw_start(struct ifnet *);
126 static void ipw_watchdog(struct ifnet *);
127 static int ipw_ioctl(struct ifnet *, u_long, caddr_t);
128 static void ipw_stop_master(struct ipw_softc *);
129 static int ipw_reset(struct ipw_softc *);
130 static int ipw_load_ucode(struct ipw_softc *, const char *, int);
131 static int ipw_load_firmware(struct ipw_softc *, const char *, int);
132 static int ipw_config(struct ipw_softc *);
133 static void ipw_init_task(void *, int);
134 static void ipw_init(void *);
135 static void ipw_stop(void *);
136 static int ipw_sysctl_stats(SYSCTL_HANDLER_ARGS);
137 static int ipw_sysctl_radio(SYSCTL_HANDLER_ARGS);
138 static uint32_t ipw_read_table1(struct ipw_softc *, uint32_t);
139 static void ipw_write_table1(struct ipw_softc *, uint32_t, uint32_t);
/* NOTE(review): trailing parameter lines of the next three prototypes are
 * missing from this extract. */
140 static int ipw_read_table2(struct ipw_softc *, uint32_t, void *,
142 static void ipw_read_mem_1(struct ipw_softc *, bus_size_t, uint8_t *,
144 static void ipw_write_mem_1(struct ipw_softc *, bus_size_t,
145 const uint8_t *, bus_size_t);
/* newbus device interface entry points */
147 static int ipw_probe(device_t);
148 static int ipw_attach(device_t);
149 static int ipw_detach(device_t);
150 static int ipw_shutdown(device_t);
151 static int ipw_suspend(device_t);
152 static int ipw_resume(device_t);
/*
 * newbus glue: method table, driver descriptor and module registration.
 * NOTE(review): the method-table terminator (e.g. { 0, 0 }) and the
 * driver name/methods fields of ipw_driver are missing from this extract.
 */
154 static device_method_t ipw_methods[] = {
155 /* Device interface */
156 DEVMETHOD(device_probe, ipw_probe),
157 DEVMETHOD(device_attach, ipw_attach),
158 DEVMETHOD(device_detach, ipw_detach),
159 DEVMETHOD(device_shutdown, ipw_shutdown),
160 DEVMETHOD(device_suspend, ipw_suspend),
161 DEVMETHOD(device_resume, ipw_resume),
166 static driver_t ipw_driver = {
169 sizeof (struct ipw_softc)
172 static devclass_t ipw_devclass;
174 DRIVER_MODULE(ipw, pci, ipw_driver, ipw_devclass, 0, 0);
/*
 * ipw_probe: match this PCI device's vendor/device IDs against
 * ipw_ident_table and set the device description on a hit.
 * NOTE(review): the return statements and closing braces are missing
 * from this extract -- presumably returns BUS_PROBE_DEFAULT (0) on a
 * match and ENXIO otherwise; confirm against the full source.
 */
177 ipw_probe(device_t dev)
179 const struct ipw_ident *ident;
181 for (ident = ipw_ident_table; ident->name != NULL; ident++) {
182 if (pci_get_vendor(dev) == ident->vendor &&
183 pci_get_device(dev) == ident->device) {
184 device_set_desc(dev, ident->name);
/* Base Address Register used to map the chip's CSR space. */
191 /* Base Address Register */
192 #define IPW_PCI_BAR0 0x10
/*
 * ipw_attach: bring the device up at attach time -- allocate bus
 * resources (BAR0 memory window, shared IRQ), reset the adapter,
 * allocate DMA rings, create and configure the ifnet, read the MAC
 * address and channel list from the EEPROM, attach to net80211
 * (overriding its state machine with ipw_newstate), set up radiotap
 * BPF, register sysctl knobs, and finally hook the interrupt handler.
 * On any failure it jumps to the fail label, which calls ipw_detach()
 * to unwind partial state.
 * NOTE(review): many lines (error-check conditionals, gotos, closing
 * braces, some declarations) are missing from this extract; do not
 * treat this span as complete source.
 */
195 ipw_attach(device_t dev)
197 struct ipw_softc *sc = device_get_softc(dev);
199 struct ieee80211com *ic = &sc->sc_ic;
200 struct ieee80211_channel *c;
/* Recursive mutex guarding the softc; also used by the interrupt handler. */
206 mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
207 MTX_DEF | MTX_RECURSE);
/* Deferred (re)initialization task, queued from ipw_intr on fw errors. */
209 TASK_INIT(&sc->sc_init_task, 0, ipw_init_task, sc);
211 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
212 device_printf(dev, "chip is in D%d power mode "
213 "-- setting to D0\n", pci_get_powerstate(dev));
214 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
/* NOTE(review): presumably clears the PCI retry-timeout register. */
217 pci_write_config(dev, 0x41, 0, 1);
219 /* enable bus-mastering */
220 pci_enable_busmaster(dev);
222 sc->mem_rid = IPW_PCI_BAR0;
223 sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
225 if (sc->mem == NULL) {
226 device_printf(dev, "could not allocate memory resource\n");
230 sc->sc_st = rman_get_bustag(sc->mem);
231 sc->sc_sh = rman_get_bushandle(sc->mem);
234 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
235 RF_ACTIVE | RF_SHAREABLE);
236 if (sc->irq == NULL) {
237 device_printf(dev, "could not allocate interrupt resource\n");
241 if (ipw_reset(sc) != 0) {
242 device_printf(dev, "could not reset adapter\n");
246 if (ipw_dma_alloc(sc) != 0) {
247 device_printf(dev, "could not allocate DMA resources\n");
251 ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
253 device_printf(dev, "can not if_alloc()\n");
258 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
259 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
260 ifp->if_init = ipw_init;
261 ifp->if_ioctl = ipw_ioctl;
262 ifp->if_start = ipw_start;
263 ifp->if_watchdog = ipw_watchdog;
264 IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
265 ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
266 IFQ_SET_READY(&ifp->if_snd);
269 ic->ic_phytype = IEEE80211_T_DS;
270 ic->ic_opmode = IEEE80211_M_STA;
271 ic->ic_state = IEEE80211_S_INIT;
273 /* set device capabilities */
275 IEEE80211_C_IBSS | /* IBSS mode supported */
276 IEEE80211_C_MONITOR | /* monitor mode supported */
277 IEEE80211_C_TXPMGT | /* tx power management */
278 IEEE80211_C_SHPREAMBLE; /* short preamble supported */
280 /* read MAC address from EEPROM */
281 val = ipw_read_prom_word(sc, IPW_EEPROM_MAC + 0);
282 ic->ic_myaddr[0] = val >> 8;
283 ic->ic_myaddr[1] = val & 0xff;
284 val = ipw_read_prom_word(sc, IPW_EEPROM_MAC + 1);
285 ic->ic_myaddr[2] = val >> 8;
286 ic->ic_myaddr[3] = val & 0xff;
287 val = ipw_read_prom_word(sc, IPW_EEPROM_MAC + 2);
288 ic->ic_myaddr[4] = val >> 8;
289 ic->ic_myaddr[5] = val & 0xff;
291 /* set supported .11b channels (read from EEPROM) */
292 if ((val = ipw_read_prom_word(sc, IPW_EEPROM_CHANNEL_LIST)) == 0)
293 val = 0x7ff; /* default to channels 1-11 */
295 for (i = 1; i < 16; i++) {
296 if (val & (1 << i)) {
297 c = &ic->ic_channels[ic->ic_nchans++];
298 c->ic_freq = ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
299 c->ic_flags = IEEE80211_CHAN_B;
304 /* check support for radio transmitter switch in EEPROM */
305 if (!(ipw_read_prom_word(sc, IPW_EEPROM_RADIO) & 8))
306 sc->flags |= IPW_FLAG_HAS_RADIO_SWITCH;
308 ieee80211_ifattach(ic);
309 /* override state transition machine */
310 sc->sc_newstate = ic->ic_newstate;
311 ic->ic_newstate = ipw_newstate;
312 ieee80211_media_init(ic, ipw_media_change, ipw_media_status);
/* Attach radiotap BPF; header lengths must match the tap structs below. */
314 bpfattach2(ifp, DLT_IEEE802_11_RADIO,
315 sizeof (struct ieee80211_frame) + sizeof (sc->sc_txtap),
318 sc->sc_rxtap_len = sizeof sc->sc_rxtap;
319 sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
320 sc->sc_rxtap.wr_ihdr.it_present = htole32(IPW_RX_RADIOTAP_PRESENT);
322 sc->sc_txtap_len = sizeof sc->sc_txtap;
323 sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
324 sc->sc_txtap.wt_ihdr.it_present = htole32(IPW_TX_RADIOTAP_PRESENT);
327 * Add a few sysctl knobs.
331 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
332 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "radio",
333 CTLTYPE_INT | CTLFLAG_RD, sc, 0, ipw_sysctl_radio, "I",
334 "radio transmitter switch state (0=off, 1=on)");
336 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
337 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "stats",
338 CTLTYPE_OPAQUE | CTLFLAG_RD, sc, 0, ipw_sysctl_stats, "S",
341 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
342 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "dwell",
343 CTLFLAG_RW, &sc->dwelltime, 0,
344 "channel dwell time (ms) for AP/station scanning");
347 * Hook our interrupt after all initialization is complete.
349 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
350 NULL, ipw_intr, sc, &sc->sc_ih);
352 device_printf(dev, "could not set up interrupt\n");
357 ieee80211_announce(ic);
/* Unified error path: detach tears down whatever was set up above. */
361 fail: ipw_detach(dev);
/*
 * ipw_detach: tear down everything ipw_attach set up -- detach from
 * net80211, release IRQ and memory resources, drop the firmware image
 * reference, and destroy the softc mutex. Also used by ipw_attach's
 * fail path, hence the NULL checks on partially-initialized state.
 * NOTE(review): several lines (stop/bpfdetach/if_free calls, DMA
 * release, closing braces, return) are missing from this extract.
 */
366 ipw_detach(device_t dev)
368 struct ipw_softc *sc = device_get_softc(dev);
369 struct ieee80211com *ic = &sc->sc_ic;
370 struct ifnet *ifp = ic->ic_ifp;
376 ieee80211_ifdetach(ic);
381 if (sc->irq != NULL) {
382 bus_teardown_intr(dev, sc->irq, sc->sc_ih);
383 bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
387 bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem);
392 if (sc->sc_firmware != NULL) {
393 firmware_put(sc->sc_firmware, FIRMWARE_UNLOAD);
394 sc->sc_firmware = NULL;
397 mtx_destroy(&sc->sc_mtx);
/*
 * ipw_dma_alloc: create all busdma tags, maps and ring memory used by
 * the driver: tx buffer-descriptor ring, rx buffer-descriptor ring,
 * status ring, one command map, per-packet header maps, tx buffer maps,
 * and pre-loaded rx mbuf clusters. Rings are 4-byte aligned, single
 * segment, 32-bit DMA addressable. On any failure it jumps to the fail
 * label which calls ipw_release() to unwind.
 * NOTE(review): the `if (error != 0)` checks, `goto fail` statements,
 * closing braces and local declarations (error, i, physaddr) are
 * missing from this extract; the structure below is not compilable
 * as shown.
 */
403 ipw_dma_alloc(struct ipw_softc *sc)
405 struct ipw_soft_bd *sbd;
406 struct ipw_soft_hdr *shdr;
407 struct ipw_soft_buf *sbuf;
412 * Allocate and map tx ring.
414 error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
415 BUS_SPACE_MAXADDR, NULL, NULL, IPW_TBD_SZ, 1, IPW_TBD_SZ, 0, NULL,
416 NULL, &sc->tbd_dmat);
418 device_printf(sc->sc_dev, "could not create tx ring DMA tag\n");
422 error = bus_dmamem_alloc(sc->tbd_dmat, (void **)&sc->tbd_list,
423 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->tbd_map);
425 device_printf(sc->sc_dev,
426 "could not allocate tx ring DMA memory\n");
430 error = bus_dmamap_load(sc->tbd_dmat, sc->tbd_map, sc->tbd_list,
431 IPW_TBD_SZ, ipw_dma_map_addr, &sc->tbd_phys, 0);
433 device_printf(sc->sc_dev, "could not map tx ring DMA memory\n");
438 * Allocate and map rx ring.
440 error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
441 BUS_SPACE_MAXADDR, NULL, NULL, IPW_RBD_SZ, 1, IPW_RBD_SZ, 0, NULL,
442 NULL, &sc->rbd_dmat);
444 device_printf(sc->sc_dev, "could not create rx ring DMA tag\n");
448 error = bus_dmamem_alloc(sc->rbd_dmat, (void **)&sc->rbd_list,
449 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->rbd_map);
451 device_printf(sc->sc_dev,
452 "could not allocate rx ring DMA memory\n");
456 error = bus_dmamap_load(sc->rbd_dmat, sc->rbd_map, sc->rbd_list,
457 IPW_RBD_SZ, ipw_dma_map_addr, &sc->rbd_phys, 0);
459 device_printf(sc->sc_dev, "could not map rx ring DMA memory\n");
464 * Allocate and map status ring.
466 error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
467 BUS_SPACE_MAXADDR, NULL, NULL, IPW_STATUS_SZ, 1, IPW_STATUS_SZ, 0,
468 NULL, NULL, &sc->status_dmat);
470 device_printf(sc->sc_dev,
471 "could not create status ring DMA tag\n");
475 error = bus_dmamem_alloc(sc->status_dmat, (void **)&sc->status_list,
476 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->status_map);
478 device_printf(sc->sc_dev,
479 "could not allocate status ring DMA memory\n");
483 error = bus_dmamap_load(sc->status_dmat, sc->status_map,
484 sc->status_list, IPW_STATUS_SZ, ipw_dma_map_addr, &sc->status_phys,
487 device_printf(sc->sc_dev,
488 "could not map status ring DMA memory\n");
493 * Allocate command DMA map.
495 error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
496 BUS_SPACE_MAXADDR, NULL, NULL, sizeof (struct ipw_cmd), 1,
497 sizeof (struct ipw_cmd), 0, NULL, NULL, &sc->cmd_dmat);
499 device_printf(sc->sc_dev, "could not create command DMA tag\n");
503 error = bus_dmamap_create(sc->cmd_dmat, 0, &sc->cmd_map);
505 device_printf(sc->sc_dev,
506 "could not create command DMA map\n");
511 * Allocate headers DMA maps.
513 error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
514 BUS_SPACE_MAXADDR, NULL, NULL, sizeof (struct ipw_hdr), 1,
515 sizeof (struct ipw_hdr), 0, NULL, NULL, &sc->hdr_dmat);
517 device_printf(sc->sc_dev, "could not create header DMA tag\n");
521 SLIST_INIT(&sc->free_shdr);
522 for (i = 0; i < IPW_NDATA; i++) {
523 shdr = &sc->shdr_list[i];
524 error = bus_dmamap_create(sc->hdr_dmat, 0, &shdr->map);
526 device_printf(sc->sc_dev,
527 "could not create header DMA map\n");
530 SLIST_INSERT_HEAD(&sc->free_shdr, shdr, next);
534 * Allocate tx buffers DMA maps.
536 error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
537 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, IPW_MAX_NSEG, MCLBYTES, 0,
538 NULL, NULL, &sc->txbuf_dmat);
540 device_printf(sc->sc_dev, "could not create tx DMA tag\n");
544 SLIST_INIT(&sc->free_sbuf);
545 for (i = 0; i < IPW_NDATA; i++) {
546 sbuf = &sc->tx_sbuf_list[i];
547 error = bus_dmamap_create(sc->txbuf_dmat, 0, &sbuf->map);
549 device_printf(sc->sc_dev,
550 "could not create tx DMA map\n");
553 SLIST_INSERT_HEAD(&sc->free_sbuf, sbuf, next);
557 * Initialize tx ring.
559 for (i = 0; i < IPW_NTBD; i++) {
560 sbd = &sc->stbd_list[i];
561 sbd->bd = &sc->tbd_list[i];
562 sbd->type = IPW_SBD_TYPE_NOASSOC;
566 * Pre-allocate rx buffers and DMA maps.
568 error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
569 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL,
570 NULL, &sc->rxbuf_dmat);
572 device_printf(sc->sc_dev, "could not create rx DMA tag\n");
576 for (i = 0; i < IPW_NRBD; i++) {
577 sbd = &sc->srbd_list[i];
578 sbuf = &sc->rx_sbuf_list[i];
579 sbd->bd = &sc->rbd_list[i];
581 sbuf->m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
582 if (sbuf->m == NULL) {
583 device_printf(sc->sc_dev,
584 "could not allocate rx mbuf\n");
589 error = bus_dmamap_create(sc->rxbuf_dmat, 0, &sbuf->map);
591 device_printf(sc->sc_dev,
592 "could not create rx DMA map\n");
596 error = bus_dmamap_load(sc->rxbuf_dmat, sbuf->map,
597 mtod(sbuf->m, void *), MCLBYTES, ipw_dma_map_addr,
600 device_printf(sc->sc_dev,
601 "could not map rx DMA memory\n");
605 sbd->type = IPW_SBD_TYPE_DATA;
607 sbd->bd->physaddr = htole32(physaddr);
608 sbd->bd->len = htole32(MCLBYTES);
/* Hand the fully-initialized rx ring to the device. */
611 bus_dmamap_sync(sc->rbd_dmat, sc->rbd_map, BUS_DMASYNC_PREWRITE);
/* Unified error path: free everything allocated so far. */
615 fail: ipw_release(sc);
/*
 * ipw_release: free all busdma resources created by ipw_dma_alloc().
 * Safe to call on partially-initialized state (each tag is NULL-checked),
 * since it doubles as ipw_dma_alloc()'s failure path.
 * NOTE(review): several lines (inner closing braces, bus_dmamem_free
 * second arguments' map parameters, local `int i;`) are missing from
 * this extract.
 */
620 ipw_release(struct ipw_softc *sc)
622 struct ipw_soft_buf *sbuf;
625 if (sc->tbd_dmat != NULL) {
626 if (sc->stbd_list != NULL) {
627 bus_dmamap_unload(sc->tbd_dmat, sc->tbd_map);
628 bus_dmamem_free(sc->tbd_dmat, sc->tbd_list,
631 bus_dma_tag_destroy(sc->tbd_dmat);
634 if (sc->rbd_dmat != NULL) {
635 if (sc->rbd_list != NULL) {
636 bus_dmamap_unload(sc->rbd_dmat, sc->rbd_map);
637 bus_dmamem_free(sc->rbd_dmat, sc->rbd_list,
640 bus_dma_tag_destroy(sc->rbd_dmat);
643 if (sc->status_dmat != NULL) {
644 if (sc->status_list != NULL) {
645 bus_dmamap_unload(sc->status_dmat, sc->status_map);
646 bus_dmamem_free(sc->status_dmat, sc->status_list,
649 bus_dma_tag_destroy(sc->status_dmat);
/* Release any tx descriptors still holding command/header/data state. */
652 for (i = 0; i < IPW_NTBD; i++)
653 ipw_release_sbd(sc, &sc->stbd_list[i]);
655 if (sc->cmd_dmat != NULL) {
656 bus_dmamap_destroy(sc->cmd_dmat, sc->cmd_map);
657 bus_dma_tag_destroy(sc->cmd_dmat);
660 if (sc->hdr_dmat != NULL) {
661 for (i = 0; i < IPW_NDATA; i++)
662 bus_dmamap_destroy(sc->hdr_dmat, sc->shdr_list[i].map);
663 bus_dma_tag_destroy(sc->hdr_dmat);
666 if (sc->txbuf_dmat != NULL) {
667 for (i = 0; i < IPW_NDATA; i++) {
668 bus_dmamap_destroy(sc->txbuf_dmat,
669 sc->tx_sbuf_list[i].map);
671 bus_dma_tag_destroy(sc->txbuf_dmat);
674 if (sc->rxbuf_dmat != NULL) {
675 for (i = 0; i < IPW_NRBD; i++) {
676 sbuf = &sc->rx_sbuf_list[i];
677 if (sbuf->m != NULL) {
678 bus_dmamap_sync(sc->rxbuf_dmat, sbuf->map,
679 BUS_DMASYNC_POSTREAD);
680 bus_dmamap_unload(sc->rxbuf_dmat, sbuf->map);
683 bus_dmamap_destroy(sc->rxbuf_dmat, sbuf->map);
685 bus_dma_tag_destroy(sc->rxbuf_dmat);
/*
 * ipw_shutdown: system-shutdown hook.
 * NOTE(review): the body (presumably ipw_stop(sc) and return) is
 * missing from this extract.
 */
690 ipw_shutdown(device_t dev)
692 struct ipw_softc *sc = device_get_softc(dev);
/*
 * ipw_suspend: power-management suspend hook.
 * NOTE(review): the body (presumably ipw_stop(sc) and return) is
 * missing from this extract.
 */
700 ipw_suspend(device_t dev)
702 struct ipw_softc *sc = device_get_softc(dev);
/*
 * ipw_resume: power-management resume hook. Re-writes PCI config
 * register 0x41 (as done at attach) and, if the interface was up,
 * re-runs its init routine.
 * NOTE(review): the line between if_init and the IFF_DRV_RUNNING check
 * (presumably ifp->if_start(ifp)) and the return are missing from
 * this extract.
 */
710 ipw_resume(device_t dev)
712 struct ipw_softc *sc = device_get_softc(dev);
713 struct ifnet *ifp = sc->sc_ic.ic_ifp;
715 mtx_lock(&sc->sc_mtx);
717 pci_write_config(dev, 0x41, 0, 1);
719 if (ifp->if_flags & IFF_UP) {
720 ifp->if_init(ifp->if_softc);
721 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
725 mtx_unlock(&sc->sc_mtx);
/*
 * ipw_media_change: ifmedia change callback. Lets net80211 process the
 * media change; ENETRESET from ieee80211_media_change() means the
 * hardware must be reprogrammed, which is done (presumably via
 * ipw_init) only if the interface is up and running.
 * NOTE(review): the reinit call after the IFF_DRV_RUNNING check and
 * the return statements are missing from this extract.
 */
731 ipw_media_change(struct ifnet *ifp)
733 struct ipw_softc *sc = ifp->if_softc;
736 mtx_lock(&sc->sc_mtx);
738 error = ieee80211_media_change(ifp);
739 if (error != ENETRESET) {
740 mtx_unlock(&sc->sc_mtx);
744 if ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
747 mtx_unlock(&sc->sc_mtx);
/*
 * ipw_media_status: ifmedia status callback. Reports link state and
 * translates the firmware's current tx rate (read from adapter table 1)
 * into an 802.11b media word, then adds the opmode-specific media flag.
 * NOTE(review): parts of the rates[] table (DS1/DS2 entries), local
 * declarations (val, rate, i) and several break statements are missing
 * from this extract.
 */
753 * The firmware automatically adapts the transmit speed. We report its current
757 ipw_media_status(struct ifnet *ifp, struct ifmediareq *imr)
759 #define N(a) (sizeof (a) / sizeof (a[0]))
760 struct ipw_softc *sc = ifp->if_softc;
761 struct ieee80211com *ic = &sc->sc_ic;
762 static const struct {
768 { IPW_RATE_DS5, 11 },
769 { IPW_RATE_DS11, 22 },
774 imr->ifm_status = IFM_AVALID;
775 imr->ifm_active = IFM_IEEE80211;
776 if (ic->ic_state == IEEE80211_S_RUN)
777 imr->ifm_status |= IFM_ACTIVE;
779 /* read current transmission rate from adapter */
780 val = ipw_read_table1(sc, IPW_INFO_CURRENT_TX_RATE) & 0xf;
782 /* convert ipw rate to 802.11 rate */
783 for (i = 0; i < N(rates) && rates[i].val != val; i++);
784 rate = (i < N(rates)) ? rates[i].rate : 0;
786 imr->ifm_active |= IFM_IEEE80211_11B;
787 imr->ifm_active |= ieee80211_rate2media(ic, rate, IEEE80211_MODE_11B);
788 switch (ic->ic_opmode) {
789 case IEEE80211_M_STA:
792 case IEEE80211_M_IBSS:
793 imr->ifm_active |= IFM_IEEE80211_IBSS;
796 case IEEE80211_M_MONITOR:
797 imr->ifm_active |= IFM_IEEE80211_MONITOR;
800 case IEEE80211_M_AHDEMO:
801 case IEEE80211_M_HOSTAP:
802 case IEEE80211_M_WDS:
803 /* should not get there */
/*
 * ipw_newstate: replacement for net80211's state-transition handler
 * (installed in ipw_attach). On transition to RUN it reads the current
 * BSSID from the adapter's info table, looks up/joins the corresponding
 * node and authorizes it, notifying net80211 of the join in STA mode.
 * NOTE(review): the switch statement opening, several local
 * declarations (ni, len), break/return statements and closing braces
 * are missing from this extract. The original chain to sc->sc_newstate
 * (saved in attach) is not visible here either.
 */
810 ipw_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
812 struct ifnet *ifp = ic->ic_ifp;
813 struct ipw_softc *sc = ifp->if_softc;
814 uint8_t macaddr[IEEE80211_ADDR_LEN];
818 case IEEE80211_S_RUN:
819 DELAY(200); /* firmware needs a short delay here */
821 len = IEEE80211_ADDR_LEN;
822 ipw_read_table2(sc, IPW_INFO_CURRENT_BSSID, macaddr, &len);
825 ni = ieee80211_find_node(&ic->ic_scan, macaddr);
829 ieee80211_ref_node(ni);
830 ieee80211_sta_join(ic, ni);
831 ieee80211_node_authorize(ni);
833 if (ic->ic_opmode == IEEE80211_M_STA)
834 ieee80211_notify_node_join(ic, ni, 1);
838 case IEEE80211_S_INIT:
839 case IEEE80211_S_SCAN:
840 case IEEE80211_S_AUTH:
841 case IEEE80211_S_ASSOC:
845 ic->ic_state = nstate;
/*
 * ipw_read_prom_word: bit-bang a READ transaction on the serial EEPROM
 * through the IPW_MEM_EEPROM_CTL register -- select, start bit, READ
 * opcode, 8 address bits MSB-first, then clock in 16 data bits.
 * Returns the 16-bit word at 'addr'.
 * NOTE(review): the declarations of tmp/val/n, val's zero-initializer
 * and the final return statement are missing from this extract.
 */
851 * Read 16 bits at address 'addr' from the serial EEPROM.
854 ipw_read_prom_word(struct ipw_softc *sc, uint8_t addr)
860 /* clock C once before the first command */
861 IPW_EEPROM_CTL(sc, 0);
862 IPW_EEPROM_CTL(sc, IPW_EEPROM_S);
863 IPW_EEPROM_CTL(sc, IPW_EEPROM_S | IPW_EEPROM_C);
864 IPW_EEPROM_CTL(sc, IPW_EEPROM_S);
866 /* write start bit (1) */
867 IPW_EEPROM_CTL(sc, IPW_EEPROM_S | IPW_EEPROM_D);
868 IPW_EEPROM_CTL(sc, IPW_EEPROM_S | IPW_EEPROM_D | IPW_EEPROM_C);
870 /* write READ opcode (10) */
871 IPW_EEPROM_CTL(sc, IPW_EEPROM_S | IPW_EEPROM_D);
872 IPW_EEPROM_CTL(sc, IPW_EEPROM_S | IPW_EEPROM_D | IPW_EEPROM_C);
873 IPW_EEPROM_CTL(sc, IPW_EEPROM_S);
874 IPW_EEPROM_CTL(sc, IPW_EEPROM_S | IPW_EEPROM_C);
876 /* write address A7-A0 */
877 for (n = 7; n >= 0; n--) {
878 IPW_EEPROM_CTL(sc, IPW_EEPROM_S |
879 (((addr >> n) & 1) << IPW_EEPROM_SHIFT_D));
880 IPW_EEPROM_CTL(sc, IPW_EEPROM_S |
881 (((addr >> n) & 1) << IPW_EEPROM_SHIFT_D) | IPW_EEPROM_C);
884 IPW_EEPROM_CTL(sc, IPW_EEPROM_S);
886 /* read data Q15-Q0 */
888 for (n = 15; n >= 0; n--) {
889 IPW_EEPROM_CTL(sc, IPW_EEPROM_S | IPW_EEPROM_C);
890 IPW_EEPROM_CTL(sc, IPW_EEPROM_S);
891 tmp = MEM_READ_4(sc, IPW_MEM_EEPROM_CTL);
892 val |= ((tmp & IPW_EEPROM_Q) >> IPW_EEPROM_SHIFT_Q) << n;
895 IPW_EEPROM_CTL(sc, 0);
897 /* clear Chip Select and clock C */
898 IPW_EEPROM_CTL(sc, IPW_EEPROM_S);
899 IPW_EEPROM_CTL(sc, 0);
900 IPW_EEPROM_CTL(sc, IPW_EEPROM_C);
/*
 * ipw_command_intr: handle a command-acknowledgement status from the
 * firmware -- sync the rx buffer for CPU access, log the echoed command
 * fields at debug level 2. NOTE(review): the wakeup() of the ipw_cmd()
 * sleeper presumably follows but is missing from this extract.
 */
906 ipw_command_intr(struct ipw_softc *sc, struct ipw_soft_buf *sbuf)
910 bus_dmamap_sync(sc->rxbuf_dmat, sbuf->map, BUS_DMASYNC_POSTREAD);
912 cmd = mtod(sbuf->m, struct ipw_cmd *);
914 DPRINTFN(2, ("cmd ack'ed (%u, %u, %u, %u, %u)\n", le32toh(cmd->type),
915 le32toh(cmd->subtype), le32toh(cmd->seq), le32toh(cmd->len),
916 le32toh(cmd->status)));
/*
 * ipw_newstate_intr: handle a firmware state-change notification. The
 * new state is the first 32-bit LE word of the rx buffer; it is mapped
 * onto net80211 state transitions (RUN on association, SCAN on scan
 * start unless already RUN, INIT on association loss) and scan/radio
 * flags. NOTE(review): the switch opening, break statements and
 * closing braces are missing from this extract.
 */
922 ipw_newstate_intr(struct ipw_softc *sc, struct ipw_soft_buf *sbuf)
924 struct ieee80211com *ic = &sc->sc_ic;
927 bus_dmamap_sync(sc->rxbuf_dmat, sbuf->map, BUS_DMASYNC_POSTREAD);
929 state = le32toh(*mtod(sbuf->m, uint32_t *));
931 DPRINTFN(2, ("entering state %u\n", state));
934 case IPW_STATE_ASSOCIATED:
935 ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
938 case IPW_STATE_SCANNING:
939 /* don't leave run state on background scan */
940 if (ic->ic_state != IEEE80211_S_RUN)
941 ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
943 ic->ic_flags |= IEEE80211_F_SCAN;
946 case IPW_STATE_SCAN_COMPLETE:
947 ieee80211_notify_scan_done(ic);
948 ic->ic_flags &= ~IEEE80211_F_SCAN;
951 case IPW_STATE_ASSOCIATION_LOST:
952 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
955 case IPW_STATE_RADIO_DISABLED:
956 ic->ic_ifp->if_flags &= ~IFF_UP;
/*
 * ipw_fix_channel: inspect a received management frame and, if it is a
 * beacon or probe response carrying a DS Parameter Set element, set
 * ic_bsschan from the advertised channel number. Skips the 12 fixed
 * bytes (timestamp, beacon interval, capability info) before the IEs.
 * NOTE(review): early-return statements, the #endif matching the
 * IEEE80211_CHAN_MAX guard, and an IE-walking loop bound against efrm
 * appear to be missing from this extract.
 */
963 * XXX: Hack to set the current channel to the value advertised in beacons or
964 * probe responses. Only used during AP detection.
967 ipw_fix_channel(struct ieee80211com *ic, struct mbuf *m)
969 struct ieee80211_frame *wh;
973 wh = mtod(m, struct ieee80211_frame *);
975 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
978 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
980 if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
981 subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
984 frm = (uint8_t *)(wh + 1);
985 efrm = mtod(m, uint8_t *) + m->m_len;
987 frm += 12; /* skip tstamp, bintval and capinfo fields */
989 if (*frm == IEEE80211_ELEMID_DSPARMS)
990 #if IEEE80211_CHAN_MAX < 255
991 if (frm[2] <= IEEE80211_CHAN_MAX)
993 ic->ic_bsschan = ieee80211_find_channel(ic,
994 ieee80211_ieee2mhz(frm[2], 0),
995 IEEE80211_MODE_AUTO);
/*
 * ipw_data_intr: handle a received data frame. Validates the reported
 * length, swaps in a freshly-allocated mbuf cluster for the ring slot
 * (reusing/reloading the old one on allocation or mapping failure, and
 * panicking only if even the reload fails), updates the ring
 * descriptor, optionally taps the frame to radiotap BPF, fixes the
 * channel during scans, and passes the frame up to net80211 with the
 * softc lock dropped around ieee80211_input().
 * NOTE(review): several error-branch conditionals, counters
 * (if_ierrors), `m = sbuf->m; sbuf->m = mnew;` swap lines, and closing
 * braces are missing from this extract.
 */
1002 ipw_data_intr(struct ipw_softc *sc, struct ipw_status *status,
1003 struct ipw_soft_bd *sbd, struct ipw_soft_buf *sbuf)
1005 struct ieee80211com *ic = &sc->sc_ic;
1006 struct ifnet *ifp = ic->ic_ifp;
1007 struct mbuf *mnew, *m;
1008 struct ieee80211_frame *wh;
1009 struct ieee80211_node *ni;
1010 bus_addr_t physaddr;
1013 DPRINTFN(5, ("received frame len=%u, rssi=%u\n", le32toh(status->len),
1016 if (le32toh(status->len) < sizeof (struct ieee80211_frame_min) ||
1017 le32toh(status->len) > MCLBYTES)
1021 * Try to allocate a new mbuf for this ring element and load it before
1022 * processing the current mbuf. If the ring element cannot be loaded,
1023 * drop the received packet and reuse the old mbuf. In the unlikely
1024 * case that the old mbuf can't be reloaded either, explicitly panic.
1026 mnew = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1032 bus_dmamap_sync(sc->rxbuf_dmat, sbuf->map, BUS_DMASYNC_POSTREAD);
1033 bus_dmamap_unload(sc->rxbuf_dmat, sbuf->map);
1035 error = bus_dmamap_load(sc->rxbuf_dmat, sbuf->map, mtod(mnew, void *),
1036 MCLBYTES, ipw_dma_map_addr, &physaddr, 0);
1040 /* try to reload the old mbuf */
1041 error = bus_dmamap_load(sc->rxbuf_dmat, sbuf->map,
1042 mtod(sbuf->m, void *), MCLBYTES, ipw_dma_map_addr,
1045 /* very unlikely that it will fail... */
1046 panic("%s: could not load old rx mbuf",
1047 device_get_name(sc->sc_dev));
1054 * New mbuf successfully loaded, update Rx ring and continue
1059 sbd->bd->physaddr = htole32(physaddr);
1062 m->m_pkthdr.rcvif = ifp;
1063 m->m_pkthdr.len = m->m_len = le32toh(status->len);
1065 if (bpf_peers_present(sc->sc_drvbpf)) {
1066 struct ipw_rx_radiotap_header *tap = &sc->sc_rxtap;
1069 tap->wr_antsignal = status->rssi;
1070 tap->wr_chan_freq = htole16(ic->ic_curchan->ic_freq);
1071 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
1073 bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m);
1076 if (ic->ic_state == IEEE80211_S_SCAN)
1077 ipw_fix_channel(ic, m);
1079 wh = mtod(m, struct ieee80211_frame *);
/* Drop the softc lock across net80211 input to avoid lock-order issues. */
1080 mtx_unlock(&sc->sc_mtx);
1081 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
1083 /* send the frame to the 802.11 layer */
1084 ieee80211_input(ic, m, ni, status->rssi, -95/*XXX*/, 0);
1086 /* node is no longer needed */
1087 ieee80211_free_node(ni);
1088 mtx_lock(&sc->sc_mtx);
1090 bus_dmamap_sync(sc->rbd_dmat, sc->rbd_map, BUS_DMASYNC_PREWRITE);
/*
 * ipw_rx_intr: drain the rx status ring. Reads the firmware's read
 * index, walks every slot between our cursor and it, dispatching each
 * entry by its status code (command ack, state change, data frame,
 * notification), then writes the new write index back to tell the
 * firmware which slots are free again. Bails out mid-loop if the
 * firmware was killed (FW_INITED cleared) by a handler.
 * NOTE(review): the sbuf assignment inside the loop, break statements,
 * local declarations (r, i) and closing braces are missing from this
 * extract.
 */
1094 ipw_rx_intr(struct ipw_softc *sc)
1096 struct ipw_status *status;
1097 struct ipw_soft_bd *sbd;
1098 struct ipw_soft_buf *sbuf;
1101 if (!(sc->flags & IPW_FLAG_FW_INITED))
1104 r = CSR_READ_4(sc, IPW_CSR_RX_READ);
1106 bus_dmamap_sync(sc->status_dmat, sc->status_map, BUS_DMASYNC_POSTREAD);
1108 for (i = (sc->rxcur + 1) % IPW_NRBD; i != r; i = (i + 1) % IPW_NRBD) {
1109 status = &sc->status_list[i];
1110 sbd = &sc->srbd_list[i];
1113 switch (le16toh(status->code) & 0xf) {
1114 case IPW_STATUS_CODE_COMMAND:
1115 ipw_command_intr(sc, sbuf);
1118 case IPW_STATUS_CODE_NEWSTATE:
1119 ipw_newstate_intr(sc, sbuf);
1122 case IPW_STATUS_CODE_DATA_802_3:
1123 case IPW_STATUS_CODE_DATA_802_11:
1124 ipw_data_intr(sc, status, sbd, sbuf);
1127 case IPW_STATUS_CODE_NOTIFICATION:
1128 DPRINTFN(2, ("received notification\n"));
1132 device_printf(sc->sc_dev, "unknown status code %u\n",
1133 le16toh(status->code));
1136 /* firmware was killed, stop processing received frames */
1137 if (!(sc->flags & IPW_FLAG_FW_INITED))
1143 bus_dmamap_sync(sc->rbd_dmat, sc->rbd_map, BUS_DMASYNC_PREWRITE);
1145 /* kick the firmware */
1146 sc->rxcur = (r == 0) ? IPW_NRBD - 1 : r - 1;
1147 CSR_WRITE_4(sc, IPW_CSR_RX_WRITE, sc->rxcur);
/*
 * ipw_release_sbd: release the DMA state attached to one tx ring
 * descriptor according to its type -- command map unload, header map
 * return to the free_shdr list, or data buffer unload with mbuf
 * tx-callback processing and node release. Resets the slot to
 * NOASSOC so it is ignored on the next pass.
 * NOTE(review): the shdr/sbuf assignments from sbd->priv, break
 * statements, m_freem of the data mbuf and closing braces are missing
 * from this extract.
 */
1151 ipw_release_sbd(struct ipw_softc *sc, struct ipw_soft_bd *sbd)
1153 struct ipw_soft_hdr *shdr;
1154 struct ipw_soft_buf *sbuf;
1156 switch (sbd->type) {
1157 case IPW_SBD_TYPE_COMMAND:
1158 bus_dmamap_sync(sc->cmd_dmat, sc->cmd_map,
1159 BUS_DMASYNC_POSTWRITE);
1160 bus_dmamap_unload(sc->cmd_dmat, sc->cmd_map);
1163 case IPW_SBD_TYPE_HEADER:
1165 bus_dmamap_sync(sc->hdr_dmat, shdr->map, BUS_DMASYNC_POSTWRITE);
1166 bus_dmamap_unload(sc->hdr_dmat, shdr->map);
1167 SLIST_INSERT_HEAD(&sc->free_shdr, shdr, next);
1170 case IPW_SBD_TYPE_DATA:
1172 bus_dmamap_sync(sc->txbuf_dmat, sbuf->map,
1173 BUS_DMASYNC_POSTWRITE);
1174 bus_dmamap_unload(sc->txbuf_dmat, sbuf->map);
1175 SLIST_INSERT_HEAD(&sc->free_sbuf, sbuf, next);
1177 if (sbuf->m->m_flags & M_TXCB)
1178 ieee80211_process_callback(sbuf->ni, sbuf->m, 0/*XXX*/);
1180 ieee80211_free_node(sbuf->ni);
1182 sc->sc_tx_timer = 0;
1186 sbd->type = IPW_SBD_TYPE_NOASSOC;
/*
 * ipw_tx_intr: reclaim tx descriptors the firmware has consumed --
 * walk from the last reclaimed slot to the firmware's read index,
 * releasing each descriptor's DMA state, then clear OACTIVE so the
 * start routine can queue more frames.
 * NOTE(review): the opackets counter bump for DATA slots, local
 * declarations (r, i), the trailing if_start call and closing braces
 * are missing from this extract.
 */
1190 ipw_tx_intr(struct ipw_softc *sc)
1192 struct ifnet *ifp = sc->sc_ic.ic_ifp;
1193 struct ipw_soft_bd *sbd;
1196 if (!(sc->flags & IPW_FLAG_FW_INITED))
1199 r = CSR_READ_4(sc, IPW_CSR_TX_READ);
1201 for (i = (sc->txold + 1) % IPW_NTBD; i != r; i = (i + 1) % IPW_NTBD) {
1202 sbd = &sc->stbd_list[i];
1204 if (sbd->type == IPW_SBD_TYPE_DATA)
1207 ipw_release_sbd(sc, sbd);
1211 /* remember what the firmware has processed */
1212 sc->txold = (r == 0) ? IPW_NTBD - 1 : r - 1;
1214 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/*
 * ipw_intr: interrupt handler. Reads and acknowledges the interrupt
 * cause register (bailing if zero or all-ones, i.e. not ours or device
 * gone), masks interrupts while processing, schedules a full re-init
 * via the taskqueue on fatal/parity firmware errors, dispatches rx/tx
 * transfer causes to their handlers, then re-enables the mask.
 * NOTE(review): the `static void ipw_intr(void *arg)` signature line,
 * the bodies of the FW_INIT_DONE / RX / TX dispatch branches
 * (presumably wakeup, ipw_rx_intr, ipw_tx_intr) and closing braces are
 * missing from this extract.
 */
1221 struct ipw_softc *sc = arg;
1224 mtx_lock(&sc->sc_mtx);
1226 if ((r = CSR_READ_4(sc, IPW_CSR_INTR)) == 0 || r == 0xffffffff) {
1227 mtx_unlock(&sc->sc_mtx);
1231 /* disable interrupts */
1232 CSR_WRITE_4(sc, IPW_CSR_INTR_MASK, 0);
1234 /* acknowledge all interrupts */
1235 CSR_WRITE_4(sc, IPW_CSR_INTR, r);
1237 if (r & (IPW_INTR_FATAL_ERROR | IPW_INTR_PARITY_ERROR)) {
1238 device_printf(sc->sc_dev, "firmware error\n");
1239 taskqueue_enqueue_fast(taskqueue_fast, &sc->sc_init_task);
1240 r = 0; /* don't process more interrupts */
1243 if (r & IPW_INTR_FW_INIT_DONE)
1246 if (r & IPW_INTR_RX_TRANSFER)
1249 if (r & IPW_INTR_TX_TRANSFER)
1252 /* re-enable interrupts */
1253 CSR_WRITE_4(sc, IPW_CSR_INTR_MASK, IPW_INTR_MASK);
1255 mtx_unlock(&sc->sc_mtx);
/*
 * ipw_dma_map_addr: busdma load callback; stores the single segment's
 * bus address into the bus_addr_t pointed to by 'arg'. Asserts exactly
 * one segment, matching the nsegments=1 tags created in ipw_dma_alloc.
 * NOTE(review): the early return on a nonzero 'error' argument appears
 * to be missing from this extract.
 */
1259 ipw_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1264 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
1266 *(bus_addr_t *)arg = segs[0].ds_addr;
/*
 * ipw_cmd: send one command to the firmware through the tx ring and
 * sleep (up to hz ticks) on the softc until ipw_command_intr wakes us.
 * The command payload is copied into sc->cmd, DMA-mapped, attached to
 * the current tx slot as a COMMAND descriptor, synced, and the write
 * index is bumped to kick the firmware. Returns 0 on ack, or the
 * msleep() error (EWOULDBLOCK on timeout). Must be called with
 * sc_mtx held (msleep drops/reacquires it).
 * NOTE(review): the error-return after the load failure, sc->cmd.seq
 * assignment and some closing braces are missing from this extract.
 */
1270 * Send a command to the firmware and wait for the acknowledgement.
1273 ipw_cmd(struct ipw_softc *sc, uint32_t type, void *data, uint32_t len)
1275 struct ipw_soft_bd *sbd;
1276 bus_addr_t physaddr;
1279 sbd = &sc->stbd_list[sc->txcur];
1281 error = bus_dmamap_load(sc->cmd_dmat, sc->cmd_map, &sc->cmd,
1282 sizeof (struct ipw_cmd), ipw_dma_map_addr, &physaddr, 0);
1284 device_printf(sc->sc_dev, "could not map command DMA memory\n");
1288 sc->cmd.type = htole32(type);
1289 sc->cmd.subtype = 0;
1290 sc->cmd.len = htole32(len);
1292 memcpy(sc->cmd.data, data, len);
1294 sbd->type = IPW_SBD_TYPE_COMMAND;
1295 sbd->bd->physaddr = htole32(physaddr);
1296 sbd->bd->len = htole32(sizeof (struct ipw_cmd));
1298 sbd->bd->flags = IPW_BD_FLAG_TX_FRAME_COMMAND |
1299 IPW_BD_FLAG_TX_LAST_FRAGMENT;
1301 bus_dmamap_sync(sc->cmd_dmat, sc->cmd_map, BUS_DMASYNC_PREWRITE);
1302 bus_dmamap_sync(sc->tbd_dmat, sc->tbd_map, BUS_DMASYNC_PREWRITE);
1304 DPRINTFN(2, ("sending command (%u, %u, %u, %u)\n", type, 0, 0, len));
1308 sc->txcur = (sc->txcur + 1) % IPW_NTBD;
1309 CSR_WRITE_4(sc, IPW_CSR_TX_WRITE, sc->txcur);
1311 /* wait at most one second for command to complete */
1312 return msleep(sc, &sc->sc_mtx, 0, "ipwcmd", hz);
/*
 * ipw_tx_start: queue one 802.11 frame for transmission.
 *
 * Encrypts the frame in place when the WEP bit is set (which may
 * relocate the packet header), taps radiotap listeners, builds the
 * driver's ipw_hdr with source/destination addresses, strips the
 * 802.11 header, DMA-maps the mbuf chain (retrying once through
 * m_defrag() on EFBIG), and finally queues one HEADER descriptor plus
 * one descriptor per DMA segment on the tx ring.
 *
 * NOTE(review): this excerpt is line-sampled; error-return statements
 * and closing braces between the visible lines are missing.
 */
1316 ipw_tx_start(struct ifnet *ifp, struct mbuf *m0, struct ieee80211_node *ni)
1318 struct ipw_softc *sc = ifp->if_softc;
1319 struct ieee80211com *ic = &sc->sc_ic;
1320 struct ieee80211_frame *wh;
1321 struct ipw_soft_bd *sbd;
1322 struct ipw_soft_hdr *shdr;
1323 struct ipw_soft_buf *sbuf;
1324 struct ieee80211_key *k;
1326 bus_dma_segment_t segs[IPW_MAX_NSEG];
1327 bus_addr_t physaddr;
1328 int nsegs, error, i;
1330 wh = mtod(m0, struct ieee80211_frame *);
/* encrypt in software/crypto layer when the frame is marked protected */
1332 if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
1333 k = ieee80211_crypto_encap(ic, ni, m0);
1339 /* packet header may have moved, reset our local pointer */
1340 wh = mtod(m0, struct ieee80211_frame *);
/* feed radiotap header + frame to attached bpf listeners */
1343 if (bpf_peers_present(sc->sc_drvbpf)) {
1344 struct ipw_tx_radiotap_header *tap = &sc->sc_txtap;
1347 tap->wt_chan_freq = htole16(ic->ic_curchan->ic_freq);
1348 tap->wt_chan_flags = htole16(ic->ic_curchan->ic_flags);
1350 bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m0);
/* grab a software header and buffer from the free pools */
1353 shdr = SLIST_FIRST(&sc->free_shdr);
1354 sbuf = SLIST_FIRST(&sc->free_sbuf);
1355 KASSERT(shdr != NULL && sbuf != NULL, ("empty sw hdr/buf pool"));
/* build the device tx header; crypto fields stay zero (already done above) */
1357 shdr->hdr.type = htole32(IPW_HDR_TYPE_SEND);
1358 shdr->hdr.subtype = 0;
1359 shdr->hdr.encrypted = (wh->i_fc[1] & IEEE80211_FC1_WEP) ? 1 : 0;
1360 shdr->hdr.encrypt = 0;
1361 shdr->hdr.keyidx = 0;
1362 shdr->hdr.keysz = 0;
1363 shdr->hdr.fragmentsz = 0;
1364 IEEE80211_ADDR_COPY(shdr->hdr.src_addr, wh->i_addr2);
/* in STA mode the real destination is addr3 (via the AP); else addr1 */
1365 if (ic->ic_opmode == IEEE80211_M_STA)
1366 IEEE80211_ADDR_COPY(shdr->hdr.dst_addr, wh->i_addr3);
1368 IEEE80211_ADDR_COPY(shdr->hdr.dst_addr, wh->i_addr1);
1370 /* trim IEEE802.11 header */
1371 m_adj(m0, sizeof (struct ieee80211_frame));
1373 error = bus_dmamap_load_mbuf_sg(sc->txbuf_dmat, sbuf->map, m0, segs,
1375 if (error != 0 && error != EFBIG) {
1376 device_printf(sc->sc_dev, "could not map mbuf (error %d)\n",
/* EFBIG: too many segments — defragment into one cluster and retry once */
1382 mnew = m_defrag(m0, M_DONTWAIT);
1384 device_printf(sc->sc_dev,
1385 "could not defragment mbuf\n");
1391 error = bus_dmamap_load_mbuf_sg(sc->txbuf_dmat, sbuf->map, m0,
1394 device_printf(sc->sc_dev,
1395 "could not map mbuf (error %d)\n", error);
/* map the driver header; unload the data map on failure */
1401 error = bus_dmamap_load(sc->hdr_dmat, shdr->map, &shdr->hdr,
1402 sizeof (struct ipw_hdr), ipw_dma_map_addr, &physaddr, 0);
1404 device_printf(sc->sc_dev, "could not map header DMA memory\n");
1405 bus_dmamap_unload(sc->txbuf_dmat, sbuf->map);
/* both mappings succeeded — commit the pool entries */
1410 SLIST_REMOVE_HEAD(&sc->free_sbuf, next);
1411 SLIST_REMOVE_HEAD(&sc->free_shdr, next);
/* descriptor 0: the ipw_hdr, announcing 1 + nsegs total fragments */
1413 sbd = &sc->stbd_list[sc->txcur];
1414 sbd->type = IPW_SBD_TYPE_HEADER;
1416 sbd->bd->physaddr = htole32(physaddr);
1417 sbd->bd->len = htole32(sizeof (struct ipw_hdr));
1418 sbd->bd->nfrag = 1 + nsegs;
1419 sbd->bd->flags = IPW_BD_FLAG_TX_FRAME_802_3 |
1420 IPW_BD_FLAG_TX_NOT_LAST_FRAGMENT;
1422 DPRINTFN(5, ("sending tx hdr (%u, %u, %u, %u, %6D, %6D)\n",
1423 shdr->hdr.type, shdr->hdr.subtype, shdr->hdr.encrypted,
1424 shdr->hdr.encrypt, shdr->hdr.src_addr, ":", shdr->hdr.dst_addr,
1428 sc->txcur = (sc->txcur + 1) % IPW_NTBD;
/* one descriptor per DMA segment; only the final one ends the frame */
1433 for (i = 0; i < nsegs; i++) {
1434 sbd = &sc->stbd_list[sc->txcur];
1436 sbd->bd->physaddr = htole32(segs[i].ds_addr);
1437 sbd->bd->len = htole32(segs[i].ds_len);
1439 sbd->bd->flags = IPW_BD_FLAG_TX_FRAME_802_3;
1440 if (i == nsegs - 1) {
1441 sbd->type = IPW_SBD_TYPE_DATA;
1443 sbd->bd->flags |= IPW_BD_FLAG_TX_LAST_FRAGMENT;
1445 sbd->type = IPW_SBD_TYPE_NOASSOC;
1446 sbd->bd->flags |= IPW_BD_FLAG_TX_NOT_LAST_FRAGMENT;
1449 DPRINTFN(5, ("sending fragment (%d, %d)\n", i, segs[i].ds_len));
1452 sc->txcur = (sc->txcur + 1) % IPW_NTBD;
/* flush header, data and ring before publishing the write index */
1455 bus_dmamap_sync(sc->hdr_dmat, shdr->map, BUS_DMASYNC_PREWRITE);
1456 bus_dmamap_sync(sc->txbuf_dmat, sbuf->map, BUS_DMASYNC_PREWRITE);
1457 bus_dmamap_sync(sc->tbd_dmat, sc->tbd_map, BUS_DMASYNC_PREWRITE);
1460 CSR_WRITE_4(sc, IPW_CSR_TX_WRITE, sc->txcur);
/*
 * ipw_start: ifnet if_start handler.  While associated (802.11 state
 * RUN), dequeue frames from the interface send queue, encapsulate them
 * and pass them to ipw_tx_start().  Runs under the driver mutex.
 *
 * NOTE(review): excerpt is line-sampled; loop braces and some error
 * paths between the visible lines are missing.
 */
1466 ipw_start(struct ifnet *ifp)
1468 struct ipw_softc *sc = ifp->if_softc;
1469 struct ieee80211com *ic = &sc->sc_ic;
1471 struct ether_header *eh;
1472 struct ieee80211_node *ni;
1474 mtx_lock(&sc->sc_mtx);
/* nothing to do until we are associated */
1476 if (ic->ic_state != IEEE80211_S_RUN) {
1477 mtx_unlock(&sc->sc_mtx);
1482 IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
/* need one header slot plus up to IPW_MAX_NSEG data slots on the ring */
1486 if (sc->txfree < 1 + IPW_MAX_NSEG) {
1487 IFQ_DRV_PREPEND(&ifp->if_snd, m0);
1488 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
/* make sure the Ethernet header is contiguous before reading it */
1492 if (m0->m_len < sizeof (struct ether_header) &&
1493 (m0 = m_pullup(m0, sizeof (struct ether_header))) == NULL)
1496 eh = mtod(m0, struct ether_header *);
1497 ni = ieee80211_find_txnode(ic, eh->ether_dhost);
/* 802.3 -> 802.11 encapsulation; frees the node reference on failure */
1504 m0 = ieee80211_encap(ic, m0, ni);
1506 ieee80211_free_node(ni);
1510 if (bpf_peers_present(ic->ic_rawbpf))
1511 bpf_mtap(ic->ic_rawbpf, m0);
1513 if (ipw_tx_start(ifp, m0, ni) != 0) {
1514 ieee80211_free_node(ni);
1519 /* start watchdog timer */
1520 sc->sc_tx_timer = 5;
1524 mtx_unlock(&sc->sc_mtx);
/*
 * ipw_watchdog: interface watchdog.  Counts sc_tx_timer down once per
 * invocation; when it hits zero the device is assumed wedged — report
 * it and schedule the (re)init task on the fast taskqueue.
 */
1528 ipw_watchdog(struct ifnet *ifp)
1530 struct ipw_softc *sc = ifp->if_softc;
1532 mtx_lock(&sc->sc_mtx);
1536 if (sc->sc_tx_timer > 0) {
1537 if (--sc->sc_tx_timer == 0) {
1538 if_printf(ifp, "device timeout\n");
/* recover by re-initializing the device from task context */
1540 taskqueue_enqueue_fast(taskqueue_fast,
1542 mtx_unlock(&sc->sc_mtx);
1548 mtx_unlock(&sc->sc_mtx);
/*
 * ipw_ioctl: interface ioctl handler.  Visible logic: bring the device
 * up/down on IFF_UP transitions, otherwise delegate to the net80211
 * layer; an ENETRESET result from net80211 triggers a re-init when the
 * interface is up and running.  (The switch statement around these
 * cases is on lines missing from this excerpt.)
 */
1552 ipw_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1554 struct ipw_softc *sc = ifp->if_softc;
1555 struct ieee80211com *ic = &sc->sc_ic;
1558 mtx_lock(&sc->sc_mtx);
/* IFF_UP requested but not running -> init; running but down -> stop */
1562 if (ifp->if_flags & IFF_UP) {
1563 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1566 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
/* everything else is handled by net80211 */
1572 error = ieee80211_ioctl(ic, cmd, data);
1575 if (error == ENETRESET) {
1576 if ((ifp->if_flags & IFF_UP) &&
1577 (ifp->if_drv_flags & IFF_DRV_RUNNING))
1582 mtx_unlock(&sc->sc_mtx);
/*
 * ipw_stop_master: quiesce the adapter.  Masks interrupts, asserts
 * STOP_MASTER and polls (up to 50 tries) for MASTER_DISABLED, then
 * pulls the Princeton reset line and clears the firmware-initialized
 * flag.  A timeout is only reported, not treated as fatal here.
 */
1588 ipw_stop_master(struct ipw_softc *sc)
1593 /* disable interrupts */
1594 CSR_WRITE_4(sc, IPW_CSR_INTR_MASK, 0);
1596 CSR_WRITE_4(sc, IPW_CSR_RST, IPW_RST_STOP_MASTER);
1597 for (ntries = 0; ntries < 50; ntries++) {
1598 if (CSR_READ_4(sc, IPW_CSR_RST) & IPW_RST_MASTER_DISABLED)
1603 device_printf(sc->sc_dev, "timeout waiting for master\n");
/* read-modify-write: keep other RST bits, add the Princeton reset */
1605 tmp = CSR_READ_4(sc, IPW_CSR_RST);
1606 CSR_WRITE_4(sc, IPW_CSR_RST, tmp | IPW_RST_PRINCETON_RESET);
1608 sc->flags &= ~IPW_FLAG_FW_INITED;
/*
 * ipw_reset: full adapter reset.  Stops the master, moves the device
 * to power state D0, waits (up to 1000 tries) for the clock to become
 * ready, issues a software reset and re-asserts INIT.
 */
1612 ipw_reset(struct ipw_softc *sc)
1617 ipw_stop_master(sc);
1619 /* move adapter to D0 state */
1620 tmp = CSR_READ_4(sc, IPW_CSR_CTL);
1621 CSR_WRITE_4(sc, IPW_CSR_CTL, tmp | IPW_CTL_INIT);
1623 /* wait for clock stabilization */
1624 for (ntries = 0; ntries < 1000; ntries++) {
1625 if (CSR_READ_4(sc, IPW_CSR_CTL) & IPW_CTL_CLOCK_READY)
/* clock is stable: software-reset the chip, then re-enable INIT */
1632 tmp = CSR_READ_4(sc, IPW_CSR_RST);
1633 CSR_WRITE_4(sc, IPW_CSR_RST, tmp | IPW_RST_SW_RESET);
1637 tmp = CSR_READ_4(sc, IPW_CSR_CTL);
1638 CSR_WRITE_4(sc, IPW_CSR_CTL, tmp | IPW_CTL_INIT);
1644 * Upload the microcode to the device.
/*
 * ipw_load_ucode: write 'size' bytes of microcode at 'uc' into the
 * device through an undocumented sequence of magic register writes
 * (addresses 0x3000e0 / 0x220000 / 0x210000 / 0x210014), then poll
 * (up to 10 tries) for the ucode-ready bit at 0x210000.
 * NOTE(review): the register values here are vendor magic with no
 * public documentation; do not reorder or "clean up" this sequence.
 */
1647 ipw_load_ucode(struct ipw_softc *sc, const char *uc, int size)
1651 MEM_WRITE_4(sc, 0x3000e0, 0x80000000);
1652 CSR_WRITE_4(sc, IPW_CSR_RST, 0);
1654 MEM_WRITE_2(sc, 0x220000, 0x0703);
1655 MEM_WRITE_2(sc, 0x220000, 0x0707);
1657 MEM_WRITE_1(sc, 0x210014, 0x72);
1658 MEM_WRITE_1(sc, 0x210014, 0x72);
1660 MEM_WRITE_1(sc, 0x210000, 0x40);
1661 MEM_WRITE_1(sc, 0x210000, 0x00);
1662 MEM_WRITE_1(sc, 0x210000, 0x40);
/* stream the microcode image through the 0x210010 port */
1664 MEM_WRITE_MULTI_1(sc, 0x210010, uc, size);
1666 MEM_WRITE_1(sc, 0x210000, 0x00);
1667 MEM_WRITE_1(sc, 0x210000, 0x00);
1668 MEM_WRITE_1(sc, 0x210000, 0x80);
1670 MEM_WRITE_2(sc, 0x220000, 0x0703);
1671 MEM_WRITE_2(sc, 0x220000, 0x0707);
1673 MEM_WRITE_1(sc, 0x210014, 0x72);
1674 MEM_WRITE_1(sc, 0x210014, 0x72);
1676 MEM_WRITE_1(sc, 0x210000, 0x00);
1677 MEM_WRITE_1(sc, 0x210000, 0x80);
/* wait for the microcode to signal it is alive (bit 0 of 0x210000) */
1679 for (ntries = 0; ntries < 10; ntries++) {
1680 if (MEM_READ_1(sc, 0x210000) & 1)
1685 device_printf(sc->sc_dev,
1686 "timeout waiting for ucode to initialize\n");
1690 MEM_WRITE_4(sc, 0x3000e0, 0);
1695 /* set of macros to handle unaligned little endian data in firmware image */
1696 #define GETLE32(p)	((p)[0] | (p)[1] << 8 | (p)[2] << 16 | (p)[3] << 24)
1697 #define GETLE16(p)	((p)[0] | (p)[1] << 8)
/*
 * ipw_load_firmware: parse the firmware image at 'fw' as a sequence of
 * { dst[4], len[2], data[len] } records (little-endian, unaligned) and
 * copy each record into device memory, then enable GPIOs and
 * interrupts, kick the firmware out of reset and sleep (at most 1 s on
 * "ipwinit") for the firmware-initialized wakeup.
 * NOTE(review): excerpt is line-sampled; the record-iteration loop
 * bounds ('end') and some intermediate lines are missing here.
 */
1699 ipw_load_firmware(struct ipw_softc *sc, const char *fw, int size)
1701 const uint8_t *p, *end;
/* each record: 32-bit device address, 16-bit length, then payload */
1709 dst = GETLE32(p); p += 4;
1710 len = GETLE16(p); p += 2;
1712 ipw_write_mem_1(sc, dst, p, len);
1716 CSR_WRITE_4(sc, IPW_CSR_IO, IPW_IO_GPIO1_ENABLE | IPW_IO_GPIO3_MASK |
1719 /* enable interrupts */
1720 CSR_WRITE_4(sc, IPW_CSR_INTR_MASK, IPW_INTR_MASK);
1722 /* kick the firmware */
1723 CSR_WRITE_4(sc, IPW_CSR_RST, 0);
1725 tmp = CSR_READ_4(sc, IPW_CSR_CTL);
1726 CSR_WRITE_4(sc, IPW_CSR_CTL, tmp | IPW_CTL_ALLOW_STANDBY);
1728 /* wait at most one second for firmware initialization to complete */
1729 if ((error = msleep(sc, &sc->sc_mtx, 0, "ipwinit", hz)) != 0) {
1730 device_printf(sc->sc_dev, "timeout waiting for firmware "
1731 "initialization to complete\n");
/* firmware is up: unmask GPIO1 */
1735 tmp = CSR_READ_4(sc, IPW_CSR_IO);
1736 CSR_WRITE_4(sc, IPW_CSR_IO, tmp | IPW_IO_GPIO1_MASK |
/*
 * ipw_config: push the complete operating configuration to the
 * firmware as a sequence of ipw_cmd() calls — mode, channel, MAC
 * address, configuration flags, rates, power mode, thresholds, ESSID,
 * BSSID, security/WEP settings, WPA IE, beacon interval and scan
 * options — then enable the adapter.  Each step's error path returns
 * early (the returns sit on lines missing from this excerpt).
 */
1743 ipw_config(struct ipw_softc *sc)
1745 struct ieee80211com *ic = &sc->sc_ic;
1746 struct ifnet *ifp = ic->ic_ifp;
1747 struct ipw_security security;
1748 struct ieee80211_key *k;
1749 struct ipw_wep_key wepkey;
1750 struct ipw_scan_options options;
1751 struct ipw_configuration config;
/* map net80211 operating mode onto the firmware's three modes */
1755 switch (ic->ic_opmode) {
1756 case IEEE80211_M_STA:
1757 case IEEE80211_M_HOSTAP:
1758 case IEEE80211_M_WDS:		/* XXX */
1759 data = htole32(IPW_MODE_BSS);
1761 case IEEE80211_M_IBSS:
1762 case IEEE80211_M_AHDEMO:
1763 data = htole32(IPW_MODE_IBSS);
1765 case IEEE80211_M_MONITOR:
1766 data = htole32(IPW_MODE_MONITOR);
1769 DPRINTF(("Setting mode to %u\n", le32toh(data)));
1770 error = ipw_cmd(sc, IPW_CMD_SET_MODE, &data, sizeof data);
/* a fixed channel is only meaningful for IBSS and monitor modes */
1774 if (ic->ic_opmode == IEEE80211_M_IBSS ||
1775 ic->ic_opmode == IEEE80211_M_MONITOR) {
1776 data = htole32(ieee80211_chan2ieee(ic, ic->ic_curchan));
1777 DPRINTF(("Setting channel to %u\n", le32toh(data)));
1778 error = ipw_cmd(sc, IPW_CMD_SET_CHANNEL, &data, sizeof data);
/* monitor mode needs nothing else — enable and return */
1783 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
1784 DPRINTF(("Enabling adapter\n"));
1785 return ipw_cmd(sc, IPW_CMD_ENABLE, NULL, 0);
1788 IEEE80211_ADDR_COPY(ic->ic_myaddr, IF_LLADDR(ifp));
1789 DPRINTF(("Setting MAC address to %6D\n", ic->ic_myaddr, ":"));
1790 error = ipw_cmd(sc, IPW_CMD_SET_MAC_ADDRESS, ic->ic_myaddr,
1791 IEEE80211_ADDR_LEN);
1795 config.flags = htole32(IPW_CFG_BSS_MASK | IPW_CFG_IBSS_MASK |
1796 IPW_CFG_PREAMBLE_AUTO | IPW_CFG_802_1x_ENABLE);
1797 if (ic->ic_opmode == IEEE80211_M_IBSS)
1798 config.flags |= htole32(IPW_CFG_IBSS_AUTO_START);
1799 if (ifp->if_flags & IFF_PROMISC)
1800 config.flags |= htole32(IPW_CFG_PROMISCUOUS);
1801 config.bss_chan = htole32(0x3fff); /* channels 1-14 */
1802 config.ibss_chan = htole32(0x7ff); /* channels 1-11 */
1803 DPRINTF(("Setting configuration to 0x%x\n", le32toh(config.flags)));
1804 error = ipw_cmd(sc, IPW_CMD_SET_CONFIGURATION, &config, sizeof config);
1808 data = htole32(0x3); /* 1, 2 */
1809 DPRINTF(("Setting basic tx rates to 0x%x\n", le32toh(data)));
1810 error = ipw_cmd(sc, IPW_CMD_SET_BASIC_TX_RATES, &data, sizeof data);
1814 data = htole32(0xf); /* 1, 2, 5.5, 11 */
1815 DPRINTF(("Setting tx rates to 0x%x\n", le32toh(data)));
1816 error = ipw_cmd(sc, IPW_CMD_SET_TX_RATES, &data, sizeof data);
/* CAM = constantly-awake mode (no power saving) */
1820 data = htole32(IPW_POWER_MODE_CAM);
1821 DPRINTF(("Setting power mode to %u\n", le32toh(data)));
1822 error = ipw_cmd(sc, IPW_CMD_SET_POWER_MODE, &data, sizeof data);
1826 if (ic->ic_opmode == IEEE80211_M_IBSS) {
1827 data = htole32(32); /* default value */
1828 DPRINTF(("Setting tx power index to %u\n", le32toh(data)));
1829 error = ipw_cmd(sc, IPW_CMD_SET_TX_POWER_INDEX, &data,
1835 data = htole32(ic->ic_rtsthreshold);
1836 DPRINTF(("Setting RTS threshold to %u\n", le32toh(data)));
1837 error = ipw_cmd(sc, IPW_CMD_SET_RTS_THRESHOLD, &data, sizeof data);
1841 data = htole32(ic->ic_fragthreshold);
1842 DPRINTF(("Setting frag threshold to %u\n", le32toh(data)));
1843 error = ipw_cmd(sc, IPW_CMD_SET_FRAG_THRESHOLD, &data, sizeof data);
1848 if (ipw_debug > 0) {
1849 printf("Setting ESSID to ");
1850 ieee80211_print_essid(ic->ic_des_ssid[0].ssid,
1851 ic->ic_des_ssid[0].len);
1855 error = ipw_cmd(sc, IPW_CMD_SET_ESSID, ic->ic_des_ssid[0].ssid,
1856 ic->ic_des_ssid[0].len);
1860 /* no mandatory BSSID */
1861 DPRINTF(("Setting mandatory BSSID to null\n"));
1862 error = ipw_cmd(sc, IPW_CMD_SET_MANDATORY_BSSID, NULL, 0);
1866 if (ic->ic_flags & IEEE80211_F_DESBSSID) {
1867 DPRINTF(("Setting desired BSSID to %6D\n", ic->ic_des_bssid,
1869 error = ipw_cmd(sc, IPW_CMD_SET_DESIRED_BSSID,
1870 ic->ic_des_bssid, IEEE80211_ADDR_LEN);
/* auth mode follows the bss node's setting; cipher always "none"
 * (WEP is handled through the dedicated WEP commands below) */
1875 memset(&security, 0, sizeof security);
1876 security.authmode = (ic->ic_bss->ni_authmode == IEEE80211_AUTH_SHARED) ?
1877 IPW_AUTH_SHARED : IPW_AUTH_OPEN;
1878 security.ciphers = htole32(IPW_CIPHER_NONE);
1879 DPRINTF(("Setting authmode to %u\n", security.authmode));
1880 error = ipw_cmd(sc, IPW_CMD_SET_SECURITY_INFORMATION, &security,
/* upload each configured WEP key, then the default tx key index */
1885 if (ic->ic_flags & IEEE80211_F_PRIVACY) {
1886 k = ic->ic_crypto.cs_nw_keys;
1887 for (i = 0; i < IEEE80211_WEP_NKID; i++, k++) {
1888 if (k->wk_keylen == 0)
1892 wepkey.len = k->wk_keylen;
1893 memset(wepkey.key, 0, sizeof wepkey.key);
1894 memcpy(wepkey.key, k->wk_key, k->wk_keylen);
1895 DPRINTF(("Setting wep key index %u len %u\n",
1896 wepkey.idx, wepkey.len));
1897 error = ipw_cmd(sc, IPW_CMD_SET_WEP_KEY, &wepkey,
1903 data = htole32(ic->ic_crypto.cs_def_txkey);
1904 DPRINTF(("Setting wep tx key index to %u\n", le32toh(data)));
1905 error = ipw_cmd(sc, IPW_CMD_SET_WEP_KEY_INDEX, &data,
1911 data = htole32((ic->ic_flags & IEEE80211_F_PRIVACY) ? IPW_WEPON : 0);
1912 DPRINTF(("Setting wep flags to 0x%x\n", le32toh(data)));
1913 error = ipw_cmd(sc, IPW_CMD_SET_WEP_FLAGS, &data, sizeof data);
/* an all-zero WPA IE of the standard size is always sent */
1918 struct ipw_wpa_ie ie;
1920 memset(&ie, 0, sizeof ie);
1921 ie.len = htole32(sizeof (struct ieee80211_ie_wpa));
1922 DPRINTF(("Setting wpa ie\n"));
1923 error = ipw_cmd(sc, IPW_CMD_SET_WPA_IE, &ie, sizeof ie);
1928 if (ic->ic_opmode == IEEE80211_M_IBSS) {
1929 data = htole32(ic->ic_bintval);
1930 DPRINTF(("Setting beacon interval to %u\n", le32toh(data)));
1931 error = ipw_cmd(sc, IPW_CMD_SET_BEACON_INTERVAL, &data,
1938 options.channels = htole32(0x3fff); /* scan channels 1-14 */
1939 DPRINTF(("Setting scan options to 0x%x\n", le32toh(options.flags)));
1940 error = ipw_cmd(sc, IPW_CMD_SET_SCAN_OPTIONS, &options, sizeof options);
1944 /* finally, enable adapter (start scanning for an access point) */
1945 DPRINTF(("Enabling adapter\n"));
1946 return ipw_cmd(sc, IPW_CMD_ENABLE, NULL, 0);
1950 * Handler for sc_init_task. This is a simple wrapper around ipw_init().
1951 * It is called on firmware panics or on watchdog timeouts.
/* (body not visible in this excerpt — presumably just calls ipw_init(sc)) */
1954 ipw_init_task(void *context, int pending)
/*
 * ipw_init: (re)initialize the device.  Resets the adapter, loads the
 * mode-specific firmware image via firmware(9) (dropping the driver
 * lock while doing so), uploads microcode and firmware, programs the
 * tx/rx/status ring base registers, reads the information-table base
 * addresses and applies the configuration.  Error paths unwind through
 * the fail1/fail2 labels at the bottom.
 *
 * NOTE(review): excerpt is line-sampled; 'goto fail*' statements,
 * 'break's and closing braces between the visible lines are missing.
 */
1960 ipw_init(void *priv)
1962 struct ipw_softc *sc = priv;
1963 struct ieee80211com *ic = &sc->sc_ic;
1964 struct ifnet *ifp = ic->ic_ifp;
1965 const struct firmware *fp;
1966 const struct ipw_firmware_hdr *hdr;
1967 const char *imagename, *fw;
1971 * ipw_init() is exposed through ifp->if_init so it might be called
1972 * without the driver's lock held. Since msleep() doesn't like being
1973 * called on a recursed mutex, we acquire the driver's lock only if
1974 * we're not already holding it.
1976 if (!(owned = mtx_owned(&sc->sc_mtx)))
1977 mtx_lock(&sc->sc_mtx);
1980 * Avoid re-entrant calls. We need to release the mutex in ipw_init()
1981 * when loading the firmware and we don't want to be called during this
1984 if (sc->flags & IPW_FLAG_INIT_LOCKED) {
1986 mtx_unlock(&sc->sc_mtx);
1989 sc->flags |= IPW_FLAG_INIT_LOCKED;
1993 if (ipw_reset(sc) != 0) {
1994 device_printf(sc->sc_dev, "could not reset adapter\n");
/* each operating mode has its own firmware image */
1998 switch (ic->ic_opmode) {
1999 case IEEE80211_M_STA:
2000 imagename = "ipw_bss";
2002 case IEEE80211_M_IBSS:
2003 imagename = "ipw_ibss";
2005 case IEEE80211_M_MONITOR:
2006 imagename = "ipw_monitor";
2009 imagename = NULL; /* should not get there */
2013 * Load firmware image using the firmware(9) subsystem. We need to
2014 * release the driver's lock first.
2016 if (sc->sc_firmware == NULL || strcmp(sc->sc_firmware->name,
2018 mtx_unlock(&sc->sc_mtx);
2019 if (sc->sc_firmware != NULL)
2020 firmware_put(sc->sc_firmware, FIRMWARE_UNLOAD);
2021 sc->sc_firmware = firmware_get(imagename);
2022 mtx_lock(&sc->sc_mtx);
2025 if (sc->sc_firmware == NULL) {
2026 device_printf(sc->sc_dev,
2027 "could not load firmware image '%s'\n", imagename);
/* validate image: header plus the main and ucode sections must fit */
2031 fp = sc->sc_firmware;
2032 if (fp->datasize < sizeof *hdr) {
2033 device_printf(sc->sc_dev,
2034 "firmware image too short %zu\n", fp->datasize);
2038 hdr = (const struct ipw_firmware_hdr *)fp->data;
2040 if (fp->datasize < sizeof *hdr + le32toh(hdr->mainsz) +
2041 le32toh(hdr->ucodesz)) {
2042 device_printf(sc->sc_dev,
2043 "firmware image too short %zu\n", fp->datasize);
/* microcode section follows the main firmware section in the image */
2047 fw = (const char *)fp->data + sizeof *hdr + le32toh(hdr->mainsz);
2048 if (ipw_load_ucode(sc, fw, le32toh(hdr->ucodesz)) != 0) {
2049 device_printf(sc->sc_dev, "could not load microcode\n");
2053 ipw_stop_master(sc);
2056 * Setup tx, rx and status rings.
2058 sc->txold = IPW_NTBD - 1;
2060 sc->txfree = IPW_NTBD - 2;
2061 sc->rxcur = IPW_NRBD - 1;
2063 CSR_WRITE_4(sc, IPW_CSR_TX_BASE,  sc->tbd_phys);
2064 CSR_WRITE_4(sc, IPW_CSR_TX_SIZE,  IPW_NTBD);
2065 CSR_WRITE_4(sc, IPW_CSR_TX_READ,  0);
2066 CSR_WRITE_4(sc, IPW_CSR_TX_WRITE, sc->txcur);
2068 CSR_WRITE_4(sc, IPW_CSR_RX_BASE,  sc->rbd_phys);
2069 CSR_WRITE_4(sc, IPW_CSR_RX_SIZE,  IPW_NRBD);
2070 CSR_WRITE_4(sc, IPW_CSR_RX_READ,  0);
2071 CSR_WRITE_4(sc, IPW_CSR_RX_WRITE, sc->rxcur);
2073 CSR_WRITE_4(sc, IPW_CSR_STATUS_BASE, sc->status_phys);
/* main firmware section sits immediately after the header */
2075 fw = (const char *)fp->data + sizeof *hdr;
2076 if (ipw_load_firmware(sc, fw, le32toh(hdr->mainsz)) != 0) {
2077 device_printf(sc->sc_dev, "could not load firmware\n");
2081 sc->flags |= IPW_FLAG_FW_INITED;
2083 /* retrieve information tables base addresses */
2084 sc->table1_base = CSR_READ_4(sc, IPW_CSR_TABLE1_BASE);
2085 sc->table2_base = CSR_READ_4(sc, IPW_CSR_TABLE2_BASE);
2087 ipw_write_table1(sc, IPW_INFO_LOCK, 0);
2089 if (ipw_config(sc) != 0) {
2090 device_printf(sc->sc_dev, "device configuration failed\n");
2094 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2095 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2097 sc->flags &=~ IPW_FLAG_INIT_LOCKED;
2100 mtx_unlock(&sc->sc_mtx);
/* error unwinding: release firmware ref, mark interface down */
2104 fail2:	firmware_put(fp, FIRMWARE_UNLOAD);
2105 sc->sc_firmware = NULL;
2106 fail1:	ifp->if_flags &= ~IFF_UP;
2108 sc->flags &=~ IPW_FLAG_INIT_LOCKED;
2110 mtx_unlock(&sc->sc_mtx);
/*
 * ipw_stop: bring the interface down.  Drops the 802.11 state machine
 * back to INIT, quiesces and software-resets the adapter, releases all
 * tx buffers, cancels the watchdog and clears the RUNNING/OACTIVE
 * driver flags.
 */
2114 ipw_stop(void *priv)
2116 struct ipw_softc *sc = priv;
2117 struct ieee80211com *ic = &sc->sc_ic;
2118 struct ifnet *ifp = ic->ic_ifp;
2121 mtx_lock(&sc->sc_mtx);
2123 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
2125 ipw_stop_master(sc);
2127 CSR_WRITE_4(sc, IPW_CSR_RST, IPW_RST_SW_RESET);
2130 * Release tx buffers.
2132 for (i = 0; i < IPW_NTBD; i++)
2133 ipw_release_sbd(sc, &sc->stbd_list[i]);
/* stop the watchdog countdown */
2135 sc->sc_tx_timer = 0;
2137 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2139 mtx_unlock(&sc->sc_mtx);
/*
 * ipw_sysctl_stats: sysctl handler exporting the firmware statistics
 * table.  Before the firmware is up, a zeroed buffer is returned.
 * Otherwise the table is read through the auto-increment data port:
 * the first read yields the entry count, each following read yields an
 * indirect address whose target value is stored into buf[].
 * NOTE(review): buf[0] is not assigned in the visible lines — the
 * assignment presumably sits on a line missing from this excerpt.
 */
2143 ipw_sysctl_stats(SYSCTL_HANDLER_ARGS)
2145 struct ipw_softc *sc = arg1;
2146 uint32_t i, size, buf[256];
2148 if (!(sc->flags & IPW_FLAG_FW_INITED)) {
2149 memset(buf, 0, sizeof buf);
2150 return SYSCTL_OUT(req, buf, sizeof buf);
/* point the auto-increment port at the start of table 1 */
2153 CSR_WRITE_4(sc, IPW_CSR_AUTOINC_ADDR, sc->table1_base);
/* first word is the entry count, clamped to the local buffer size */
2155 size = min(CSR_READ_4(sc, IPW_CSR_AUTOINC_DATA), 256);
2156 for (i = 1; i < size; i++)
2157 buf[i] = MEM_READ_4(sc, CSR_READ_4(sc, IPW_CSR_AUTOINC_DATA));
2159 return SYSCTL_OUT(req, buf, sizeof buf);
/*
 * ipw_sysctl_radio: sysctl handler reporting the radio switch state.
 * Returns 1 ("radio on") unless the adapter has a radio switch AND the
 * IO register reports it disabled.
 */
2163 ipw_sysctl_radio(SYSCTL_HANDLER_ARGS)
2165 struct ipw_softc *sc = arg1;
2168 val = !((sc->flags & IPW_FLAG_HAS_RADIO_SWITCH) &&
2169 (CSR_READ_4(sc, IPW_CSR_IO) & IPW_IO_RADIO_DISABLED));
2171 return SYSCTL_OUT(req, &val, sizeof val);
/*
 * ipw_read_table1: double-indirect read from information table 1 —
 * the table entry at 'off' holds the address of the actual value.
 */
2175 ipw_read_table1(struct ipw_softc *sc, uint32_t off)
2177 return MEM_READ_4(sc, MEM_READ_4(sc, sc->table1_base + off));
/*
 * ipw_write_table1: double-indirect write to information table 1 —
 * 'info' is stored at the address held by the table entry at 'off'.
 */
2181 ipw_write_table1(struct ipw_softc *sc, uint32_t off, uint32_t info)
2183 MEM_WRITE_4(sc, MEM_READ_4(sc, sc->table1_base + off), info);
/*
 * ipw_read_table2: read an entry from information table 2 into 'buf'.
 * The table entry at 'off' consists of a 32-bit data address followed
 * by a packed count/size word; count*size bytes are copied out via
 * ipw_read_mem_1().  '*len' is the caller's buffer size on input
 * (bounds check on a line missing from this excerpt) and presumably
 * receives the actual length on output — confirm against full source.
 */
2187 ipw_read_table2(struct ipw_softc *sc, uint32_t off, void *buf, uint32_t *len)
2189 uint32_t addr, info;
2190 uint16_t count, size;
2193 /* addr[4] + count[2] + size[2] */
2194 addr = MEM_READ_4(sc, sc->table2_base + off);
2195 info = MEM_READ_4(sc, sc->table2_base + off + 4);
/* low 16 bits of 'info' are the element size; total = count * size */
2198 size = info & 0xffff;
2199 total = count * size;
2207 ipw_read_mem_1(sc, addr, buf, total);
/*
 * ipw_read_mem_1: read 'count' bytes of device memory starting at
 * 'offset' into 'datap', one byte at a time through the indirect
 * access window (address register takes a dword-aligned address, the
 * byte lane is selected by the low two offset bits).
 */
2213 ipw_read_mem_1(struct ipw_softc *sc, bus_size_t offset, uint8_t *datap,
2216 for (; count > 0; offset++, datap++, count--) {
2217 CSR_WRITE_4(sc, IPW_CSR_INDIRECT_ADDR, offset & ~3);
2218 *datap = CSR_READ_1(sc, IPW_CSR_INDIRECT_DATA + (offset & 3));
2223 ipw_write_mem_1(struct ipw_softc *sc, bus_size_t offset, const uint8_t *datap,
2226 for (; count > 0; offset++, datap++, count--) {
2227 CSR_WRITE_4(sc, IPW_CSR_INDIRECT_ADDR, offset & ~3);
2228 CSR_WRITE_1(sc, IPW_CSR_INDIRECT_DATA + (offset & 3), *datap);