1 /* $OpenBSD: if_iwm.c,v 1.39 2015/03/23 00:35:19 jsg Exp $ */
4 * Copyright (c) 2014 genua mbh <info@genua.de>
5 * Copyright (c) 2014 Fixup Software Ltd.
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22 * which were used as the reference documentation for this implementation.
24 * Driver version we are currently based off of is
25 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
27 ***********************************************************************
29 * This file is provided under a dual BSD/GPLv2 license. When using or
30 * redistributing this file, you may do so under either license.
34 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
36 * This program is free software; you can redistribute it and/or modify
37 * it under the terms of version 2 of the GNU General Public License as
38 * published by the Free Software Foundation.
40 * This program is distributed in the hope that it will be useful, but
41 * WITHOUT ANY WARRANTY; without even the implied warranty of
42 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
43 * General Public License for more details.
45 * You should have received a copy of the GNU General Public License
46 * along with this program; if not, write to the Free Software
47 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
50 * The full GNU General Public License is included in this distribution
51 * in the file called COPYING.
53 * Contact Information:
54 * Intel Linux Wireless <ilw@linux.intel.com>
55 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
60 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61 * All rights reserved.
63 * Redistribution and use in source and binary forms, with or without
64 * modification, are permitted provided that the following conditions
67 * * Redistributions of source code must retain the above copyright
68 * notice, this list of conditions and the following disclaimer.
69 * * Redistributions in binary form must reproduce the above copyright
70 * notice, this list of conditions and the following disclaimer in
71 * the documentation and/or other materials provided with the
73 * * Neither the name Intel Corporation nor the names of its
74 * contributors may be used to endorse or promote products derived
75 * from this software without specific prior written permission.
77 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
91 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
93 * Permission to use, copy, modify, and distribute this software for any
94 * purpose with or without fee is hereby granted, provided that the above
95 * copyright notice and this permission notice appear in all copies.
97 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
105 #include <sys/cdefs.h>
106 __FBSDID("$FreeBSD$");
108 #include "opt_wlan.h"
110 #include <sys/param.h>
112 #include <sys/conf.h>
113 #include <sys/endian.h>
114 #include <sys/firmware.h>
115 #include <sys/kernel.h>
116 #include <sys/malloc.h>
117 #include <sys/mbuf.h>
118 #include <sys/mutex.h>
119 #include <sys/module.h>
120 #include <sys/proc.h>
121 #include <sys/rman.h>
122 #include <sys/socket.h>
123 #include <sys/sockio.h>
124 #include <sys/sysctl.h>
125 #include <sys/linker.h>
127 #include <machine/bus.h>
128 #include <machine/endian.h>
129 #include <machine/resource.h>
131 #include <dev/pci/pcivar.h>
132 #include <dev/pci/pcireg.h>
137 #include <net/if_var.h>
138 #include <net/if_arp.h>
139 #include <net/if_dl.h>
140 #include <net/if_media.h>
141 #include <net/if_types.h>
143 #include <netinet/in.h>
144 #include <netinet/in_systm.h>
145 #include <netinet/if_ether.h>
146 #include <netinet/ip.h>
148 #include <net80211/ieee80211_var.h>
149 #include <net80211/ieee80211_regdomain.h>
150 #include <net80211/ieee80211_ratectl.h>
151 #include <net80211/ieee80211_radiotap.h>
153 #include <dev/iwm/if_iwmreg.h>
154 #include <dev/iwm/if_iwmvar.h>
155 #include <dev/iwm/if_iwm_debug.h>
156 #include <dev/iwm/if_iwm_util.h>
157 #include <dev/iwm/if_iwm_binding.h>
158 #include <dev/iwm/if_iwm_phy_db.h>
159 #include <dev/iwm/if_iwm_mac_ctxt.h>
160 #include <dev/iwm/if_iwm_phy_ctxt.h>
161 #include <dev/iwm/if_iwm_time_event.h>
162 #include <dev/iwm/if_iwm_power.h>
163 #include <dev/iwm/if_iwm_scan.h>
165 #include <dev/iwm/if_iwm_pcie_trans.h>
/*
 * Channel numbers the NVM parser iterates over.  The first
 * IWM_NUM_2GHZ_CHANNELS (14) entries are the 2 GHz channels; the
 * remainder are 5 GHz channels.
 * NOTE(review): extraction dropped the closing brace/blank lines here;
 * the code is left byte-identical.
 */
167 const uint8_t iwm_nvm_channels[] = {
169 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
171 36, 40, 44, 48, 52, 56, 60, 64,
172 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
173 149, 153, 157, 161, 165
175 #define IWM_NUM_2GHZ_CHANNELS 14
/* Compile-time guard: the fixed channel map must hold every NVM channel. */
177 _Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
178 "IWM_NUM_CHANNELS is too small");
181 * XXX For now, there's simply a fixed set of rate table entries
182 * that are populated.
/*
 * Rate table: each entry pairs a rate value with its PLCP code.  The
 * rate values are in 500 kb/s units (2 == 1 Mb/s ... 108 == 54 Mb/s),
 * matching the IWM_RATE_*_PLCP names.  CCK rates come first, then OFDM.
 */
184 const struct iwm_rate {
188 { 2, IWM_RATE_1M_PLCP },
189 { 4, IWM_RATE_2M_PLCP },
190 { 11, IWM_RATE_5M_PLCP },
191 { 22, IWM_RATE_11M_PLCP },
192 { 12, IWM_RATE_6M_PLCP },
193 { 18, IWM_RATE_9M_PLCP },
194 { 24, IWM_RATE_12M_PLCP },
195 { 36, IWM_RATE_18M_PLCP },
196 { 48, IWM_RATE_24M_PLCP },
197 { 72, IWM_RATE_36M_PLCP },
198 { 96, IWM_RATE_48M_PLCP },
199 { 108, IWM_RATE_54M_PLCP },
/* Index helpers: entries [0..3] are CCK, [4..] are OFDM. */
201 #define IWM_RIDX_CCK 0
202 #define IWM_RIDX_OFDM 4
203 #define IWM_RIDX_MAX (nitems(iwm_rates)-1)
204 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
205 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
/*
 * Forward declarations for the file-local (static) helpers defined below:
 * firmware loading/parsing, DMA and ring management, interrupt handling,
 * NVM parsing, station/quota management, and net80211 glue.
 */
207 static int iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
208 static int iwm_firmware_store_section(struct iwm_softc *,
210 const uint8_t *, size_t);
211 static int iwm_set_default_calib(struct iwm_softc *, const void *);
212 static void iwm_fw_info_free(struct iwm_fw_info *);
213 static int iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
214 static void iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
215 static int iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
216 bus_size_t, bus_size_t);
217 static void iwm_dma_contig_free(struct iwm_dma_info *);
218 static int iwm_alloc_fwmem(struct iwm_softc *);
219 static void iwm_free_fwmem(struct iwm_softc *);
220 static int iwm_alloc_sched(struct iwm_softc *);
221 static void iwm_free_sched(struct iwm_softc *);
222 static int iwm_alloc_kw(struct iwm_softc *);
223 static void iwm_free_kw(struct iwm_softc *);
224 static int iwm_alloc_ict(struct iwm_softc *);
225 static void iwm_free_ict(struct iwm_softc *);
226 static int iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
227 static void iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
228 static void iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
229 static int iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
231 static void iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
232 static void iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
233 static void iwm_enable_interrupts(struct iwm_softc *);
234 static void iwm_restore_interrupts(struct iwm_softc *);
235 static void iwm_disable_interrupts(struct iwm_softc *);
236 static void iwm_ict_reset(struct iwm_softc *);
237 static int iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
238 static void iwm_stop_device(struct iwm_softc *);
239 static void iwm_mvm_nic_config(struct iwm_softc *);
240 static int iwm_nic_rx_init(struct iwm_softc *);
241 static int iwm_nic_tx_init(struct iwm_softc *);
242 static int iwm_nic_init(struct iwm_softc *);
243 static void iwm_enable_txq(struct iwm_softc *, int, int);
244 static int iwm_post_alive(struct iwm_softc *);
245 static int iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
246 uint16_t, uint8_t *, uint16_t *);
247 static int iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
249 static uint32_t iwm_eeprom_channel_flags(uint16_t);
250 static void iwm_add_channel_band(struct iwm_softc *,
251 struct ieee80211_channel[], int, int *, int, int,
253 static void iwm_init_channel_map(struct ieee80211com *, int, int *,
254 struct ieee80211_channel[]);
255 static int iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
256 const uint16_t *, const uint16_t *, uint8_t,
258 struct iwm_nvm_section;
259 static int iwm_parse_nvm_sections(struct iwm_softc *,
260 struct iwm_nvm_section *);
261 static int iwm_nvm_init(struct iwm_softc *);
262 static int iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
263 const uint8_t *, uint32_t);
264 static int iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
265 static int iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
266 static int iwm_fw_alive(struct iwm_softc *, uint32_t);
267 static int iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
268 static int iwm_send_phy_cfg_cmd(struct iwm_softc *);
269 static int iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
270 enum iwm_ucode_type);
271 static int iwm_run_init_mvm_ucode(struct iwm_softc *, int);
272 static int iwm_rx_addbuf(struct iwm_softc *, int, int);
273 static int iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
274 static int iwm_mvm_get_signal_strength(struct iwm_softc *,
275 struct iwm_rx_phy_info *);
276 static void iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
277 struct iwm_rx_packet *,
278 struct iwm_rx_data *);
279 static int iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
280 static void iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
281 struct iwm_rx_data *);
282 static int iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
283 struct iwm_rx_packet *,
285 static void iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
286 struct iwm_rx_data *);
287 static void iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
289 static void iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
292 static const struct iwm_rate *
293 iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
294 struct ieee80211_frame *, struct iwm_tx_cmd *);
295 static int iwm_tx(struct iwm_softc *, struct mbuf *,
296 struct ieee80211_node *, int);
297 static int iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
298 const struct ieee80211_bpf_params *);
299 static void iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *,
300 struct iwm_mvm_add_sta_cmd_v5 *);
301 static int iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
302 struct iwm_mvm_add_sta_cmd_v6 *,
304 static int iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
306 static int iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
307 static int iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
308 static int iwm_mvm_add_int_sta_common(struct iwm_softc *,
309 struct iwm_int_sta *,
310 const uint8_t *, uint16_t, uint16_t);
311 static int iwm_mvm_add_aux_sta(struct iwm_softc *);
312 static int iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
313 static int iwm_auth(struct ieee80211vap *, struct iwm_softc *);
314 static int iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
315 static int iwm_release(struct iwm_softc *, struct iwm_node *);
316 static struct ieee80211_node *
317 iwm_node_alloc(struct ieee80211vap *,
318 const uint8_t[IEEE80211_ADDR_LEN]);
319 static void iwm_setrates(struct iwm_softc *, struct iwm_node *);
320 static int iwm_media_change(struct ifnet *);
321 static int iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
322 static void iwm_endscan_cb(void *, int);
323 static int iwm_init_hw(struct iwm_softc *);
324 static void iwm_init(struct iwm_softc *);
325 static void iwm_start(struct iwm_softc *);
326 static void iwm_stop(struct iwm_softc *);
327 static void iwm_watchdog(void *);
328 static void iwm_parent(struct ieee80211com *);
331 iwm_desc_lookup(uint32_t);
332 static void iwm_nic_error(struct iwm_softc *);
334 static void iwm_notif_intr(struct iwm_softc *);
335 static void iwm_intr(void *);
336 static int iwm_attach(device_t);
337 static void iwm_preinit(void *);
338 static int iwm_detach_local(struct iwm_softc *sc, int);
339 static void iwm_init_task(void *);
340 static void iwm_radiotap_attach(struct iwm_softc *);
341 static struct ieee80211vap *
342 iwm_vap_create(struct ieee80211com *,
343 const char [IFNAMSIZ], int,
344 enum ieee80211_opmode, int,
345 const uint8_t [IEEE80211_ADDR_LEN],
346 const uint8_t [IEEE80211_ADDR_LEN]);
347 static void iwm_vap_delete(struct ieee80211vap *);
348 static void iwm_scan_start(struct ieee80211com *);
349 static void iwm_scan_end(struct ieee80211com *);
350 static void iwm_update_mcast(struct ieee80211com *);
351 static void iwm_set_channel(struct ieee80211com *);
352 static void iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
353 static void iwm_scan_mindwell(struct ieee80211_scan_state *);
354 static int iwm_detach(device_t);
/*
 * Validate the length of the firmware crypto-scheme (CSCHEME) TLV.
 * NOTE(review): the return statements are not visible in this extraction;
 * presumably it rejects a short section and otherwise succeeds — confirm
 * against the full source.  No scheme data is retained (see comment below).
 */
361 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
363 const struct iwm_fw_cscheme_list *l = (const void *)data;
/* Reject sections too small to hold the header plus the advertised entries. */
365 if (dlen < sizeof(*l) ||
366 dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
369 /* we don't actually store anything for now, always use s/w crypto */
/*
 * Record one firmware image section for ucode image 'type' in
 * sc->sc_fw.fw_sects[type].  The first 32 bits of 'data' are the device
 * load offset; the remainder is the section payload.  Bounds-checks the
 * image type, the minimum section length, and the per-image section count.
 * NOTE(review): error-return lines were dropped by extraction; code left
 * byte-identical.
 */
375 iwm_firmware_store_section(struct iwm_softc *sc,
376 enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
378 struct iwm_fw_sects *fws;
379 struct iwm_fw_onesect *fwone;
381 if (type >= IWM_UCODE_TYPE_MAX)
383 if (dlen < sizeof(uint32_t))
386 fws = &sc->sc_fw.fw_sects[type];
387 if (fws->fw_count >= IWM_UCODE_SECT_MAX)
390 fwone = &fws->fw_sect[fws->fw_count];
392 /* first 32bit are device load offset */
393 memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
/* Payload points into the firmware image; no copy is made. */
396 fwone->fws_data = data + sizeof(uint32_t);
397 fwone->fws_len = dlen - sizeof(uint32_t);
400 fws->fw_totlen += fwone->fws_len;
405 /* iwlwifi: iwl-drv.c */
/*
 * Wire layout of the IWM_UCODE_TLV_DEF_CALIB section: a ucode type
 * (first field, not visible in this extraction) followed by the default
 * calibration triggers for that image.
 */
406 struct iwm_tlv_calib_data {
408 struct iwm_tlv_calib_ctrl calib;
/*
 * Parse a DEF_CALIB TLV and stash the per-ucode-type default calibration
 * flow/event triggers in sc->sc_default_calib[].  Rejects out-of-range
 * ucode types with a diagnostic.
 */
412 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
414 const struct iwm_tlv_calib_data *def_calib = data;
415 uint32_t ucode_type = le32toh(def_calib->ucode_type);
417 if (ucode_type >= IWM_UCODE_TYPE_MAX) {
418 device_printf(sc->sc_dev,
419 "Wrong ucode_type %u for default "
420 "calibration.\n", ucode_type);
/* Triggers are stored as-is (firmware byte order preserved). */
424 sc->sc_default_calib[ucode_type].flow_trigger =
425 def_calib->calib.flow_trigger;
426 sc->sc_default_calib[ucode_type].event_trigger =
427 def_calib->calib.event_trigger;
/*
 * Release the firmware image obtained via firmware_get() and clear the
 * per-image section bookkeeping.  fw_status is deliberately preserved:
 * the caller manages the INPROGRESS/DONE/NONE state machine.
 */
433 iwm_fw_info_free(struct iwm_fw_info *fw)
435 firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
437 /* don't touch fw->fw_status */
438 memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
/*
 * Load the firmware image named by sc->sc_fwname via firmware(9) and walk
 * its TLV sections, populating sc->sc_fw plus assorted capability fields
 * (sc_capaflags, sc_capa_max_probe_len, sc_fw_phy_config, default calib).
 * Serializes concurrent loads with fw_status + msleep on &sc->sc_fw.
 * NOTE(review): many structural lines (braces, returns, error labels) were
 * dropped by extraction; code below is byte-identical to what was visible.
 */
442 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
444 struct iwm_fw_info *fw = &sc->sc_fw;
445 const struct iwm_tlv_ucode_header *uhdr;
446 struct iwm_ucode_tlv tlv;
447 enum iwm_ucode_tlv_type tlv_type;
448 const struct firmware *fwp;
/* Already parsed and not switching to the INIT image: nothing to do. */
453 if (fw->fw_status == IWM_FW_STATUS_DONE &&
454 ucode_type != IWM_UCODE_TYPE_INIT)
/* Wait out any load already in flight, then claim the slot ourselves. */
457 while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
458 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
459 fw->fw_status = IWM_FW_STATUS_INPROGRESS;
461 if (fw->fw_fp != NULL)
462 iwm_fw_info_free(fw);
465 * Load firmware into driver memory.
469 fwp = firmware_get(sc->sc_fwname);
472 device_printf(sc->sc_dev,
473 "could not read firmware %s (error %d)\n",
474 sc->sc_fwname, error);
480 * Parse firmware contents
/* TLV images start with a zero word followed by the ucode magic. */
483 uhdr = (const void *)fw->fw_fp->data;
484 if (*(const uint32_t *)fw->fw_fp->data != 0
485 || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
486 device_printf(sc->sc_dev, "invalid firmware %s\n",
492 sc->sc_fwver = le32toh(uhdr->ver);
494 len = fw->fw_fp->datasize - sizeof(*uhdr);
/* Walk the TLVs; each payload is padded to a 4-byte boundary (see bottom). */
496 while (len >= sizeof(tlv)) {
498 const void *tlv_data;
500 memcpy(&tlv, data, sizeof(tlv));
501 tlv_len = le32toh(tlv.length);
502 tlv_type = le32toh(tlv.type);
509 device_printf(sc->sc_dev,
510 "firmware too short: %zu bytes\n",
516 switch ((int)tlv_type) {
517 case IWM_UCODE_TLV_PROBE_MAX_LEN:
518 if (tlv_len < sizeof(uint32_t)) {
519 device_printf(sc->sc_dev,
520 "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
526 sc->sc_capa_max_probe_len
527 = le32toh(*(const uint32_t *)tlv_data);
528 /* limit it to something sensible */
529 if (sc->sc_capa_max_probe_len > (1<<16)) {
530 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
531 "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
532 "ridiculous\n", __func__);
537 case IWM_UCODE_TLV_PAN:
539 device_printf(sc->sc_dev,
540 "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
546 sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
548 case IWM_UCODE_TLV_FLAGS:
549 if (tlv_len < sizeof(uint32_t)) {
550 device_printf(sc->sc_dev,
551 "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
558 * Apparently there can be many flags, but Linux driver
559 * parses only the first one, and so do we.
561 * XXX: why does this override IWM_UCODE_TLV_PAN?
562 * Intentional or a bug? Observations from
563 * current firmware file:
564 * 1) TLV_PAN is parsed first
565 * 2) TLV_FLAGS contains TLV_FLAGS_PAN
566 * ==> this resets TLV_PAN to itself... hnnnk
568 sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
570 case IWM_UCODE_TLV_CSCHEME:
571 if ((error = iwm_store_cscheme(sc,
572 tlv_data, tlv_len)) != 0) {
573 device_printf(sc->sc_dev,
574 "%s: iwm_store_cscheme(): returned %d\n",
580 case IWM_UCODE_TLV_NUM_OF_CPU:
581 if (tlv_len != sizeof(uint32_t)) {
582 device_printf(sc->sc_dev,
583 "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) < sizeof(uint32_t)\n",
589 if (le32toh(*(const uint32_t*)tlv_data) != 1) {
590 device_printf(sc->sc_dev,
591 "%s: driver supports "
592 "only TLV_NUM_OF_CPU == 1",
/* Section TLVs: hand the payload to the per-image section store. */
598 case IWM_UCODE_TLV_SEC_RT:
599 if ((error = iwm_firmware_store_section(sc,
600 IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0) {
601 device_printf(sc->sc_dev,
602 "%s: IWM_UCODE_TYPE_REGULAR: iwm_firmware_store_section() failed; %d\n",
608 case IWM_UCODE_TLV_SEC_INIT:
609 if ((error = iwm_firmware_store_section(sc,
610 IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0) {
611 device_printf(sc->sc_dev,
612 "%s: IWM_UCODE_TYPE_INIT: iwm_firmware_store_section() failed; %d\n",
618 case IWM_UCODE_TLV_SEC_WOWLAN:
619 if ((error = iwm_firmware_store_section(sc,
620 IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0) {
621 device_printf(sc->sc_dev,
622 "%s: IWM_UCODE_TYPE_WOW: iwm_firmware_store_section() failed; %d\n",
628 case IWM_UCODE_TLV_DEF_CALIB:
629 if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
630 device_printf(sc->sc_dev,
631 "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
634 (int) sizeof(struct iwm_tlv_calib_data));
638 if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
639 device_printf(sc->sc_dev,
640 "%s: iwm_set_default_calib() failed: %d\n",
646 case IWM_UCODE_TLV_PHY_SKU:
647 if (tlv_len != sizeof(uint32_t)) {
649 device_printf(sc->sc_dev,
650 "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
655 sc->sc_fw_phy_config =
656 le32toh(*(const uint32_t *)tlv_data);
659 case IWM_UCODE_TLV_API_CHANGES_SET:
660 case IWM_UCODE_TLV_ENABLED_CAPABILITIES:
661 /* ignore, not used by current driver */
665 device_printf(sc->sc_dev,
666 "%s: unknown firmware section %d, abort\n",
/* Advance past the payload, honoring the 4-byte TLV alignment. */
672 len -= roundup(tlv_len, 4);
673 data += roundup(tlv_len, 4);
676 KASSERT(error == 0, ("unhandled error"));
680 device_printf(sc->sc_dev, "firmware parse error %d, "
681 "section type %d\n", error, tlv_type);
/* This driver only supports firmware with the new power-management ops. */
684 if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
685 device_printf(sc->sc_dev,
686 "device uses unsupported power ops\n");
/* Error path: reset state and drop any half-loaded image. */
692 fw->fw_status = IWM_FW_STATUS_NONE;
693 if (fw->fw_fp != NULL)
694 iwm_fw_info_free(fw);
696 fw->fw_status = IWM_FW_STATUS_DONE;
703 * DMA resource routines
/*
 * bus_dmamap_load() callback: the mapping must resolve to exactly one
 * segment; store its bus address through the caller-supplied pointer.
 */
707 iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
711 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
712 *(bus_addr_t *)arg = segs[0].ds_addr;
/*
 * Allocate a physically contiguous, zeroed, coherent DMA buffer of
 * 'size' bytes with the given alignment, below 4 GB (32-bit lowaddr).
 * Fills in dma->tag/map/vaddr/paddr; on any failure everything allocated
 * so far is torn down via iwm_dma_contig_free().
 */
716 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
717 bus_size_t size, bus_size_t alignment)
724 error = bus_dma_tag_create(tag, alignment,
725 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
726 1, size, 0, NULL, NULL, &dma->tag);
730 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
731 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
/* dma->paddr is set by the iwm_dma_map_addr() callback. */
735 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
736 iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
740 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
744 fail: iwm_dma_contig_free(dma);
/*
 * Tear down a buffer created by iwm_dma_contig_alloc().  Safe to call on
 * a partially initialized iwm_dma_info: each step is guarded by a NULL
 * check, so this doubles as the allocation-failure cleanup path.
 */
749 iwm_dma_contig_free(struct iwm_dma_info *dma)
751 if (dma->map != NULL) {
752 if (dma->vaddr != NULL) {
753 bus_dmamap_sync(dma->tag, dma->map,
754 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
755 bus_dmamap_unload(dma->tag, dma->map);
756 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
759 bus_dmamap_destroy(dma->tag, dma->map);
762 if (dma->tag != NULL) {
763 bus_dma_tag_destroy(dma->tag);
769 /* fwmem is used to load firmware onto the card */
/*
 * Allocate/free the staging buffer (sc->fw_dma, sc_fwdmasegsz bytes)
 * through which firmware chunks are DMA'd to the device.
 */
771 iwm_alloc_fwmem(struct iwm_softc *sc)
773 /* Must be aligned on a 16-byte boundary. */
774 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
775 sc->sc_fwdmasegsz, 16);
779 iwm_free_fwmem(struct iwm_softc *sc)
781 iwm_dma_contig_free(&sc->fw_dma);
784 /* tx scheduler rings. not used? */
/*
 * Allocate/free the TX scheduler byte-count tables: one
 * iwm_agn_scd_bc_tbl per TX queue, 1 KB aligned as the hardware requires.
 */
786 iwm_alloc_sched(struct iwm_softc *sc)
790 /* TX scheduler rings must be aligned on a 1KB boundary. */
791 rv = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
792 nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
797 iwm_free_sched(struct iwm_softc *sc)
799 iwm_dma_contig_free(&sc->sched_dma);
802 /* keep-warm page is used internally by the card. see iwl-fh.h for more info */
/* Allocate/free the 4 KB page-aligned "keep warm" page (sc->kw_dma). */
804 iwm_alloc_kw(struct iwm_softc *sc)
806 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
810 iwm_free_kw(struct iwm_softc *sc)
812 iwm_dma_contig_free(&sc->kw_dma);
815 /* interrupt cause table */
/*
 * Allocate/free the ICT (interrupt cause table), aligned so its physical
 * address can be programmed shifted by IWM_ICT_PADDR_SHIFT (see
 * iwm_ict_reset()).
 */
817 iwm_alloc_ict(struct iwm_softc *sc)
819 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
820 IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
824 iwm_free_ict(struct iwm_softc *sc)
826 iwm_dma_contig_free(&sc->ict_dma);
/*
 * Set up the RX ring: descriptor array (one 32-bit bus address per slot,
 * 256-byte aligned), the RX status area (16-byte aligned), a DMA tag for
 * receive buffers, and one IWM_RBUF_SIZE buffer per slot via
 * iwm_rx_addbuf().  On failure everything is released with
 * iwm_free_rx_ring().
 */
830 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
837 /* Allocate RX descriptors (256-byte aligned). */
838 size = IWM_RX_RING_COUNT * sizeof(uint32_t);
839 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
841 device_printf(sc->sc_dev,
842 "could not allocate RX ring DMA memory\n");
845 ring->desc = ring->desc_dma.vaddr;
847 /* Allocate RX status area (16-byte aligned). */
848 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
849 sizeof(*ring->stat), 16);
851 device_printf(sc->sc_dev,
852 "could not allocate RX status DMA memory\n");
855 ring->stat = ring->stat_dma.vaddr;
857 /* Create RX buffer DMA tag. */
858 error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
859 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
860 IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
862 device_printf(sc->sc_dev,
863 "%s: could not create RX buf DMA tag, error %d\n",
869 * Allocate and map RX buffers.
871 for (i = 0; i < IWM_RX_RING_COUNT; i++) {
872 if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
878 fail: iwm_free_rx_ring(sc, ring);
/*
 * Quiesce RX: stop the PCIe RX DMA engine (if the NIC can be locked) and
 * clear the RX status area.  Buffers stay allocated for reuse.
 * NOTE(review): the memset operates on sc->rxq.stat rather than
 * ring->stat — harmless while the only caller passes &sc->rxq, but worth
 * confirming if more rings are ever added.
 */
883 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
886 /* XXX print out if we can't lock the NIC? */
887 if (iwm_nic_lock(sc)) {
888 /* XXX handle if RX stop doesn't finish? */
889 (void) iwm_pcie_rx_stop(sc);
892 /* Reset the ring state */
894 memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
/*
 * Release all RX ring resources: descriptor and status DMA areas, every
 * per-slot mbuf and map, and finally the buffer DMA tag.  NULL-guarded
 * throughout, so it is safe as the iwm_alloc_rx_ring() failure path.
 */
898 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
902 iwm_dma_contig_free(&ring->desc_dma);
903 iwm_dma_contig_free(&ring->stat_dma);
905 for (i = 0; i < IWM_RX_RING_COUNT; i++) {
906 struct iwm_rx_data *data = &ring->data[i];
908 if (data->m != NULL) {
909 bus_dmamap_sync(ring->data_dmat, data->map,
910 BUS_DMASYNC_POSTREAD);
911 bus_dmamap_unload(ring->data_dmat, data->map);
915 if (data->map != NULL) {
916 bus_dmamap_destroy(ring->data_dmat, data->map);
920 if (ring->data_dmat != NULL) {
921 bus_dma_tag_destroy(ring->data_dmat);
922 ring->data_dmat = NULL;
/*
 * Set up TX ring 'qid': the TFD descriptor array (256-byte aligned) and,
 * for rings up to the command queue, the per-slot device-command area
 * plus a DMA map per slot.  Pre-computes each slot's command and scratch
 * bus addresses from cmd_dma.paddr.  On failure the partially built ring
 * is released with iwm_free_tx_ring().
 */
927 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
937 /* Allocate TX descriptors (256-byte aligned). */
938 size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
939 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
941 device_printf(sc->sc_dev,
942 "could not allocate TX ring DMA memory\n");
945 ring->desc = ring->desc_dma.vaddr;
948 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
949 * to allocate commands space for other rings.
951 if (qid > IWM_MVM_CMD_QUEUE)
954 size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
955 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
957 device_printf(sc->sc_dev,
958 "could not allocate TX cmd DMA memory\n");
961 ring->cmd = ring->cmd_dma.vaddr;
/* Frame buffer tag: up to IWM_MAX_SCATTER-2 segments of MCLBYTES each. */
963 error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
964 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
965 IWM_MAX_SCATTER - 2, MCLBYTES, 0, NULL, NULL, &ring->data_dmat);
967 device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
/* Walk the command area, recording each slot's cmd/scratch bus address. */
971 paddr = ring->cmd_dma.paddr;
972 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
973 struct iwm_tx_data *data = &ring->data[i];
975 data->cmd_paddr = paddr;
976 data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
977 + offsetof(struct iwm_tx_cmd, scratch);
978 paddr += sizeof(struct iwm_device_cmd);
980 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
982 device_printf(sc->sc_dev,
983 "could not create TX buf DMA map\n");
987 KASSERT(paddr == ring->cmd_dma.paddr + size,
988 ("invalid physical address"));
991 fail: iwm_free_tx_ring(sc, ring);
/*
 * Return a TX ring to its empty state: unload and free any queued mbufs,
 * zero the descriptors (synced for the device), and clear this ring's
 * bit in sc->qfullmsk so the queue is no longer considered full.
 */
996 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1000 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1001 struct iwm_tx_data *data = &ring->data[i];
1003 if (data->m != NULL) {
1004 bus_dmamap_sync(ring->data_dmat, data->map,
1005 BUS_DMASYNC_POSTWRITE);
1006 bus_dmamap_unload(ring->data_dmat, data->map);
1011 /* Clear TX descriptors. */
1012 memset(ring->desc, 0, ring->desc_dma.size);
1013 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1014 BUS_DMASYNC_PREWRITE);
1015 sc->qfullmsk &= ~(1 << ring->qid);
/*
 * Release all TX ring resources: descriptor and command DMA areas, every
 * per-slot mbuf and map, and the buffer DMA tag.  Mirrors
 * iwm_free_rx_ring() and serves as the iwm_alloc_tx_ring() failure path.
 */
1021 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1025 iwm_dma_contig_free(&ring->desc_dma);
1026 iwm_dma_contig_free(&ring->cmd_dma);
1028 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1029 struct iwm_tx_data *data = &ring->data[i];
1031 if (data->m != NULL) {
1032 bus_dmamap_sync(ring->data_dmat, data->map,
1033 BUS_DMASYNC_POSTWRITE);
1034 bus_dmamap_unload(ring->data_dmat, data->map);
1038 if (data->map != NULL) {
1039 bus_dmamap_destroy(ring->data_dmat, data->map);
1043 if (ring->data_dmat != NULL) {
1044 bus_dma_tag_destroy(ring->data_dmat);
1045 ring->data_dmat = NULL;
1050 * High-level hardware frobbing routines
/*
 * Interrupt mask helpers.  sc_intmask caches the last mask written to
 * IWM_CSR_INT_MASK so iwm_restore_interrupts() can re-apply it after a
 * temporary disable.
 */
1054 iwm_enable_interrupts(struct iwm_softc *sc)
1056 sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1057 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
/* Re-apply the previously cached mask without changing it. */
1061 iwm_restore_interrupts(struct iwm_softc *sc)
1063 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
/* Mask everything, then ack any pending CSR and FH interrupt causes. */
1067 iwm_disable_interrupts(struct iwm_softc *sc)
1069 /* disable interrupts */
1070 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1072 /* acknowledge all interrupts */
1073 IWM_WRITE(sc, IWM_CSR_INT, ~0);
1074 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
/*
 * (Re)initialize the interrupt cause table: zero it, program its
 * physical address into the DRAM interrupt-table register with the
 * enable and wrap-check bits, switch the driver into ICT mode, then ack
 * and re-enable interrupts.
 */
1078 iwm_ict_reset(struct iwm_softc *sc)
1080 iwm_disable_interrupts(sc);
1082 /* Reset ICT table. */
1083 memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1086 /* Set physical address of ICT table (4KB aligned). */
1087 IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1088 IWM_CSR_DRAM_INT_TBL_ENABLE
1089 | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1090 | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1092 /* Switch to ICT interrupt mode in driver. */
1093 sc->sc_flags |= IWM_FLAG_USE_ICT;
1095 /* Re-enable interrupts. */
1096 IWM_WRITE(sc, IWM_CSR_INT, ~0);
1097 iwm_enable_interrupts(sc);
1100 /* iwlwifi pcie/trans.c */
1103 * Since this .. hard-resets things, it's time to actually
1104 * mark the first vap (if any) as having no mac context.
1105 * It's annoying, but since the driver is potentially being
1106 * stop/start'ed whilst active (thanks openbsd port!) we
1107 * have to correctly track this.
/*
 * Full device stop: mask interrupts, drop the first vap's uploaded-MAC
 * state, leave ICT mode, halt the TX scheduler and all FH TX DMA
 * channels (polling each for idle), reset both ring sets, power down the
 * busmaster DMA clocks, release the MAC-access request, reset the
 * on-board processor, and finally re-arm only the RF-kill interrupt so
 * switch changes are still seen while the device is down.
 */
1110 iwm_stop_device(struct iwm_softc *sc)
1112 struct ieee80211com *ic = &sc->sc_ic;
1113 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1117 /* tell the device to stop sending interrupts */
1118 iwm_disable_interrupts(sc);
1121 * FreeBSD-local: mark the first vap as not-uploaded,
1122 * so the next transition through auth/assoc
1123 * will correctly populate the MAC context.
1126 struct iwm_vap *iv = IWM_VAP(vap);
1127 iv->is_uploaded = 0;
1130 /* device going down, Stop using ICT table */
1131 sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1133 /* stop tx and rx. tx and rx bits, as usual, are from if_iwn */
1135 iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1137 /* Stop all DMA channels. */
1138 if (iwm_nic_lock(sc)) {
1139 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1141 IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
/* Poll (bounded) for the channel-idle bit before moving on. */
1142 for (ntries = 0; ntries < 200; ntries++) {
1145 r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
1146 if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
1156 iwm_reset_rx_ring(sc, &sc->rxq);
1158 /* Reset all TX rings. */
1159 for (qid = 0; qid < nitems(sc->txq); qid++)
1160 iwm_reset_tx_ring(sc, &sc->txq[qid]);
1163 * Power-down device's busmaster DMA clocks
1165 iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1168 /* Make sure (redundant) we've released our request to stay awake */
1169 IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1170 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1172 /* Stop the device, and put it in low power state */
1175 /* Upon stop, the APM issues an interrupt if HW RF kill is set.
1176 * Clean again the interrupt here
1178 iwm_disable_interrupts(sc);
1179 /* stop and reset the on-board processor */
1180 IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_NEVO_RESET);
1183 * Even if we stop the HW, we still want the RF kill
1186 iwm_enable_rfkill_int(sc);
1187 iwm_check_rfkill(sc);
1190 /* iwlwifi: mvm/ops.c */
/*
 * Program IWM_CSR_HW_IF_CONFIG_REG from the firmware-provided PHY
 * configuration (radio type/step/dash extracted from sc_fw_phy_config)
 * and the hardware revision step/dash, then apply the early-PCIe-power-
 * off workaround bit so ME firmware does not lose NIC ownership.
 */
1192 iwm_mvm_nic_config(struct iwm_softc *sc)
1194 uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1195 uint32_t reg_val = 0;
/* Unpack the three radio configuration fields from the PHY SKU word. */
1197 radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1198 IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1199 radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1200 IWM_FW_PHY_CFG_RADIO_STEP_POS;
1201 radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1202 IWM_FW_PHY_CFG_RADIO_DASH_POS;
1205 reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1206 IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1207 reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1208 IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1210 /* radio configuration */
1211 reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1212 reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1213 reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1215 IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1217 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1218 "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1219 radio_cfg_step, radio_cfg_dash);
1222 * W/A : NIC is stuck in a reset state after Early PCIe power off
1223 * (PCIe power is lost before PERST# is asserted), causing ME FW
1224 * to lose ownership and not being able to obtain it back.
1226 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1227 IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1228 ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
/*
 * Initialize the single RX DMA ring: clear the status area, stop the
 * channel, program ring/status base addresses and the channel config,
 * set interrupt coalescing, and prime the write pointer with 8 RBs.
 * NOTE(review): listing is elided — the iwm_nic_unlock/return path and
 * some register writes are not visible here.
 */
1232 iwm_nic_rx_init(struct iwm_softc *sc)
1234 	if (!iwm_nic_lock(sc))
1238 	 * Initialize RX ring.  This is from the iwn driver.
1240 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
	/* Stop and reset channel 0 before reprogramming it. */
1243 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1244 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1245 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1246 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1247 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1249 	/* Set physical address of RX ring (256-byte aligned). */
1251 	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1253 	/* Set physical address of RX status (16-byte aligned). */
1255 	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1259 	 * Note: Linux driver also sets this:
1260 	 *  (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1262 	 * It causes weird behavior.  YMMV.
1264 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1265 	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
1266 	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
1267 	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
1268 	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
1269 	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1271 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1273 	/* W/A for interrupt coalescing bug in 7260 and 3160 */
1274 	if (sc->host_interrupt_operation_mode)
1275 		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1278 	 * Thus sayeth el jefe (iwlwifi) via a comment:
1280 	 * This value should initially be 0 (before preparing any
1281 	 * RBs), should be 8 after preparing the first 8 RBs (for example)
1283 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
/*
 * Initialize all TX rings: park the scheduler, point the keep-warm
 * page, then program each ring's descriptor base address.
 * NOTE(review): elided listing — unlock/return and loop closing are
 * not visible here.
 */
1291 iwm_nic_tx_init(struct iwm_softc *sc)
1295 	if (!iwm_nic_lock(sc))
1298 	/* Deactivate TX scheduler. */
1299 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1301 	/* Set physical address of "keep warm" page (16-byte aligned). */
1302 	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1304 	/* Initialize TX rings. */
1305 	for (qid = 0; qid < nitems(sc->txq); qid++) {
1306 		struct iwm_tx_ring *txq = &sc->txq[qid];
1308 		/* Set physical address of TX ring (256-byte aligned). */
1309 		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1310 		    txq->desc_dma.paddr >> 8);
1311 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1312 		    "%s: loading ring %d descriptors (%p) at %lx\n",
1315 		    (unsigned long) (txq->desc_dma.paddr >> 8));
/*
 * One-shot NIC bring-up: apply the MVM config, then initialize RX and
 * TX DMA, and enable shadow registers.  Returns the first error from
 * the sub-init steps (error paths elided in this listing).
 */
1323 iwm_nic_init(struct iwm_softc *sc)
1330 	iwm_mvm_nic_config(sc);
1332 	if ((error = iwm_nic_rx_init(sc)) != 0)
1336 	 * Ditto for TX, from iwn
1338 	if ((error = iwm_nic_tx_init(sc)) != 0)
1341 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1342 		    "%s: shadow registers enabled\n", __func__);
1343 		IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
/*
 * Hardware TX FIFO numbers and the WME AC -> FIFO mapping table.
 * NOTE(review): the BE/VI/VO members and the table contents are elided
 * from this listing.
 */
1348 enum iwm_mvm_tx_fifo {
1349 	IWM_MVM_TX_FIFO_BK = 0,
1353 	IWM_MVM_TX_FIFO_MCAST = 5,
1356 const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
/*
 * Activate TX queue 'qid' and bind it to hardware FIFO 'fifo':
 * deactivate, clear its scheduler context, set window size and frame
 * limit, then mark it active.  Non-command queues also get chaining
 * enabled and aggregation disabled.
 * NOTE(review): returns void and only logs when the NIC lock cannot be
 * taken (see the XXX) — callers cannot observe the failure.
 */
1364 iwm_enable_txq(struct iwm_softc *sc, int qid, int fifo)
1366 	if (!iwm_nic_lock(sc)) {
1367 		device_printf(sc->sc_dev,
1368 		    "%s: cannot enable txq %d\n",
1371 		return; /* XXX return EBUSY */
1374 	/* unactivate before configuration */
1375 	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1376 	    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1377 	    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1379 	if (qid != IWM_MVM_CMD_QUEUE) {
1380 		iwm_set_bits_prph(sc, IWM_SCD_QUEUECHAIN_SEL, (1 << qid));
1383 	iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
	/* Reset read/write pointers for the queue. */
1385 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1386 	iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1388 	iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1389 	/* Set scheduler window size and frame limit. */
1391 	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1393 	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1394 	    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1395 	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1396 	    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	/* Finally mark the queue active on the requested FIFO. */
1398 	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1399 	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1400 	    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1401 	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1402 	    IWM_SCD_QUEUE_STTS_REG_MSK);
1406 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1407 	    "%s: enabled txq %d FIFO %d\n",
1408 	    __func__, qid, fifo);
/*
 * Post-"alive" firmware handshake: verify the scheduler SRAM base,
 * clear scheduler state, program the scheduler DRAM base, enable the
 * command queue and all DMA channels, and re-enable L1-Active.
 * NOTE(review): the command queue is bound to FIFO 7 with a magic
 * number rather than a named IWM_MVM_TX_FIFO_* constant — confirm
 * against iwlwifi's IWM_MVM_TX_FIFO_CMD.
 */
1412 iwm_post_alive(struct iwm_softc *sc)
1417 	if (!iwm_nic_lock(sc))
1420 	if (sc->sched_base != iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR)) {
1421 		device_printf(sc->sc_dev,
1422 		    "%s: sched addr mismatch",
1430 	/* Clear TX scheduler state in SRAM. */
1431 	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1432 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
1434 	error = iwm_write_mem(sc,
1435 	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1440 	/* Set physical address of TX scheduler rings (1KB aligned). */
1441 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1443 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1445 	/* enable command channel */
1446 	iwm_enable_txq(sc, IWM_MVM_CMD_QUEUE, 7);
1448 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1450 	/* Enable DMA channels. */
1451 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1452 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1453 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1454 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1457 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1458 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1460 	/* Enable L1-Active */
1461 	iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1462 	    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1470  * NVM read access and content parsing.  We do not support
1471  * external NVM or writing NVM.
1475 /* list of NVM sections we are allowed/need to read */
1476 const int nvm_to_read[] = {
1477 	IWM_NVM_SECTION_TYPE_HW,
1478 	IWM_NVM_SECTION_TYPE_SW,
1479 	IWM_NVM_SECTION_TYPE_CALIBRATION,
1480 	IWM_NVM_SECTION_TYPE_PRODUCTION,
1483 /* Default NVM size to read */
1484 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
/* Maximum bytes we will ever pull from a single NVM section. */
1485 #define IWM_MAX_NVM_SECTION_SIZE	7000
/* Opcodes for IWM_NVM_ACCESS_CMD; only reads are implemented here. */
1487 #define IWM_NVM_WRITE_OPCODE 1
1488 #define IWM_NVM_READ_OPCODE 0
/*
 * Read one chunk of an NVM section via the firmware's NVM access
 * command.  On success the payload is copied to data+offset and *len
 * is updated with the number of bytes read (update path elided).
 * Validates command status and that the response offset matches the
 * requested offset.  Resources are released through iwm_free_resp().
 */
1491 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1492 	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1495 	struct iwm_nvm_access_cmd nvm_access_cmd = {
1496 		.offset = htole16(offset),
1497 		.length = htole16(length),
1498 		.type = htole16(section),
1499 		.op_code = IWM_NVM_READ_OPCODE,
1501 	struct iwm_nvm_access_resp *nvm_resp;
1502 	struct iwm_rx_packet *pkt;
1503 	struct iwm_host_cmd cmd = {
1504 		.id = IWM_NVM_ACCESS_CMD,
		/* WANT_SKB: we need the response packet; SEND_IN_RFKILL:
		 * NVM must be readable even with the RF switch off. */
1505 		.flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
1506 		    IWM_CMD_SEND_IN_RFKILL,
1507 		.data = { &nvm_access_cmd, },
1509 	int ret, bytes_read, offset_read;
1512 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1514 	ret = iwm_send_cmd(sc, &cmd);
1519 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
1520 		device_printf(sc->sc_dev,
1521 		    "%s: Bad return from IWM_NVM_ACCES_COMMAND (0x%08X)\n",
1522 		    __func__, pkt->hdr.flags);
1527 	/* Extract NVM response */
1528 	nvm_resp = (void *)pkt->data;
1530 	ret = le16toh(nvm_resp->status);
1531 	bytes_read = le16toh(nvm_resp->length);
1532 	offset_read = le16toh(nvm_resp->offset);
1533 	resp_data = nvm_resp->data;
1535 		device_printf(sc->sc_dev,
1536 		    "%s: NVM access command failed with status %d\n",
1542 	if (offset_read != offset) {
1543 		device_printf(sc->sc_dev,
1544 		    "%s: NVM ACCESS response with invalid offset %d\n",
1545 		    __func__, offset_read);
1550 	memcpy(data + offset, resp_data, bytes_read);
1554 	iwm_free_resp(sc, &cmd);
1559  * Reads an NVM section completely.
1560  * NICs prior to 7000 family doesn't have a real NVM, but just read
1561  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1562  * by uCode, we need to manually check in this case that we don't
1563  * overflow and try to read more than the EEPROM size.
1564  * For 7000 family NICs, we supply the maximal size we can read, and
1565  * the uCode fills the response with as much data as we can,
1566  * without overflowing, so no check is needed.
/*
 * Reads chunks of IWM_NVM_DEFAULT_CHUNK_SIZE until a short read
 * signals the end of the section; *len accumulates the running offset
 * (the accumulation/exit lines are elided in this listing).
 */
1569 iwm_nvm_read_section(struct iwm_softc *sc,
1570 	uint16_t section, uint8_t *data, uint16_t *len)
1572 	uint16_t length, seglen;
1575 	/* Set nvm section read length */
1576 	length = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
1579 	/* Read the NVM until exhausted (reading less than requested) */
1580 	while (seglen == length) {
1581 		error = iwm_nvm_read_chunk(sc,
1582 		    section, *len, length, data, &seglen);
1584 			device_printf(sc->sc_dev,
1585 			    "Cannot read NVM from section "
1586 			    "%d offset %d, length %d\n",
1587 			    section, *len, length);
1593 	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1594 	    "NVM section %d read completed\n", section);
1599  * BEGIN IWM_NVM_PARSE
1602 /* iwlwifi/iwl-nvm-parse.c */
1604 /* NVM offsets (in words) definitions */
1605 enum wkp_nvm_offsets {
1606 	/* NVM HW-Section offset (in words) definitions */
1609 	/* NVM SW-Section offset (in words) definitions */
1610 	IWM_NVM_SW_SECTION = 0x1C0,
1611 	IWM_NVM_VERSION = 0,
	/* Channel-flag words start at absolute word 0x1E0, expressed
	 * relative to the SW section base. */
1615 	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1617 	/* NVM calibration section offset (in words) definitions */
1618 	IWM_NVM_CALIB_SECTION = 0x2B8,
1619 	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1622 /* SKU Capabilities (actual values from NVM definition) */
1624 	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
1625 	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
1626 	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
1627 	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
1630 /* radio config bits (actual values from NVM definition) */
1631 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1632 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1633 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1634 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1635 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1636 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
/* Fallback regulatory TX power (dBm) when the NVM gives none. */
1638 #define DEFAULT_MAX_TX_POWER 16
1641  * enum iwm_nvm_channel_flags - channel flags in NVM
1642  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1643  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1644  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1645  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1646  * XXX cannot find this (DFS) flag in iwl-nvm-parse.c
1647  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1648  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1649  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1650  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1651  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1653 enum iwm_nvm_channel_flags {
1654 	IWM_NVM_CHANNEL_VALID = (1 << 0),
1655 	IWM_NVM_CHANNEL_IBSS = (1 << 1),
1656 	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1657 	IWM_NVM_CHANNEL_RADAR = (1 << 4),
1658 	IWM_NVM_CHANNEL_DFS = (1 << 7),
1659 	IWM_NVM_CHANNEL_WIDE = (1 << 8),
1660 	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1661 	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1662 	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1666  * Translate EEPROM flags to net80211.
/*
 * Map NVM channel flag bits onto net80211 channel flags: no ACTIVE bit
 * means passive-scan only, no IBSS bit forbids adhoc, and RADAR marks
 * the channel DFS (which also forbids adhoc).
 * NOTE(review): the declaration/initialization of the returned flags
 * variable and the return statement are elided from this listing.
 */
1669 iwm_eeprom_channel_flags(uint16_t ch_flags)
1674 	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1675 		nflags |= IEEE80211_CHAN_PASSIVE;
1676 	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1677 		nflags |= IEEE80211_CHAN_NOADHOC;
1678 	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1679 		nflags |= IEEE80211_CHAN_DFS;
1681 		nflags |= IEEE80211_CHAN_NOADHOC;
/*
 * Walk NVM channel entries [ch_idx, ch_num) and register each VALID
 * one with net80211 via ieee80211_add_channel(), using the flags
 * translated by iwm_eeprom_channel_flags().  Indices at or beyond
 * IWM_NUM_2GHZ_CHANNELS are 5 GHz channels (used for log text only
 * in the visible lines).
 */
1688 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1689     int maxchans, int *nchans, int ch_idx, int ch_num, const uint8_t bands[])
1691 	const uint16_t * const nvm_ch_flags = sc->sc_nvm.nvm_ch_flags;
1697 	for (; ch_idx < ch_num; ch_idx++) {
1698 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1699 		ieee = iwm_nvm_channels[ch_idx];
1701 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1702 			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1703 			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
1705 			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1710 		nflags = iwm_eeprom_channel_flags(ch_flags);
1711 		error = ieee80211_add_channel(chans, maxchans, nchans,
1712 		    ieee, 0, 0, nflags, bands);
1716 		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1717 		    "Ch. %d Flags %x [%sGHz] - Added\n",
1719 		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
/*
 * net80211 ic_getradiocaps hook: build the channel list from NVM data.
 * 2 GHz channels 1-13 support 11b/g, channel 14 is 11b only, and the
 * 5 GHz range is added only when the SKU enables the 52 GHz band.
 */
1725 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
1726     struct ieee80211_channel chans[])
1728 	struct iwm_softc *sc = ic->ic_softc;
1729 	struct iwm_nvm_data *data = &sc->sc_nvm;
1730 	uint8_t bands[IEEE80211_MODE_BYTES];
1732 	memset(bands, 0, sizeof(bands));
1733 	/* 1-13: 11b/g channels. */
1734 	setbit(bands, IEEE80211_MODE_11B);
1735 	setbit(bands, IEEE80211_MODE_11G);
1736 	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
1737 	    IWM_NUM_2GHZ_CHANNELS - 1, bands);
1739 	/* 14: 11b channel only. */
1740 	clrbit(bands, IEEE80211_MODE_11G);
1741 	iwm_add_channel_band(sc, chans, maxchans, nchans,
1742 	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
1744 	if (data->sku_cap_band_52GHz_enable) {
1745 		memset(bands, 0, sizeof(bands));
1746 		setbit(bands, IEEE80211_MODE_11A);
1747 		iwm_add_channel_band(sc, chans, maxchans, nchans,
1748 		    IWM_NUM_2GHZ_CHANNELS, nitems(iwm_nvm_channels), bands);
/*
 * Decode the raw NVM word arrays into sc->sc_nvm: version, radio
 * configuration, SKU capabilities, antenna masks, crystal calibration,
 * MAC address (stored little-endian 16-bit, hence the byte swap), and
 * the per-channel flag table.
 * NOTE(review): xtal_calib words are copied without le16toh while the
 * neighbouring fields use le16_to_cpup — confirm whether calibration
 * words are consumed raw (passed back to firmware) or need swapping.
 */
1753 iwm_parse_nvm_data(struct iwm_softc *sc,
1754 	const uint16_t *nvm_hw, const uint16_t *nvm_sw,
1755 	const uint16_t *nvm_calib, uint8_t tx_chains, uint8_t rx_chains)
1757 	struct iwm_nvm_data *data = &sc->sc_nvm;
1758 	uint8_t hw_addr[IEEE80211_ADDR_LEN];
1759 	uint16_t radio_cfg, sku;
1761 	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
1763 	radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
1764 	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
1765 	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
1766 	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
1767 	data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
1768 	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
1769 	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK(radio_cfg);
1771 	sku = le16_to_cpup(nvm_sw + IWM_SKU);
1772 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
1773 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	/* 11n deliberately forced off in this driver version. */
1774 	data->sku_cap_11n_enable = 0;
1776 	if (!data->valid_tx_ant || !data->valid_rx_ant) {
1777 		device_printf(sc->sc_dev,
1778 		    "%s: invalid antennas (0x%x, 0x%x)\n",
1779 		    __func__, data->valid_tx_ant,
1780 		    data->valid_rx_ant);
1784 	data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
1786 	data->xtal_calib[0] = *(nvm_calib + IWM_XTAL_CALIB);
1787 	data->xtal_calib[1] = *(nvm_calib + IWM_XTAL_CALIB + 1);
1789 	/* The byte order is little endian 16 bit, meaning 214365 */
1790 	IEEE80211_ADDR_COPY(hw_addr, nvm_hw + IWM_HW_ADDR);
1791 	data->hw_addr[0] = hw_addr[1];
1792 	data->hw_addr[1] = hw_addr[0];
1793 	data->hw_addr[2] = hw_addr[3];
1794 	data->hw_addr[3] = hw_addr[2];
1795 	data->hw_addr[4] = hw_addr[5];
1796 	data->hw_addr[5] = hw_addr[4];
1798 	memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
1799 	    sizeof(data->nvm_ch_flags));
1800 	data->calib_version = 255;   /* TODO:
1801 					this value will prevent some checks from
1802 					failing, we need to check if this
1803 					field is still needed, and if it does,
1804 					where is it in the NVM */
/* One raw NVM section as read from firmware (data + length, elided). */
1813 struct iwm_nvm_section {
1815 	const uint8_t *data;
/*
 * Validate that the HW and SW sections were read, then hand the three
 * word arrays to iwm_parse_nvm_data().
 * NOTE(review): the CALIBRATION section pointer is passed without the
 * NULL check applied to HW/SW — confirm a missing calibration section
 * cannot reach iwm_parse_nvm_data()'s nvm_calib dereferences.
 */
1819 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
1821 	const uint16_t *hw, *sw, *calib;
1823 	/* Checking for required sections */
1824 	if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
1825 	    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
1826 		device_printf(sc->sc_dev,
1827 		    "%s: Can't parse empty NVM sections\n",
1832 	hw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_HW].data;
1833 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
1834 	calib = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
1835 	return iwm_parse_nvm_data(sc, hw, sw, calib,
1836 	    IWM_FW_VALID_TX_ANT(sc), IWM_FW_VALID_RX_ANT(sc));
/*
 * Read every section in nvm_to_read[] into freshly allocated copies,
 * then parse them.  A shared scratch buffer is reused for each read
 * and freed before parsing.
 * NOTE(review): the KASSERT uses 'section <= nitems(nvm_sections)';
 * since 'section' indexes nvm_sections[], equality would be one past
 * the end — this should almost certainly be '<'.
 * NOTE(review): error paths and the per-section malloc-failure
 * handling are elided from this listing.
 */
1840 iwm_nvm_init(struct iwm_softc *sc)
1842 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
1843 	int i, section, error;
1845 	uint8_t *nvm_buffer, *temp;
1847 	/* Read From FW NVM */
1848 	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1852 	/* TODO: find correct NVM max size for a section */
1853 	nvm_buffer = malloc(IWM_OTP_LOW_IMAGE_SIZE, M_DEVBUF, M_NOWAIT);
1854 	if (nvm_buffer == NULL)
1856 	for (i = 0; i < nitems(nvm_to_read); i++) {
1857 		section = nvm_to_read[i];
1858 		KASSERT(section <= nitems(nvm_sections),
1859 		    ("too many sections"));
1861 		error = iwm_nvm_read_section(sc, section, nvm_buffer, &len);
1865 		temp = malloc(len, M_DEVBUF, M_NOWAIT);
1870 		memcpy(temp, nvm_buffer, len);
1871 		nvm_sections[section].data = temp;
1872 		nvm_sections[section].length = len;
1874 	free(nvm_buffer, M_DEVBUF);
1878 	return iwm_parse_nvm_sections(sc, nvm_sections);
1882  * Firmware loading gunk. This is kind of a weird hybrid between the
1883  * iwn driver and the Linux iwlwifi driver.
/*
 * DMA one firmware section to device SRAM address dst_addr via the
 * service channel: copy into the pre-allocated bounce buffer, program
 * the FH TX channel for a single TB, kick it, then sleep (up to ~1s
 * per wait, interlocked with the interrupt handler setting
 * sc_fw_chunk_done).  Unlock/return paths are elided in this listing.
 */
1887 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
1888 	const uint8_t *section, uint32_t byte_cnt)
1890 	struct iwm_dma_info *dma = &sc->fw_dma;
1893 	/* Copy firmware section into pre-allocated DMA-safe memory. */
1894 	memcpy(dma->vaddr, section, byte_cnt);
1895 	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
1897 	if (!iwm_nic_lock(sc))
1900 	sc->sc_fw_chunk_done = 0;
	/* Pause the channel, set SRAM destination and DRAM source. */
1902 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
1903 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
1904 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
1906 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
1907 	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
1908 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
1909 	    (iwm_get_dma_hi_addr(dma->paddr)
1910 	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
1911 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
1912 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
1913 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
1914 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
	/* Re-enable the channel; completion raises an end-of-TFD IRQ. */
1915 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
1916 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
1917 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
1918 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
1922 	/* wait 1s for this segment to load */
1923 	while (!sc->sc_fw_chunk_done)
1924 		if ((error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz)) != 0)
/*
 * Load every section of the requested ucode image into the device,
 * release the CPU from reset, and wait (10 x 100ms) for the "alive"
 * interrupt to set sc_uc.uc_intr.  Error/return tails are elided.
 */
1931 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
1933 	struct iwm_fw_sects *fws;
1939 	sc->sc_uc.uc_intr = 0;
1941 	fws = &sc->sc_fw.fw_sects[ucode_type];
1942 	for (i = 0; i < fws->fw_count; i++) {
1943 		data = fws->fw_sect[i].fws_data;
1944 		dlen = fws->fw_sect[i].fws_len;
1945 		offset = fws->fw_sect[i].fws_devoff;
1946 		IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
1947 		    "LOAD FIRMWARE type %d offset %u len %d\n",
1948 		    ucode_type, offset, dlen);
1949 		error = iwm_firmware_load_chunk(sc, offset, data, dlen);
1951 			device_printf(sc->sc_dev,
1952 			    "%s: chunk %u of %u returned error %02d\n",
1953 			    __func__, i, fws->fw_count, error);
1958 	/* wait for the firmware to load */
	/* Writing 0 to CSR_RESET releases the on-board CPU. */
1959 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
1961 	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
1962 		error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
1968 /* iwlwifi: pcie/trans.c */
/*
 * Start a ucode image: ack all pending interrupts, init the NIC,
 * clear the rfkill handshake bits (twice, belt-and-suspenders, per
 * the upstream driver), enable interrupts, and load the image.
 */
1970 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
1974 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1976 	if ((error = iwm_nic_init(sc)) != 0) {
1977 		device_printf(sc->sc_dev, "unable to init nic\n");
1981 	/* make sure rfkill handshake bits are cleared */
1982 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
1983 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
1984 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
1986 	/* clear (again), then enable host interrupts */
1987 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1988 	iwm_enable_interrupts(sc);
1990 	/* really make sure rfkill handshake bits are cleared */
1991 	/* maybe we should write a few times more?  just to make sure */
1992 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
1993 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
1995 	/* Load the given image to the HW */
1996 	return iwm_load_firmware(sc, ucode_type);
/*
 * Post-"alive" hook; sched_base is currently unused here and the work
 * is delegated entirely to iwm_post_alive().
 */
2000 iwm_fw_alive(struct iwm_softc *sc, uint32_t sched_base)
2002 	return iwm_post_alive(sc);
/*
 * Tell the firmware which TX antennas are usable (bitmask), via a
 * synchronous IWM_TX_ANT_CONFIGURATION_CMD.
 */
2006 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2008 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2009 		.valid = htole32(valid_tx_ant),
2012 	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2013 	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2016 /* iwlwifi: mvm/fw.c */
/*
 * Send the PHY configuration plus the calibration triggers appropriate
 * for the currently-running ucode image (sc_uc_current selects the
 * per-image defaults from sc_default_calib).
 */
2018 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2020 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
2021 	enum iwm_ucode_type ucode_type = sc->sc_uc_current;
2023 	/* Set parameters */
2024 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
2025 	phy_cfg_cmd.calib_control.event_trigger =
2026 	    sc->sc_default_calib[ucode_type].event_trigger;
2027 	phy_cfg_cmd.calib_control.flow_trigger =
2028 	    sc->sc_default_calib[ucode_type].flow_trigger;
2030 	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2031 	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2032 	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2033 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
/*
 * Read the requested firmware image, start it, and wait for the alive
 * notification; sc_uc_current is rolled back to the previous image on
 * start failure so later calibration commands target the right image.
 */
2037 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2038 	enum iwm_ucode_type ucode_type)
2040 	enum iwm_ucode_type old_type = sc->sc_uc_current;
2043 	if ((error = iwm_read_firmware(sc, ucode_type)) != 0)
2046 	sc->sc_uc_current = ucode_type;
2047 	error = iwm_start_fw(sc, ucode_type);
2049 		sc->sc_uc_current = old_type;
2053 	return iwm_fw_alive(sc, sc->sched_base);
2061  * follows iwlwifi/fw.c
/*
 * Run the INIT ucode image.  With justnvm set, only the NVM is read
 * (MAC address extracted, scan command buffer sized and allocated) and
 * we return early (early-return lines elided).  Otherwise antenna and
 * PHY configuration are sent and we sleep until the init-complete
 * notification arrives (2s timeout per wait).
 */
2064 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2068 	/* do not operate with rfkill switch turned on */
2069 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2070 		device_printf(sc->sc_dev,
2071 		    "radio is disabled by hardware switch\n");
2075 	sc->sc_init_complete = 0;
2076 	if ((error = iwm_mvm_load_ucode_wait_alive(sc,
2077 	    IWM_UCODE_TYPE_INIT)) != 0)
2081 		if ((error = iwm_nvm_init(sc)) != 0) {
2082 			device_printf(sc->sc_dev, "failed to read nvm\n");
2085 		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->sc_nvm.hw_addr);
		/* Size the scan command for the worst-case probe +
		 * maximum channel count. */
2087 		sc->sc_scan_cmd_len = sizeof(struct iwm_scan_cmd)
2088 		    + sc->sc_capa_max_probe_len
2089 		    + IWM_MAX_NUM_SCAN_CHANNELS
2090 		    * sizeof(struct iwm_scan_channel);
2091 		sc->sc_scan_cmd = malloc(sc->sc_scan_cmd_len, M_DEVBUF,
2093 		if (sc->sc_scan_cmd == NULL)
2099 	/* Send TX valid antennas before triggering calibrations */
2100 	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
2104 	 * Send phy configurations command to init uCode
2105 	 * to start the 16.0 uCode init image internal calibrations.
2107 	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
2108 		device_printf(sc->sc_dev,
2109 		    "%s: failed to run internal calibration: %d\n",
2115 	 * Nothing to do but wait for the init complete notification
2118 	while (!sc->sc_init_complete)
2119 		if ((error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
2120 		    0, "iwminit", 2*hz)) != 0)
2130 /* (re)stock rx ring, called at init-time and at runtime */
/*
 * Allocate a jumbo mbuf cluster for RX slot 'idx', DMA-map it, and
 * write its (256-byte-aligned) physical address into the descriptor.
 * Any previous mapping on the slot is unloaded first.
 * NOTE(review): "can't not map mbuf" in the error message is a typo
 * for "can't map mbuf" — it is runtime text, so left untouched here.
 * NOTE(review): a dmamap appears to be created on every call; confirm
 * the old map is destroyed or reused somewhere in the elided lines.
 */
2132 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
2134 	struct iwm_rx_ring *ring = &sc->rxq;
2135 	struct iwm_rx_data *data = &ring->data[idx];
2140 	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
2144 	if (data->m != NULL)
2145 		bus_dmamap_unload(ring->data_dmat, data->map);
2147 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2148 	error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
2150 		device_printf(sc->sc_dev,
2151 		    "%s: could not create RX buf DMA map, error %d\n",
2156 	error = bus_dmamap_load(ring->data_dmat, data->map,
2157 	    mtod(data->m, void *), IWM_RBUF_SIZE, iwm_dma_map_addr,
2158 	    &paddr, BUS_DMA_NOWAIT);
2159 	if (error != 0 && error != EFBIG) {
2160 		device_printf(sc->sc_dev,
2161 		    "%s: can't not map mbuf, error %d\n", __func__,
2165 	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
2167 	/* Update RX descriptor. */
2168 	ring->desc[idx] = htole32(paddr >> 8);
2169 	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2170 	    BUS_DMASYNC_PREWRITE);
2177 /* iwlwifi: mvm/rx.c */
2178 #define IWM_RSSI_OFFSET 50
/*
 * Legacy (pre-energy-API) RSSI computation: extract per-chain AGC and
 * RSSI values from the PHY info words, convert each to dBm
 * (rssi - offset - agc), and return the stronger chain.
 */
2180 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2182 	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
2183 	uint32_t agc_a, agc_b;
2186 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
2187 	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
2188 	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
2190 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
2191 	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
2192 	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
2195 	 * dBm = rssi dB - agc dB - constant.
2196 	 * Higher AGC (higher radio gain) means lower signal.
2198 	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
2199 	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
2200 	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
2202 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2203 	    "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
2204 	    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
2206 	return max_rssi_dbm;
2209 /* iwlwifi: mvm/rx.c */
2211  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
2212  * values are reported by the fw as positive values - need to negate
2213  * to obtain their dBM.  Account for missing antennas by replacing 0
2214  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
2217 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2219 	int energy_a, energy_b, energy_c, max_energy;
2222 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
2223 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
2224 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
	/* 0 means "antenna absent" -> substitute an impossible floor. */
2225 	energy_a = energy_a ? -energy_a : -256;
2226 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
2227 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
2228 	energy_b = energy_b ? -energy_b : -256;
2229 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
2230 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
2231 	energy_c = energy_c ? -energy_c : -256;
2232 	max_energy = MAX(energy_a, energy_b);
2233 	max_energy = MAX(max_energy, energy_c);
2235 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2236 	    "energy In A %d B %d C %d , and max %d\n",
2237 	    energy_a, energy_b, energy_c, max_energy);
/*
 * PHY-info notification handler: stash the PHY stats so the following
 * MPDU notification (iwm_mvm_rx_rx_mpdu) can use them.
 */
2243 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
2244 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2246 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
2248 	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
2249 	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2251 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
2255  * Retrieve the average noise (in dBm) among receivers.
/*
 * Averages the low byte of beacon_silence_rssi over the antennas that
 * reported a value (accumulation lines elided), offset by -107 dBm;
 * -127 is returned when no antenna reported.
 */
2258 iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
2260 	int i, total, nbant, noise;
2262 	total = nbant = noise = 0;
2263 	for (i = 0; i < 3; i++) {
2264 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
2271 	/* There should be at least one antenna but check anyway. */
2272 	return (nbant == 0) ? -127 : (total / nbant) - 107;
2276  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
2278  * Handles the actual data of the Rx packet from the fw
/*
 * Validate the frame (DSP size, CRC/overrun status), compute RSSI via
 * the energy API or the legacy calculation, replenish the RX slot,
 * fill an ieee80211_rx_stats, optionally emit radiotap, and hand the
 * mbuf to net80211 (per-node or input-all path).
 */
2281 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
2282 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2284 	struct ieee80211com *ic = &sc->sc_ic;
2285 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2286 	struct ieee80211_frame *wh;
2287 	struct ieee80211_node *ni;
2288 	struct ieee80211_rx_stats rxs;
2290 	struct iwm_rx_phy_info *phy_info;
2291 	struct iwm_rx_mpdu_res_start *rx_res;
2293 	uint32_t rx_pkt_status;
2296 	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
	/* PHY stats were cached by the preceding PHY notification. */
2298 	phy_info = &sc->sc_last_phy_info;
2299 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
2300 	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
2301 	len = le16toh(rx_res->byte_count);
	/* Status word trails the frame payload. */
2302 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
2305 	m->m_data = pkt->data + sizeof(*rx_res);
2306 	m->m_pkthdr.len = m->m_len = len;
2308 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
2309 		device_printf(sc->sc_dev,
2310 		    "dsp size out of range [0,20]: %d\n",
2311 		    phy_info->cfg_phy_cnt);
2315 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
2316 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
2317 		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2318 		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
2322 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
2323 		rssi = iwm_mvm_get_signal_strength(sc, phy_info);
2325 		rssi = iwm_mvm_calc_rssi(sc, phy_info);
2327 	rssi = (0 - IWM_MIN_DBM) + rssi;	/* normalize */
2328 	rssi = MIN(rssi, sc->sc_max_rssi);	/* clip to max. 100% */
2330 	/* replenish ring for the buffer we're going to feed to the sharks */
2331 	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
2332 		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
2337 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
2339 	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2340 	    "%s: phy_info: channel=%d, flags=0x%08x\n",
2342 	    le16toh(phy_info->channel),
2343 	    le16toh(phy_info->phy_flags));
2346 	 * Populate an RX state struct with the provided information.
2348 	bzero(&rxs, sizeof(rxs));
2349 	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
2350 	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
2351 	rxs.c_ieee = le16toh(phy_info->channel);
	/* NOTE(review): the mask is applied before byte-swapping here
	 * (le16toh(flags & MASK)); elsewhere the swap comes first.
	 * Equivalent only because the flag constant is symmetric/LE —
	 * confirm against iwlwifi, should likely be
	 * le16toh(phy_info->phy_flags) & IWM_RX_RES_PHY_FLAGS_BAND_24. */
2352 	if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
2353 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
2355 		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
2357 	rxs.rssi = rssi - sc->sc_noise;
2358 	rxs.nf = sc->sc_noise;
2360 	if (ieee80211_radiotap_active_vap(vap)) {
2361 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
2364 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
2365 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2366 		tap->wr_chan_freq = htole16(rxs.c_freq);
2367 		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
2368 		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
2369 		tap->wr_dbm_antsignal = (int8_t)rssi;
2370 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
2371 		tap->wr_tsft = phy_info->system_timestamp;
		/* Map the firmware rate code to a radiotap rate in
		 * 500 kb/s units (CCK rates first, then OFDM codes). */
2372 		switch (phy_info->rate) {
2374 		case 10: tap->wr_rate = 2; break;
2375 		case 20: tap->wr_rate = 4; break;
2376 		case 55: tap->wr_rate = 11; break;
2377 		case 110: tap->wr_rate = 22; break;
2379 		case 0xd: tap->wr_rate = 12; break;
2380 		case 0xf: tap->wr_rate = 18; break;
2381 		case 0x5: tap->wr_rate = 24; break;
2382 		case 0x7: tap->wr_rate = 36; break;
2383 		case 0x9: tap->wr_rate = 48; break;
2384 		case 0xb: tap->wr_rate = 72; break;
2385 		case 0x1: tap->wr_rate = 96; break;
2386 		case 0x3: tap->wr_rate = 108; break;
2387 		/* Unknown rate: should not happen. */
2388 		default:  tap->wr_rate = 0;
2394 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
2395 		ieee80211_input_mimo(ni, m, &rxs);
2396 		ieee80211_free_node(ni);
2398 		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
2399 		ieee80211_input_mimo_all(ic, m, &rxs);
/*
 * Handle a single-frame TX response from the firmware: extract the
 * completion status, dump the interesting response fields for debugging,
 * and feed a success/failure sample (with the per-frame retry count)
 * into net80211's rate control module for this node.
 *
 * NOTE(review): the firmware response is little-endian; status and the
 * 16-bit counters are byteswapped via le16toh()/le32toh() before use.
 */
2405 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
2406 struct iwm_node *in)
2408 struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
2409 struct ieee80211_node *ni = &in->in_ni;
2410 struct ieee80211vap *vap = ni->ni_vap;
/* IWM_TX_STATUS_MSK isolates the completion-status bits from the word. */
2411 int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
/* failure_frame = number of retries spent on this frame; passed to ratectl. */
2412 int failack = tx_resp->failure_frame;
/* This path only handles non-aggregated (single frame) responses. */
2414 KASSERT(tx_resp->frame_count == 1, ("too many frames"));
2416 /* Update rate control statistics. */
2417 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
2419 (int) le16toh(tx_resp->status.status),
2420 (int) le16toh(tx_resp->status.sequence),
2421 tx_resp->frame_count,
2422 tx_resp->bt_kill_count,
2423 tx_resp->failure_rts,
2424 tx_resp->failure_frame,
2425 le32toh(tx_resp->initial_rate),
2426 (int) le16toh(tx_resp->wireless_media_time));
/* Anything other than SUCCESS/DIRECT_DONE counts as a TX failure. */
2428 if (status != IWM_TX_STATUS_SUCCESS &&
2429 status != IWM_TX_STATUS_DIRECT_DONE) {
2430 ieee80211_ratectl_tx_complete(vap, ni,
2431 IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
2434 ieee80211_ratectl_tx_complete(vap, ni,
2435 IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
/*
 * TX-done interrupt handler: locate the TX ring slot named by the
 * command header (qid/idx), hand the response to
 * iwm_mvm_rx_tx_cmd_single() for status/rate-control processing, then
 * unmap and complete the mbuf and update the ring's queued accounting.
 */
2441 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
2442 struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2444 struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
2445 int idx = cmd_hdr->idx;
2446 int qid = cmd_hdr->qid;
2447 struct iwm_tx_ring *ring = &sc->txq[qid];
2448 struct iwm_tx_data *txd = &ring->data[idx];
2449 struct iwm_node *in = txd->in;
2450 struct mbuf *m = txd->m;
/* Sanity: the slot must still hold an in-flight frame with a node. */
2453 KASSERT(txd->done == 0, ("txd not done"));
2454 KASSERT(txd->in != NULL, ("txd without node"));
2455 KASSERT(txd->m != NULL, ("txd without mbuf"));
/* Make the firmware's response visible to the CPU before reading it. */
2457 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
/* A completion arrived, so cancel the watchdog's TX timeout countdown. */
2459 sc->sc_tx_timer = 0;
2461 status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
2463 /* Unmap and free mbuf. */
2464 bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
2465 bus_dmamap_unload(ring->data_dmat, txd->map);
2467 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
2468 "free txd %p, in %p\n", txd, txd->in);
/* Releases the mbuf and drops the node ref taken at transmit time. */
2473 ieee80211_tx_complete(&in->in_ni, m, status);
/* Ring drained below the low-water mark: clear the queue-full bit. */
2475 if (--ring->queued < IWM_TX_RING_LOMARK) {
2476 sc->qfullmsk &= ~(1 << ring->qid);
2477 if (sc->qfullmsk == 0) {
2479 * Well, we're in interrupt context, but then again
2480 * I guess net80211 does all sorts of stunts in
2481 * interrupt context, so maybe this is no biggie.
2493 * Process a "command done" firmware notification. This is where we wakeup
2494 * processes waiting for a synchronous command completion.
/*
 * Complete a firmware command: release any mbuf the command was mapped
 * in and wake up the thread sleeping on the command-queue descriptor in
 * the synchronous-command path.  Notifications from queues other than
 * the command queue are ignored.
 */
2498 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
2500 struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
2501 struct iwm_tx_data *data;
2503 if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
2504 return; /* Not a command ack. */
2507 data = &ring->data[pkt->hdr.idx];
2509 /* If the command was mapped in an mbuf, free it. */
2510 if (data->m != NULL) {
2511 bus_dmamap_sync(ring->data_dmat, data->map,
2512 BUS_DMASYNC_POSTWRITE);
2513 bus_dmamap_unload(ring->data_dmat, data->map);
/* The descriptor address is the wait channel used by the sync-cmd sleeper. */
2517 wakeup(&ring->desc[pkt->hdr.idx]);
2522 * necessary only for block ack mode
/*
 * Update the firmware TX scheduler's byte-count table for one queue
 * slot.  The entry packs the station id into the top 4 bits and the
 * (adjusted) length into the rest; the first IWM_TFD_QUEUE_SIZE_BC_DUP
 * entries are mirrored past IWM_TFD_QUEUE_SIZE_MAX so the hardware can
 * wrap around the ring.
 */
2525 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
2528 struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
2531 scd_bc_tbl = sc->sched_dma.vaddr;
2533 len += 8; /* magic numbers came naturally from paris */
/* With the DW_BC_TABLE capability the length is stored in dwords. */
2534 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
2535 len = roundup(len, 4) / 4;
/* sta_id in bits 15:12, byte count in the low bits; table is LE. */
2537 w_val = htole16(sta_id << 12 | len);
2539 /* Update TX scheduler. */
2540 scd_bc_tbl[qid].tfd_offset[idx] = w_val;
2541 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2542 BUS_DMASYNC_PREWRITE);
2544 /* I really wonder what this is ?!? */
/* Duplicate low entries beyond the ring end (hardware wrap-around). */
2545 if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
2546 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
2547 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2548 BUS_DMASYNC_PREWRITE);
2554 * Take an 802.11 (non-n) rate, find the relevant rate
2555 * table entry. return the index into in_ridx[].
2557 * The caller then uses that index back into in_ridx
2558 * to figure out the rate index programmed /into/
2559 * the firmware for this given node.
/*
 * Scan the node's programmed rate list (in->in_ridx[]) for an entry
 * whose 802.11 rate matches the requested one and return its index.
 * Per the XXX notes below, on ambiguity it returns the first match
 * rather than the lowest — TODO noted by the original author.
 */
2562 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
2568 for (i = 0; i < nitems(in->in_ridx); i++) {
2569 r = iwm_rates[in->in_ridx[i]].rate;
2573 /* XXX Return the first */
2574 /* XXX TODO: have it return the /lowest/ */
2579 * Fill in the rate related information for a transmit command.
/*
 * Fill the rate-related fields of a TX command: retry limits, the
 * initial rate index, and rate_n_flags.  Data frames use the rate
 * control table programmed into the firmware; everything else falls
 * back to a fixed lowest OFDM/CCK rate based on the current PHY mode.
 * Returns the iwm_rates[] entry chosen (used by the caller, e.g. for
 * radiotap).
 */
2581 static const struct iwm_rate *
2582 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
2583 struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
2585 struct ieee80211com *ic = &sc->sc_ic;
2586 struct ieee80211_node *ni = &in->in_ni;
2587 const struct iwm_rate *rinfo;
2588 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2589 int ridx, rate_flags;
2591 tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
2592 tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
2595 * XXX TODO: everything about the rate selection here is terrible!
2598 if (type == IEEE80211_FC0_TYPE_DATA) {
2600 /* for data frames, use RS table */
/* Ask net80211 ratectl for the current rate, then map it to our table. */
2601 (void) ieee80211_ratectl_rate(ni, NULL, 0);
2602 i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
2603 ridx = in->in_ridx[i];
2605 /* This is the index into the programmed table */
2606 tx->initial_rate_index = i;
/* STA_RATE tells the firmware to use the LQ (link quality) table. */
2607 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
2608 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
2609 "%s: start with i=%d, txrate %d\n",
2610 __func__, i, iwm_rates[ridx].rate);
2613 * For non-data, use the lowest supported rate for the given
2616 * Note: there may not be any rate control information available.
2617 * This driver currently assumes if we're transmitting data
2618 * frames, use the rate control table. Grr.
2620 * XXX TODO: use the configured rate for the traffic type!
2621 * XXX TODO: this should be per-vap, not curmode; as we later
2622 * on we'll want to handle off-channel stuff (eg TDLS).
2624 if (ic->ic_curmode == IEEE80211_MODE_11A) {
2626 * XXX this assumes the mode is either 11a or not 11a;
2627 * definitely won't work for 11n.
2629 ridx = IWM_RIDX_OFDM;
2631 ridx = IWM_RIDX_CCK;
2635 rinfo = &iwm_rates[ridx];
2637 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
2640 !! (IWM_RIDX_IS_CCK(ridx))
2643 /* XXX TODO: hard-coded TX antenna? */
2644 rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
/* CCK rates need the CCK bit set in rate_n_flags. */
2645 if (IWM_RIDX_IS_CCK(ridx))
2646 rate_flags |= IWM_RATE_MCS_CCK_MSK;
2647 tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
/*
 * Queue an 802.11 frame for transmission on TX ring 'ac':
 *  - build the iwm_tx_cmd (rate info via iwm_tx_fill_cmd(), flags,
 *    station id, PM timeout, copy of the 802.11 header),
 *  - run crypto encap if the frame is protected,
 *  - DMA-map the payload (collapsing the mbuf chain if it has too many
 *    segments), fill the TFD descriptor, sync all DMA maps,
 *  - update the firmware scheduler byte counts and kick the write
 *    pointer; mark the ring full past the high-water mark.
 * Returns 0 on success or an errno (error paths are in elided lines).
 */
2654 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
2656 struct ieee80211com *ic = &sc->sc_ic;
2657 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2658 struct iwm_node *in = IWM_NODE(ni);
2659 struct iwm_tx_ring *ring;
2660 struct iwm_tx_data *data;
2661 struct iwm_tfd *desc;
2662 struct iwm_device_cmd *cmd;
2663 struct iwm_tx_cmd *tx;
2664 struct ieee80211_frame *wh;
2665 struct ieee80211_key *k = NULL;
2667 const struct iwm_rate *rinfo;
2670 bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
2673 int i, totlen, error, pad;
2675 wh = mtod(m, struct ieee80211_frame *);
2676 hdrlen = ieee80211_anyhdrsize(wh);
2677 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
/* One TX ring per access category; use the current slot. */
2679 ring = &sc->txq[ac];
2680 desc = &ring->desc[ring->cur];
2681 memset(desc, 0, sizeof(*desc));
2682 data = &ring->data[ring->cur];
2684 /* Fill out iwm_tx_cmd to send to the firmware */
2685 cmd = &ring->cmd[ring->cur];
2686 cmd->hdr.code = IWM_TX_CMD;
2688 cmd->hdr.qid = ring->qid;
2689 cmd->hdr.idx = ring->cur;
2691 tx = (void *)cmd->data;
2692 memset(tx, 0, sizeof(*tx));
2694 rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
2696 /* Encrypt the frame if need be. */
2697 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
2698 /* Retrieve key for TX && do software encryption. */
2699 k = ieee80211_crypto_encap(ni, m);
2704 /* 802.11 header may have moved. */
2705 wh = mtod(m, struct ieee80211_frame *);
2708 if (ieee80211_radiotap_active_vap(vap)) {
2709 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
2712 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
2713 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
2714 tap->wt_rate = rinfo->rate;
2716 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
2717 ieee80211_radiotap_tx(vap, m);
2721 totlen = m->m_pkthdr.len;
/* Unicast frames expect an ACK from the receiver. */
2724 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2725 flags |= IWM_TX_CMD_FLG_ACK;
/* RTS/CTS protection for large unicast non-data frames. */
2728 if (type != IEEE80211_FC0_TYPE_DATA
2729 && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
2730 && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2731 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
/* Multicast/management goes out via the aux station, data via the BSS STA. */
2734 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
2735 type != IEEE80211_FC0_TYPE_DATA)
2736 tx->sta_id = sc->sc_aux_sta.sta_id;
2738 tx->sta_id = IWM_STATION_ID;
2740 if (type == IEEE80211_FC0_TYPE_MGT) {
2741 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
/* Power-management frame timeouts, largest for (re)assoc requests. */
2743 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
2744 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
2745 tx->pm_frame_timeout = htole16(3);
2747 tx->pm_frame_timeout = htole16(2);
2749 tx->pm_frame_timeout = htole16(0);
2753 /* First segment length must be a multiple of 4. */
2754 flags |= IWM_TX_CMD_FLG_MH_PAD;
2755 pad = 4 - (hdrlen & 3);
2759 tx->driver_txop = 0;
2760 tx->next_frame_len = 0;
2762 tx->len = htole16(totlen);
2763 tx->tid_tspec = tid;
2764 tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
2766 /* Set physical address of "scratch area". */
2767 tx->dram_lsb_ptr = htole32(data->scratch_paddr);
2768 tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
2770 /* Copy 802.11 header in TX command. */
2771 memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
2773 flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
2776 tx->tx_flags |= htole32(flags);
2778 /* Trim 802.11 header. */
2780 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
2781 segs, &nsegs, BUS_DMA_NOWAIT);
2783 if (error != EFBIG) {
2784 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
2789 /* Too many DMA segments, linearize mbuf. */
/* -2 reserves the two TFD slots used by the command/header segments. */
2790 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
2792 device_printf(sc->sc_dev,
2793 "%s: could not defrag mbuf\n", __func__);
2799 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
2800 segs, &nsegs, BUS_DMA_NOWAIT);
2802 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
2812 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
2813 "sending txd %p, in %p\n", data, data->in);
2814 KASSERT(data->in != NULL, ("node is NULL"));
2816 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
2817 "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%d\n",
2818 ring->qid, ring->cur, totlen, nsegs,
2819 le32toh(tx->tx_flags),
2820 le32toh(tx->rate_n_flags),
2821 (int) tx->initial_rate_index
2824 /* Fill TX descriptor. */
/* TFD: 2 segments for the command+header, plus the payload segments. */
2825 desc->num_tbs = 2 + nsegs;
2827 desc->tbs[0].lo = htole32(data->cmd_paddr);
2828 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
2830 desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
2831 desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
2832 ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
2833 + hdrlen + pad - TB0_SIZE) << 4);
2835 /* Other DMA segments are for data payload. */
2836 for (i = 0; i < nsegs; i++) {
2838 desc->tbs[i+2].lo = htole32(seg->ds_addr);
2839 desc->tbs[i+2].hi_n_len = \
2840 htole16(iwm_get_dma_hi_addr(seg->ds_addr))
2841 | ((seg->ds_len) << 4);
/* Flush payload, command and descriptor rings before the doorbell. */
2844 bus_dmamap_sync(ring->data_dmat, data->map,
2845 BUS_DMASYNC_PREWRITE);
2846 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
2847 BUS_DMASYNC_PREWRITE);
2848 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2849 BUS_DMASYNC_PREWRITE);
2852 iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
/* Kick the firmware: advance and publish the ring write pointer. */
2856 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
2857 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
2859 /* Mark TX ring as full if we reach a certain threshold. */
2860 if (++ring->queued > IWM_TX_RING_HIMARK) {
2861 sc->qfullmsk |= 1 << ring->qid;
/*
 * net80211 raw-transmit entry point.  Refuses frames while the hardware
 * is not initialized, then hands the mbuf to iwm_tx() and arms the TX
 * watchdog.  NOTE(review): both the params==NULL and params!=NULL
 * branches currently call iwm_tx() identically — the bpf params are
 * not yet honoured.
 */
2868 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
2869 const struct ieee80211_bpf_params *params)
2871 struct ieee80211com *ic = ni->ni_ic;
2872 struct iwm_softc *sc = ic->ic_softc;
2875 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
2876 "->%s begin\n", __func__);
2878 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
2880 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
2881 "<-%s not RUNNING\n", __func__);
2887 if (params == NULL) {
2888 error = iwm_tx(sc, m, ni, 0);
2890 error = iwm_tx(sc, m, ni, 0);
/* Arm the TX watchdog (ticks down in iwm_watchdog()). */
2892 sc->sc_tx_timer = 5;
2904 * Note that there are transports that buffer frames before they reach
2905 * the firmware. This means that after flush_tx_path is called, the
2906 * queue might not be empty. The race-free way to handle this is to:
2907 * 1) set the station as draining
2908 * 2) flush the Tx path
2909 * 3) wait for the transport queues to be empty
/*
 * Ask the firmware to flush the TX path for the queues in 'tfd_msk',
 * synchronously or asynchronously per 'sync'.  See the comment above
 * about transport-side buffering: the queues may not be empty when
 * this returns.
 */
2912 iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
2914 struct iwm_tx_path_flush_cmd flush_cmd = {
2915 .queues_ctl = htole32(tfd_msk),
2916 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
2920 ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
2921 sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
2922 sizeof(flush_cmd), &flush_cmd);
2924 device_printf(sc->sc_dev,
2925 "Flushing tx queue failed: %d\n", ret);
/*
 * Convert a v6 ADD_STA command to the older v5 layout, field by field,
 * for firmware that lacks the STA_KEY_CMD capability.  Fields that do
 * not exist in v5 are simply dropped; cmd_v5 is zeroed first so any
 * v5-only fields stay 0.
 */
2935 iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *cmd_v6,
2936 struct iwm_mvm_add_sta_cmd_v5 *cmd_v5)
2938 memset(cmd_v5, 0, sizeof(*cmd_v5));
2940 cmd_v5->add_modify = cmd_v6->add_modify;
2941 cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
2942 cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
2943 IEEE80211_ADDR_COPY(cmd_v5->addr, cmd_v6->addr);
2944 cmd_v5->sta_id = cmd_v6->sta_id;
2945 cmd_v5->modify_mask = cmd_v6->modify_mask;
2946 cmd_v5->station_flags = cmd_v6->station_flags;
2947 cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
2948 cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
2949 cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
2950 cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
2951 cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
2952 cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
2953 cmd_v5->assoc_id = cmd_v6->assoc_id;
2954 cmd_v5->beamform_flags = cmd_v6->beamform_flags;
2955 cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
/*
 * Send an ADD_STA command and wait for its status.  Firmware with the
 * STA_KEY_CMD capability takes the v6 layout directly; otherwise the
 * command is down-converted to v5 first.
 */
2959 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
2960 struct iwm_mvm_add_sta_cmd_v6 *cmd, int *status)
2962 struct iwm_mvm_add_sta_cmd_v5 cmd_v5;
2964 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_STA_KEY_CMD) {
2965 return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA,
2966 sizeof(*cmd), cmd, status);
2969 iwm_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
2971 return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd_v5),
2975 /* send station add/update command to firmware */
/*
 * Send an add (update == 0) or update (update == 1) command for the
 * BSS station to the firmware and check the returned status.
 */
2977 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
2979 struct iwm_mvm_add_sta_cmd_v6 add_sta_cmd;
2983 memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
2985 add_sta_cmd.sta_id = IWM_STATION_ID;
2986 add_sta_cmd.mac_id_n_color
2987 = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
2988 IWM_DEFAULT_COLOR));
/* Enable all four TX queues for this station. */
2990 add_sta_cmd.tfd_queue_msk = htole32(0xf);
2991 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
2993 add_sta_cmd.add_modify = update ? 1 : 0;
/* Mask says which station_flags are valid; flags themselves stay 0. */
2994 add_sta_cmd.station_flags_msk
2995 |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
2997 status = IWM_ADD_STA_SUCCESS;
2998 ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3003 case IWM_ADD_STA_SUCCESS:
3007 device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
/* Add the BSS station to the firmware (update == 0 path). */
3015 iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
3019 ret = iwm_mvm_sta_send_to_fw(sc, in, 0);
/* Update the already-added BSS station in the firmware (update == 1 path). */
3027 iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
3029 return iwm_mvm_sta_send_to_fw(sc, in, 1);
/*
 * Add an internal (driver-side) station — e.g. the auxiliary station
 * used for scanning — identified by mac_id/color.  'addr' may be NULL,
 * in which case the address field is left as copied (presumably zero;
 * verify against the elided lines around 3045-3048).
 */
3033 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3034 const uint8_t *addr, uint16_t mac_id, uint16_t color)
3036 struct iwm_mvm_add_sta_cmd_v6 cmd;
3040 memset(&cmd, 0, sizeof(cmd));
3041 cmd.sta_id = sta->sta_id;
3042 cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3044 cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3047 IEEE80211_ADDR_COPY(cmd.addr, addr);
3049 ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3054 case IWM_ADD_STA_SUCCESS:
3055 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3056 "%s: Internal station added.\n", __func__);
3059 device_printf(sc->sc_dev,
3060 "%s: Add internal station failed, status=0x%x\n",
/*
 * Add the auxiliary station (fixed sta_id 3) used for off-BSS traffic
 * such as scanning.  On failure, the softc copy is wiped so a stale
 * sta_id is not reused.
 */
3069 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3073 sc->sc_aux_sta.sta_id = 3;
3074 sc->sc_aux_sta.tfd_queue_msk = 0;
3076 ret = iwm_mvm_add_int_sta_common(sc,
3077 &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
3080 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
/*
 * Recompute and send the per-binding airtime quota command.  The
 * firmware scheduling session (IWM_MVM_MAX_QUOTA fragments) is split
 * evenly between bindings that have active interfaces; unused binding
 * slots are marked IWM_FW_CTXT_INVALID, and any remainder goes to the
 * first binding.  'in' may be NULL (e.g. on teardown) — then no
 * binding is marked active.
 */
3093 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
3095 struct iwm_time_quota_cmd cmd;
3096 int i, idx, ret, num_active_macs, quota, quota_rem;
3097 int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3098 int n_ifs[IWM_MAX_BINDINGS] = {0, };
3101 memset(&cmd, 0, sizeof(cmd));
3103 /* currently, PHY ID == binding ID */
3105 id = in->in_phyctxt->id;
3106 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3107 colors[id] = in->in_phyctxt->color;
3114 * The FW's scheduling session consists of
3115 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3116 * equally between all the bindings that require quota
3118 num_active_macs = 0;
3119 for (i = 0; i < IWM_MAX_BINDINGS; i++) {
/* Default every slot to invalid; valid ones are filled in below. */
3120 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3121 num_active_macs += n_ifs[i];
3126 if (num_active_macs) {
3127 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3128 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3131 for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3135 cmd.quotas[idx].id_and_color =
3136 htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3138 if (n_ifs[i] <= 0) {
3139 cmd.quotas[idx].quota = htole32(0);
3140 cmd.quotas[idx].max_duration = htole32(0);
3142 cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3143 cmd.quotas[idx].max_duration = htole32(0);
3148 /* Give the remainder of the session to the first binding */
3149 cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3151 ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3154 device_printf(sc->sc_dev,
3155 "%s: Failed to send quota: %d\n", __func__, ret);
3164 * ieee80211 routines
3168 * Change to AUTH state in 80211 state machine. Roughly matches what
3169 * Linux does in bss_info_changed().
/*
 * Move to AUTH state: program multicast filtering, then either update
 * (if the vap's contexts were already uploaded) or add the MAC context,
 * PHY context, binding and station, and finally "protect" the session
 * with a time event so the firmware stays on-channel during
 * association.  Roughly mirrors Linux's bss_info_changed(); see the
 * long comment below about how/why it deviates.
 */
3172 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3174 struct ieee80211_node *ni;
3175 struct iwm_node *in;
3176 struct iwm_vap *iv = IWM_VAP(vap);
3181 * XXX i have a feeling that the vap node is being
3182 * freed from underneath us. Grr.
/* Hold a reference on the bss node for the duration of this call. */
3184 ni = ieee80211_ref_node(vap->iv_bss);
3186 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3187 "%s: called; vap=%p, bss ni=%p\n",
3194 error = iwm_allow_mcast(vap, sc);
3196 device_printf(sc->sc_dev,
3197 "%s: failed to set multicast\n", __func__);
3202 * This is where it deviates from what Linux does.
3204 * Linux iwlwifi doesn't reset the nic each time, nor does it
3205 * call ctxt_add() here. Instead, it adds it during vap creation,
3206 * and always does does a mac_ctx_changed().
3208 * The openbsd port doesn't attempt to do that - it reset things
3209 * at odd states and does the add here.
3211 * So, until the state handling is fixed (ie, we never reset
3212 * the NIC except for a firmware failure, which should drag
3213 * the NIC back to IDLE, re-setup and re-add all the mac/phy
3214 * contexts that are required), let's do a dirty hack here.
3216 if (iv->is_uploaded) {
/* Contexts already exist in firmware: issue "changed" updates. */
3217 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3218 device_printf(sc->sc_dev,
3219 "%s: failed to update MAC\n", __func__);
3222 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3223 in->in_ni.ni_chan, 1, 1)) != 0) {
3224 device_printf(sc->sc_dev,
3225 "%s: failed update phy ctxt\n", __func__);
3228 in->in_phyctxt = &sc->sc_phyctxt[0];
3230 if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
3231 device_printf(sc->sc_dev,
3232 "%s: binding update cmd\n", __func__);
3235 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3236 device_printf(sc->sc_dev,
3237 "%s: failed to update sta\n", __func__);
/* First time through: add contexts/binding/station from scratch. */
3241 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
3242 device_printf(sc->sc_dev,
3243 "%s: failed to add MAC\n", __func__);
3246 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3247 in->in_ni.ni_chan, 1, 1)) != 0) {
3248 device_printf(sc->sc_dev,
3249 "%s: failed add phy ctxt!\n", __func__);
3253 in->in_phyctxt = &sc->sc_phyctxt[0];
3255 if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
3256 device_printf(sc->sc_dev,
3257 "%s: binding add cmd\n", __func__);
3260 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
3261 device_printf(sc->sc_dev,
3262 "%s: failed to add sta\n", __func__);
3268 * Prevent the FW from wandering off channel during association
3269 * by "protecting" the session with a time event.
3271 /* XXX duration is in units of TU, not MS */
3272 duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
3273 iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
/* Drop the node reference taken at the top. */
3278 ieee80211_free_node(ni);
/*
 * Move to ASSOC/RUN: push updated station and MAC-context state for the
 * current bss node to the firmware.
 */
3283 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
3285 struct iwm_node *in = IWM_NODE(vap->iv_bss);
3288 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3289 device_printf(sc->sc_dev,
3290 "%s: failed to update STA\n", __func__);
3295 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3296 device_printf(sc->sc_dev,
3297 "%s: failed to update MAC\n", __func__);
/*
 * Tear down the RUN-state firmware configuration.  As the comment
 * below explains, the "proper" incremental teardown sequence hangs the
 * device, so this simply stops the whole device; the incremental path
 * (power disable, sta remove, quota clear, binding/ctxt remove) is
 * kept below, apparently compiled out — the elided lines likely hold
 * the #if/#endif, verify in the full file.
 */
3305 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
3308 * Ok, so *technically* the proper set of calls for going
3309 * from RUN back to SCAN is:
3311 * iwm_mvm_power_mac_disable(sc, in);
3312 * iwm_mvm_mac_ctxt_changed(sc, in);
3313 * iwm_mvm_rm_sta(sc, in);
3314 * iwm_mvm_update_quotas(sc, NULL);
3315 * iwm_mvm_mac_ctxt_changed(sc, in);
3316 * iwm_mvm_binding_remove_vif(sc, in);
3317 * iwm_mvm_mac_ctxt_remove(sc, in);
3319 * However, that freezes the device not matter which permutations
3320 * and modifications are attempted. Obviously, this driver is missing
3321 * something since it works in the Linux driver, but figuring out what
3322 * is missing is a little more complicated. Now, since we're going
3323 * back to nothing anyway, we'll just do a complete device reset.
3324 * Up your's, device!
3326 //iwm_mvm_flush_tx_path(sc, 0xf, 1);
3327 iwm_stop_device(sc);
3336 iwm_mvm_power_mac_disable(sc, in);
3338 if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
3339 device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
3343 if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
3344 device_printf(sc->sc_dev, "sta remove fail %d\n", error);
3347 error = iwm_mvm_rm_sta(sc, in);
3349 iwm_mvm_update_quotas(sc, NULL);
3350 if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
3351 device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
3354 iwm_mvm_binding_remove_vif(sc, in);
3356 iwm_mvm_mac_ctxt_remove(sc, in);
/*
 * net80211 node allocator: an iwm_node embeds the ieee80211_node so the
 * driver can hang its per-station state (rate table, phy ctxt) off it.
 */
3362 static struct ieee80211_node *
3363 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3365 return malloc(sizeof (struct iwm_node), M_80211_NODE,
/*
 * Build the firmware link-quality (LQ) command for a node:
 *  1) map the node's negotiated legacy rates (highest first) into
 *     hardware rate indices in in->in_ridx[], and
 *  2) fill in->in_lq's rs_table with the corresponding PLCP/antenna/CCK
 *     words, padding the tail with the lowest rate.
 * Not 11n-aware; see the XXX notes throughout.
 */
3370 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
3372 struct ieee80211_node *ni = &in->in_ni;
3373 struct iwm_lq_cmd *lq = &in->in_lq;
3374 int nrates = ni->ni_rates.rs_nrates;
3375 int i, ridx, tab = 0;
/* Guard against a rate set larger than the LQ table can hold. */
3378 if (nrates > nitems(lq->rs_table)) {
3379 device_printf(sc->sc_dev,
3380 "%s: node supports %d rates, driver handles "
3381 "only %zu\n", __func__, nrates, nitems(lq->rs_table));
3385 device_printf(sc->sc_dev,
3386 "%s: node supports 0 rates, odd!\n", __func__);
3391 * XXX .. and most of iwm_node is not initialised explicitly;
3392 * it's all just 0x0 passed to the firmware.
3395 /* first figure out which rates we should support */
3396 /* XXX TODO: this isn't 11n aware /at all/ */
/* -1 (all bits set) marks unused in_ridx[] slots. */
3397 memset(&in->in_ridx, -1, sizeof(in->in_ridx));
3398 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3399 "%s: nrates=%d\n", __func__, nrates);
3402 * Loop over nrates and populate in_ridx from the highest
3403 * rate to the lowest rate. Remember, in_ridx[] has
3404 * IEEE80211_RATE_MAXSIZE entries!
3406 for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
/* rs_rates[] is lowest-first; index from the end to go highest-first. */
3407 int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;
3409 /* Map 802.11 rate to HW rate index. */
3410 for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
3411 if (iwm_rates[ridx].rate == rate)
3413 if (ridx > IWM_RIDX_MAX) {
3414 device_printf(sc->sc_dev,
3415 "%s: WARNING: device rate for %d not found!\n",
3418 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3419 "%s: rate: i: %d, rate=%d, ridx=%d\n",
3424 in->in_ridx[i] = ridx;
3428 /* then construct a lq_cmd based on those */
3429 memset(lq, 0, sizeof(*lq));
3430 lq->sta_id = IWM_STATION_ID;
3433 * are these used? (we don't do SISO or MIMO)
3434 * need to set them to non-zero, though, or we get an error.
3436 lq->single_stream_ant_msk = 1;
3437 lq->dual_stream_ant_msk = 1;
3440 * Build the actual rate selection table.
3441 * The lowest bits are the rates. Additionally,
3442 * CCK needs bit 9 to be set. The rest of the bits
3443 * we add to the table select the tx antenna
3444 * Note that we add the rates in the highest rate first
3445 * (opposite of ni_rates).
3448 * XXX TODO: this should be looping over the min of nrates
3449 * and LQ_MAX_RETRY_NUM. Sigh.
3451 for (i = 0; i < nrates; i++) {
/* Pick the first valid TX antenna advertised by the firmware. */
3455 txant = IWM_FW_VALID_TX_ANT(sc);
3456 nextant = 1<<(ffs(txant)-1);
3460 * Map the rate id into a rate index into
3461 * our hardware table containing the
3462 * configuration to use for this rate.
3464 ridx = in->in_ridx[i];
3465 tab = iwm_rates[ridx].plcp;
3466 tab |= nextant << IWM_RATE_MCS_ANT_POS;
3467 if (IWM_RIDX_IS_CCK(ridx))
3468 tab |= IWM_RATE_MCS_CCK_MSK;
3469 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3470 "station rate i=%d, rate=%d, hw=%x\n",
3471 i, iwm_rates[ridx].rate, tab);
3472 lq->rs_table[i] = htole32(tab);
3474 /* then fill the rest with the lowest possible rate */
/* 'tab' still holds the last (lowest) rate word from the loop above. */
3475 for (i = nrates; i < nitems(lq->rs_table); i++) {
3476 KASSERT(tab != 0, ("invalid tab"));
3477 lq->rs_table[i] = htole32(tab);
/*
 * ifmedia change callback: defer to net80211; if it requests a reset
 * (ENETRESET) and the interface is running, the elided tail presumably
 * restarts the device — confirm against the full file.
 */
3482 iwm_media_change(struct ifnet *ifp)
3484 struct ieee80211vap *vap = ifp->if_softc;
3485 struct ieee80211com *ic = vap->iv_ic;
3486 struct iwm_softc *sc = ic->ic_softc;
3489 error = ieee80211_media_change(ifp);
3490 if (error != ENETRESET)
3494 if (ic->ic_nrunning > 0) {
/*
 * net80211 state-machine hook.  Handles the driver-specific work for
 * each transition (auth/assoc/run), forcibly detours RUN->SCAN/AUTH/
 * ASSOC through INIT (the hardware is fully reset by iwm_release()),
 * and finally chains to the saved net80211 newstate handler.
 * Called with the ic lock held; drops and retakes it around the
 * firmware work (see IEEE80211_UNLOCK below and the elided re-lock).
 */
3504 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
3506 struct iwm_vap *ivp = IWM_VAP(vap);
3507 struct ieee80211com *ic = vap->iv_ic;
3508 struct iwm_softc *sc = ic->ic_softc;
3509 struct iwm_node *in;
3512 IWM_DPRINTF(sc, IWM_DEBUG_STATE,
3513 "switching state %s -> %s\n",
3514 ieee80211_state_name[vap->iv_state],
3515 ieee80211_state_name[nstate]);
3516 IEEE80211_UNLOCK(ic);
3518 /* disable beacon filtering if we're hopping out of RUN */
3519 if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
3520 iwm_mvm_disable_beacon_filter(sc);
3522 if (((in = IWM_NODE(vap->iv_bss)) != NULL))
3525 iwm_release(sc, NULL);
3528 * It's impossible to directly go RUN->SCAN. If we iwm_release()
3529 * above then the card will be completely reinitialized,
3530 * so the driver must do everything necessary to bring the card
3531 * from INIT to SCAN.
3533 * Additionally, upon receiving deauth frame from AP,
3534 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
3535 * state. This will also fail with this driver, so bring the FSM
3536 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
3538 * XXX TODO: fix this for FreeBSD!
3540 if (nstate == IEEE80211_S_SCAN ||
3541 nstate == IEEE80211_S_AUTH ||
3542 nstate == IEEE80211_S_ASSOC) {
3543 IWM_DPRINTF(sc, IWM_DEBUG_STATE,
3544 "Force transition to INIT; MGT=%d\n", arg);
/* Detour via INIT so the freshly-reset card starts from scratch. */
3547 vap->iv_newstate(vap, IEEE80211_S_INIT, arg);
3548 IWM_DPRINTF(sc, IWM_DEBUG_STATE,
3549 "Going INIT->SCAN\n");
3550 nstate = IEEE80211_S_SCAN;
3551 IEEE80211_UNLOCK(ic);
3557 case IEEE80211_S_INIT:
3558 sc->sc_scanband = 0;
3561 case IEEE80211_S_AUTH:
3562 if ((error = iwm_auth(vap, sc)) != 0) {
3563 device_printf(sc->sc_dev,
3564 "%s: could not move to auth state: %d\n",
3570 case IEEE80211_S_ASSOC:
3571 if ((error = iwm_assoc(vap, sc)) != 0) {
3572 device_printf(sc->sc_dev,
3573 "%s: failed to associate: %d\n", __func__,
3579 case IEEE80211_S_RUN:
3581 struct iwm_host_cmd cmd = {
3583 .len = { sizeof(in->in_lq), },
3584 .flags = IWM_CMD_SYNC,
3587 /* Update the association state, now we have it all */
3588 /* (eg associd comes in at this point */
3589 error = iwm_assoc(vap, sc);
3591 device_printf(sc->sc_dev,
3592 "%s: failed to update association state: %d\n",
/* Associated: enable power save, beacon filtering, quotas and rates. */
3598 in = IWM_NODE(vap->iv_bss);
3599 iwm_mvm_power_mac_update_mode(sc, in);
3600 iwm_mvm_enable_beacon_filter(sc, in);
3601 iwm_mvm_update_quotas(sc, in);
3602 iwm_setrates(sc, in);
/* Push the LQ (rate table) command built by iwm_setrates(). */
3604 cmd.data[0] = &in->in_lq;
3605 if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
3606 device_printf(sc->sc_dev,
3607 "%s: IWM_LQ_CMD failed\n", __func__);
/* Chain to the original net80211 state handler saved at vap setup. */
3619 return (ivp->iv_newstate(vap, nstate, arg));
/*
 * Taskqueue callback run at end of a scan pass.  If only 2 GHz has been
 * scanned and the NVM says 5 GHz is supported, kick off the 5 GHz pass;
 * otherwise tell net80211 the scan is done and reset the band state.
 */
3623 iwm_endscan_cb(void *arg, int pending)
3625 struct iwm_softc *sc = arg;
3626 struct ieee80211com *ic = &sc->sc_ic;
3630 IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
3635 if (sc->sc_scanband == IEEE80211_CHAN_2GHZ &&
3636 sc->sc_nvm.sku_cap_band_52GHz_enable) {
3638 if ((error = iwm_mvm_scan_request(sc,
3639 IEEE80211_CHAN_5GHZ, 0, NULL, 0)) != 0) {
3640 device_printf(sc->sc_dev, "could not initiate scan\n");
3649 ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
3651 sc->sc_scanband = 0;
/*
 * Bring the hardware fully up: run the INIT firmware, restart the HW,
 * load the regular runtime firmware, then configure antennas, PHY DB,
 * the auxiliary station, PHY contexts, device power, and activate the
 * four TX rings.  On any failure the device is stopped (error path at
 * the bottom).
 */
3657 iwm_init_hw(struct iwm_softc *sc)
3659 struct ieee80211com *ic = &sc->sc_ic;
3662 if ((error = iwm_start_hw(sc)) != 0)
3665 if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
3670 * should stop and start HW since that INIT
3673 iwm_stop_device(sc);
3674 if ((error = iwm_start_hw(sc)) != 0) {
3675 device_printf(sc->sc_dev, "could not initialize hardware\n");
3679 /* omstart, this time with the regular firmware */
3680 error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
3682 device_printf(sc->sc_dev, "could not load firmware\n");
3686 if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
3689 /* Send phy db control command and then phy db calibration*/
3690 if ((error = iwm_send_phy_db_data(sc)) != 0)
3693 if ((error = iwm_send_phy_cfg_cmd(sc)) != 0)
3696 /* Add auxiliary station for scanning */
3697 if ((error = iwm_mvm_add_aux_sta(sc)) != 0)
3700 for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
3702 * The channel used here isn't relevant as it's
3703 * going to be overwritten in the other flows.
3704 * For now use the first channel we have.
3706 if ((error = iwm_mvm_phy_ctxt_add(sc,
3707 &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
3711 error = iwm_mvm_power_update_device(sc);
3715 /* Mark TX rings as active. */
3716 for (qid = 0; qid < 4; qid++) {
3717 iwm_enable_txq(sc, qid, qid);
/* Error path: leave the device stopped on any init failure. */
3723 iwm_stop_device(sc);
3727 /* Allow multicast from our BSSID. */
/*
 * Program the firmware multicast filter to pass frames from our BSSID
 * (and our own frames, filter_own).  The command is heap-allocated
 * because its size is rounded up to a 4-byte multiple.
 */
3729 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
3731 struct ieee80211_node *ni = vap->iv_bss;
3732 struct iwm_mcast_filter_cmd *cmd;
3736 size = roundup(sizeof(*cmd), 4);
3737 cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
3740 cmd->filter_own = 1;
3744 IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
3746 error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
3747 IWM_CMD_SYNC, size, cmd);
3748 free(cmd, M_DEVBUF);
/*
 * Driver "up" path: no-op if already initialized, otherwise bump the
 * generation, bring the hardware up via iwm_init_hw(), mark the driver
 * running and start the watchdog callout.
 */
3754 iwm_init(struct iwm_softc *sc)
3758 if (sc->sc_flags & IWM_FLAG_HW_INITED) {
3761 sc->sc_generation++;
3762 sc->sc_flags &= ~IWM_FLAG_STOPPED;
3764 if ((error = iwm_init_hw(sc)) != 0) {
3770 * Ok, firmware loaded and we are jogging
3772 sc->sc_flags |= IWM_FLAG_HW_INITED;
/* Watchdog fires every second; see iwm_watchdog(). */
3773 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
/*
 * net80211 transmit entry point: reject frames while not initialized,
 * otherwise enqueue the mbuf on the driver send queue (drained by
 * iwm_start()).
 */
3777 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
3779 struct iwm_softc *sc;
3785 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3789 error = mbufq_enqueue(&sc->sc_snd, m);
3800 * Dequeue packets from sendq and call send.
/*
 * Drain the software send queue into the hardware TX rings while ring
 * space is available (qfullmsk == 0 means no TX queue is full).
 * NOTE(review): sampled listing — locals ('m', 'ac') and braces elided.
 */
3803 iwm_start(struct iwm_softc *sc)
3805 struct ieee80211_node *ni;
3809 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
3810 while (sc->qfullmsk == 0 &&
3811 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
/* The node reference rides in the pkthdr rcvif field on the TX path. */
3812 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
3813 if (iwm_tx(sc, m, ni, ac) != 0) {
/* TX failed: count the error and drop our node reference. */
3814 if_inc_counter(ni->ni_vap->iv_ifp,
3815 IFCOUNTER_OERRORS, 1);
3816 ieee80211_free_node(ni);
/* Arm the device-timeout countdown (ticked by iwm_watchdog). */
3819 sc->sc_tx_timer = 15;
3821 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
/*
 * Take the interface down: clear run-state flags, cancel the TX timeout,
 * and power down the device.  NOTE(review): sampled listing — braces and
 * any locking elided.
 */
3825 iwm_stop(struct iwm_softc *sc)
3828 sc->sc_flags &= ~IWM_FLAG_HW_INITED;
3829 sc->sc_flags |= IWM_FLAG_STOPPED;
/* Invalidate callbacks belonging to the previous init generation. */
3830 sc->sc_generation++;
3831 sc->sc_scanband = 0;
3832 sc->sc_tx_timer = 0;
3833 iwm_stop_device(sc);
/*
 * 1-second watchdog callout.  Counts down sc_tx_timer (armed by the TX
 * path); on expiry reports a device timeout and restarts the 802.11
 * stack.  Re-arms itself at the end.  NOTE(review): sampled listing —
 * the early return after restart is elided.
 */
3837 iwm_watchdog(void *arg)
3839 struct iwm_softc *sc = arg;
3840 struct ieee80211com *ic = &sc->sc_ic;
3842 if (sc->sc_tx_timer > 0) {
3843 if (--sc->sc_tx_timer == 0) {
3844 device_printf(sc->sc_dev, "device timeout\n");
/* Full stack restart is the recovery strategy for a wedged TX path. */
3848 ieee80211_restart_all(ic);
3849 counter_u64_add(ic->ic_oerrors, 1);
3853 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
/*
 * net80211 ic_parent hook: bring the device up or down to match the
 * number of running VAPs.  NOTE(review): sampled listing — the actual
 * iwm_init/iwm_stop calls and locking are elided.
 */
3857 iwm_parent(struct ieee80211com *ic)
3859 struct iwm_softc *sc = ic->ic_softc;
3863 if (ic->ic_nrunning > 0) {
/* VAP running but hardware down: (elided) initialize the device. */
3864 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
/* No VAPs running and hardware up: (elided) stop the device. */
3868 } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
3872 ieee80211_start_all(ic);
3876 * The interrupt side of things
3880 * error dumping routines are from iwlwifi/mvm/utils.c
3884 * Note: This structure is read from the device with IO accesses,
3885 * and the reading already does the endian conversion. As it is
3886 * read with uint32_t-sized accesses, any members with a different size
3887 * need to be ordered correctly though!
/*
 * Firmware error-event table layout, mirrored from the device SRAM by
 * iwm_nic_error().  Field order must match the firmware ABI exactly.
 */
3889 struct iwm_error_event_table {
3890 uint32_t valid; /* (nonzero) valid, (0) log is empty */
3891 uint32_t error_id; /* type of error */
3892 uint32_t pc; /* program counter */
3893 uint32_t blink1; /* branch link */
3894 uint32_t blink2; /* branch link */
3895 uint32_t ilink1; /* interrupt link */
3896 uint32_t ilink2; /* interrupt link */
3897 uint32_t data1; /* error-specific data */
3898 uint32_t data2; /* error-specific data */
3899 uint32_t data3; /* error-specific data */
3900 uint32_t bcon_time; /* beacon timer */
3901 uint32_t tsf_low; /* network timestamp function timer */
3902 uint32_t tsf_hi; /* network timestamp function timer */
3903 uint32_t gp1; /* GP1 timer register */
3904 uint32_t gp2; /* GP2 timer register */
3905 uint32_t gp3; /* GP3 timer register */
3906 uint32_t ucode_ver; /* uCode version */
3907 uint32_t hw_ver; /* HW Silicon version */
3908 uint32_t brd_ver; /* HW board version */
3909 uint32_t log_pc; /* log program counter */
3910 uint32_t frame_ptr; /* frame pointer */
3911 uint32_t stack_ptr; /* stack pointer */
3912 uint32_t hcmd; /* last host command header */
3913 uint32_t isr0; /* isr status register LMPM_NIC_ISR0:
3915 uint32_t isr1; /* isr status register LMPM_NIC_ISR1:
3917 uint32_t isr2; /* isr status register LMPM_NIC_ISR2:
3919 uint32_t isr3; /* isr status register LMPM_NIC_ISR3:
3921 uint32_t isr4; /* isr status register LMPM_NIC_ISR4:
3923 uint32_t isr_pref; /* isr status register LMPM_NIC_PREF_STAT */
3924 uint32_t wait_event; /* wait event() caller address */
3925 uint32_t l2p_control; /* L2pControlField */
3926 uint32_t l2p_duration; /* L2pDurationField */
3927 uint32_t l2p_mhvalid; /* L2pMhValidBits */
3928 uint32_t l2p_addr_match; /* L2pAddrMatchStat */
3929 uint32_t lmpm_pmg_sel; /* indicate which clocks are turned on
3931 uint32_t u_timestamp; /* indicate when the date and time of the
3933 uint32_t flow_handler; /* FH read/write pointers, RX credit */
/* Offsets used when walking the on-device error log. */
3936 #define ERROR_START_OFFSET (1 * sizeof(uint32_t))
3937 #define ERROR_ELEM_SIZE (7 * sizeof(uint32_t))
/*
 * Map of firmware error_id values to human-readable names for the error
 * dump.  The final "ADVANCED_SYSASSERT" entry (num == 0) is the
 * catch-all returned by iwm_desc_lookup() when no id matches.
 * NOTE(review): the struct header lines for this table are elided in
 * this listing.
 */
3943 } advanced_lookup[] = {
3944 { "NMI_INTERRUPT_WDG", 0x34 },
3945 { "SYSASSERT", 0x35 },
3946 { "UCODE_VERSION_MISMATCH", 0x37 },
3947 { "BAD_COMMAND", 0x38 },
3948 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
3949 { "FATAL_ERROR", 0x3D },
3950 { "NMI_TRM_HW_ERR", 0x46 },
3951 { "NMI_INTERRUPT_TRM", 0x4C },
3952 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
3953 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
3954 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
3955 { "NMI_INTERRUPT_HOST", 0x66 },
3956 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
3957 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
3958 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
3959 { "ADVANCED_SYSASSERT", 0 },
/*
 * Translate a firmware error_id into its symbolic name.  Scans all but
 * the last entry of advanced_lookup[]; falls through to the final
 * "ADVANCED_SYSASSERT" sentinel when nothing matches.
 */
3963 iwm_desc_lookup(uint32_t num)
3967 for (i = 0; i < nitems(advanced_lookup) - 1; i++)
3968 if (advanced_lookup[i].num == num)
3969 return advanced_lookup[i].name;
3971 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
3972 return advanced_lookup[i].name;
3976 * Support for dumping the error log seemed like a good idea ...
3977 * but it's mostly hex junk and the only sensible thing is the
3978 * hw/ucode revision (which we know anyway). Since it's here,
3979 * I'll just leave it in, just in case e.g. the Intel guys want to
3980 * help us decipher some "ADVANCED_SYSASSERT" later.
/*
 * Read the firmware error-event table out of device SRAM and print every
 * field.  Called from the interrupt path on IWM_CSR_INT_BIT_SW_ERR.
 * NOTE(review): sampled listing — braces, 'base' declaration, and
 * early-return statements after the printfs are elided.
 */
3983 iwm_nic_error(struct iwm_softc *sc)
3985 struct iwm_error_event_table table;
3988 device_printf(sc->sc_dev, "dumping device error log\n");
3989 base = sc->sc_uc.uc_error_event_table;
/* Sanity-check the SRAM pointer the firmware reported at ALIVE time. */
3990 if (base < 0x800000 || base >= 0x80C000) {
3991 device_printf(sc->sc_dev,
3992 "Not valid error log pointer 0x%08x\n", base);
/* iwm_read_mem() takes a count of 32-bit words, not bytes. */
3996 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t)) != 0) {
3997 device_printf(sc->sc_dev, "reading errlog failed\n");
4002 device_printf(sc->sc_dev, "errlog not found, skipping\n");
4006 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
4007 device_printf(sc->sc_dev, "Start IWL Error Log Dump:\n");
4008 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
4009 sc->sc_flags, table.valid);
/* Field-by-field dump; layout documented in struct iwm_error_event_table. */
4012 device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
4013 iwm_desc_lookup(table.error_id));
4014 device_printf(sc->sc_dev, "%08X | uPc\n", table.pc);
4015 device_printf(sc->sc_dev, "%08X | branchlink1\n", table.blink1);
4016 device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
4017 device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
4018 device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
4019 device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
4020 device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
4021 device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
4022 device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
4023 device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
4024 device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
4025 device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
4026 device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
4027 device_printf(sc->sc_dev, "%08X | time gp3\n", table.gp3);
4028 device_printf(sc->sc_dev, "%08X | uCode version\n", table.ucode_ver);
4029 device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
4030 device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
4031 device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
4032 device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
4033 device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
4034 device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
4035 device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
4036 device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
4037 device_printf(sc->sc_dev, "%08X | isr_pref\n", table.isr_pref);
4038 device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
4039 device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
4040 device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
4041 device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
4042 device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
4043 device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
4044 device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
4045 device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
/*
 * Helpers for iwm_notif_intr(): sync the RX DMA buffer for CPU reads and
 * point _var_/_ptr_ at the payload immediately following the packet
 * header.  Both rely on 'ring' and 'data' being in scope at the use site.
 */
4049 #define SYNC_RESP_STRUCT(_var_, _pkt_) \
4051 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
4052 _var_ = (void *)((_pkt_)+1); \
4053 } while (/*CONSTCOND*/0)
4055 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_) \
4057 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
4058 _ptr_ = (void *)((_pkt_)+1); \
4059 } while (/*CONSTCOND*/0)
/* Advance the RX ring cursor with wrap-around. */
4061 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
4064 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
4065 * Basic structure from if_iwn
/*
 * Walk the RX ring from our cursor to the hardware's closed_rb_num write
 * pointer, dispatching each firmware notification/response by hdr.code.
 * NOTE(review): sampled listing — braces, some locals (qid/idx/code), and
 * several break statements are elided between the visible lines.
 */
4068 iwm_notif_intr(struct iwm_softc *sc)
/* Pull the status page (containing the HW write pointer) into CPU view. */
4072 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
4073 BUS_DMASYNC_POSTREAD);
4075 hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
4076 while (sc->rxq.cur != hw) {
4077 struct iwm_rx_ring *ring = &sc->rxq;
4078 struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
4079 struct iwm_rx_packet *pkt;
4080 struct iwm_cmd_response *cresp;
4083 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
4084 BUS_DMASYNC_POSTREAD);
4085 pkt = mtod(data->m, struct iwm_rx_packet *);
/* Bit 7 of qid marks firmware-originated packets; mask it off here. */
4087 qid = pkt->hdr.qid & ~0x80;
4090 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
4091 "rx packet qid=%d idx=%d flags=%x type=%x %d %d\n",
4092 pkt->hdr.qid & ~0x80, pkt->hdr.idx, pkt->hdr.flags,
4093 pkt->hdr.code, sc->rxq.cur, hw);
4096 * randomly get these from the firmware, no idea why.
4097 * they at least seem harmless, so just ignore them for now
/* Skip all-zero and 0x5555xxxx junk entries the firmware emits. */
4099 if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
4100 || pkt->len_n_flags == htole32(0x55550000))) {
4105 switch (pkt->hdr.code) {
4106 case IWM_REPLY_RX_PHY_CMD:
4107 iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
4110 case IWM_REPLY_RX_MPDU_CMD:
4111 iwm_mvm_rx_rx_mpdu(sc, pkt, data);
4115 iwm_mvm_rx_tx_cmd(sc, pkt, data);
4118 case IWM_MISSED_BEACONS_NOTIFICATION: {
4119 struct iwm_missed_beacons_notif *resp;
4122 /* XXX look at mac_id to determine interface ID */
4123 struct ieee80211com *ic = &sc->sc_ic;
4124 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4126 SYNC_RESP_STRUCT(resp, pkt);
4127 missed = le32toh(resp->consec_missed_beacons);
4129 IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
4130 "%s: MISSED_BEACON: mac_id=%d, "
4131 "consec_since_last_rx=%d, consec=%d, num_expect=%d "
4134 le32toh(resp->mac_id),
4135 le32toh(resp->consec_missed_beacons_since_last_rx),
4136 le32toh(resp->consec_missed_beacons),
4137 le32toh(resp->num_expected_beacons),
4138 le32toh(resp->num_recvd_beacons));
4144 /* XXX no net80211 locking? */
/* Only escalate to a beacon-miss event while associated and not scanning. */
4145 if (vap->iv_state == IEEE80211_S_RUN &&
4146 (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
4147 if (missed > vap->iv_bmissthreshold) {
4148 /* XXX bad locking; turn into task */
4150 ieee80211_beacon_miss(ic);
4157 case IWM_MVM_ALIVE: {
/* Firmware is alive: record the SRAM table pointers it reports. */
4158 struct iwm_mvm_alive_resp *resp;
4159 SYNC_RESP_STRUCT(resp, pkt);
4161 sc->sc_uc.uc_error_event_table
4162 = le32toh(resp->error_event_table_ptr);
4163 sc->sc_uc.uc_log_event_table
4164 = le32toh(resp->log_event_table_ptr);
4165 sc->sched_base = le32toh(resp->scd_base_ptr);
4166 sc->sc_uc.uc_ok = resp->status == IWM_ALIVE_STATUS_OK;
/* Wake whoever is sleeping in load_ucode_wait_alive(). */
4168 sc->sc_uc.uc_intr = 1;
4172 case IWM_CALIB_RES_NOTIF_PHY_DB: {
4173 struct iwm_calib_res_notif_phy_db *phy_db_notif;
4174 SYNC_RESP_STRUCT(phy_db_notif, pkt);
4176 iwm_phy_db_set_section(sc, phy_db_notif);
4180 case IWM_STATISTICS_NOTIFICATION: {
4181 struct iwm_notif_statistics *stats;
4182 SYNC_RESP_STRUCT(stats, pkt);
4183 memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
4184 sc->sc_noise = iwm_get_noise(&stats->rx.general);
4187 case IWM_NVM_ACCESS_CMD:
/* Copy the response into sc_cmd_resp for the waiting command issuer. */
4188 if (sc->sc_wantresp == ((qid << 16) | idx)) {
4189 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
4190 BUS_DMASYNC_POSTREAD);
4191 memcpy(sc->sc_cmd_resp,
4192 pkt, sizeof(sc->sc_cmd_resp));
4196 case IWM_PHY_CONFIGURATION_CMD:
4197 case IWM_TX_ANT_CONFIGURATION_CMD:
4199 case IWM_MAC_CONTEXT_CMD:
4200 case IWM_REPLY_SF_CFG_CMD:
4201 case IWM_POWER_TABLE_CMD:
4202 case IWM_PHY_CONTEXT_CMD:
4203 case IWM_BINDING_CONTEXT_CMD:
4204 case IWM_TIME_EVENT_CMD:
4205 case IWM_SCAN_REQUEST_CMD:
4206 case IWM_REPLY_BEACON_FILTERING_CMD:
4207 case IWM_MAC_PM_POWER_TABLE:
4208 case IWM_TIME_QUOTA_CMD:
4209 case IWM_REMOVE_STA:
4210 case IWM_TXPATH_FLUSH:
/* Generic command responses: stash status for the synchronous sender. */
4212 SYNC_RESP_STRUCT(cresp, pkt);
4213 if (sc->sc_wantresp == ((qid << 16) | idx)) {
4214 memcpy(sc->sc_cmd_resp,
4215 pkt, sizeof(*pkt)+sizeof(*cresp));
4220 case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
4223 case IWM_INIT_COMPLETE_NOTIF:
4224 sc->sc_init_complete = 1;
4225 wakeup(&sc->sc_init_complete);
4228 case IWM_SCAN_COMPLETE_NOTIFICATION: {
4229 struct iwm_scan_complete_notif *notif;
4230 SYNC_RESP_STRUCT(notif, pkt);
/* Defer end-of-scan processing to the taskqueue (iwm_endscan_cb). */
4231 taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
4234 case IWM_REPLY_ERROR: {
4235 struct iwm_error_resp *resp;
4236 SYNC_RESP_STRUCT(resp, pkt);
4238 device_printf(sc->sc_dev,
4239 "firmware error 0x%x, cmd 0x%x\n",
4240 le32toh(resp->error_type),
4244 case IWM_TIME_EVENT_NOTIFICATION: {
4245 struct iwm_time_event_notif *notif;
4246 SYNC_RESP_STRUCT(notif, pkt);
4248 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
4249 "TE notif status = 0x%x action = 0x%x\n",
4250 notif->status, notif->action);
4253 case IWM_MCAST_FILTER_CMD:
4257 device_printf(sc->sc_dev,
4258 "frame %d/%d %x UNHANDLED (this should "
4259 "not happen)\n", qid, idx,
4265 * Why test bit 0x80? The Linux driver:
4267 * There is one exception: uCode sets bit 15 when it
4268 * originates the response/notification, i.e. when the
4269 * response/notification is not a direct response to a
4270 * command sent by the driver. For example, uCode issues
4271 * IWM_REPLY_RX when it sends a received frame to the driver;
4272 * it is not a direct response to any driver command.
4274 * Ok, so since when is 7 == 15? Well, the Linux driver
4275 * uses a slightly different format for pkt->hdr, and "qid"
4276 * is actually the upper byte of a two-byte field.
/* Packet was a direct reply to one of our commands: complete it. */
4278 if (!(pkt->hdr.qid & (1 << 7))) {
4279 iwm_cmd_done(sc, pkt);
4285 IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
4286 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
4289 * Tell the firmware what we have processed.
4290 * Seems like the hardware gets upset unless we align
/* Write pointer is kept 8-aligned and one slot behind our cursor. */
4293 hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
4294 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
/*
 * Main interrupt handler (the function header line is elided in this
 * sampled listing — presumably iwm_intr(void *arg); confirm against the
 * full file).  Reads the interrupt cause either from the ICT table or
 * directly from IWM_CSR_INT, then services each cause bit in turn.
 */
4300 struct iwm_softc *sc = arg;
/* Mask all interrupts while we service this one. */
4306 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
4308 if (sc->sc_flags & IWM_FLAG_USE_ICT) {
4309 uint32_t *ict = sc->ict_dma.vaddr;
4312 tmp = htole32(ict[sc->ict_cur]);
4317 * ok, there was something. keep plowing until we have all.
/* Consume ICT entries until an empty slot, OR-ing causes together. */
4322 ict[sc->ict_cur] = 0;
4323 sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
4324 tmp = htole32(ict[sc->ict_cur]);
4327 /* this is where the fun begins. don't ask */
4328 if (r1 == 0xffffffff)
4331 /* i am not expected to understand this */
/* ICT encodes the cause bits compressed; expand to CSR_INT layout. */
4334 r1 = (0xff & r1) | ((0xff00 & r1) << 16);
/* Non-ICT path: read the cause registers directly. */
4336 r1 = IWM_READ(sc, IWM_CSR_INT);
4337 /* "hardware gone" (where, fishing?) */
4338 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
4340 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
4342 if (r1 == 0 && r2 == 0) {
/* Ack the causes we are about to handle. */
4346 IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
4349 handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
/* Firmware software error: dump state and restart the stack. */
4351 if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
4353 struct ieee80211com *ic = &sc->sc_ic;
4354 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4359 /* Dump driver status (TX and RX rings) while we're here. */
4360 device_printf(sc->sc_dev, "driver status:\n");
4361 for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
4362 struct iwm_tx_ring *ring = &sc->txq[i];
4363 device_printf(sc->sc_dev,
4364 " tx ring %2d: qid=%-2d cur=%-3d "
4366 i, ring->qid, ring->cur, ring->queued);
4368 device_printf(sc->sc_dev,
4369 " rx ring: cur=%d\n", sc->rxq.cur);
4370 device_printf(sc->sc_dev,
4371 " 802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
4373 /* Don't stop the device; just do a VAP restart */
4377 printf("%s: null vap\n", __func__);
4381 device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
4382 "restarting\n", __func__, vap->iv_state);
4384 /* XXX TODO: turn this into a callout/taskqueue */
4385 ieee80211_restart_all(ic);
4389 if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
4390 handled |= IWM_CSR_INT_BIT_HW_ERR;
4391 device_printf(sc->sc_dev, "hardware error, stopping device\n");
4397 /* firmware chunk loaded */
4398 if (r1 & IWM_CSR_INT_BIT_FH_TX) {
4399 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
4400 handled |= IWM_CSR_INT_BIT_FH_TX;
/* Wake the firmware-load path waiting for this DMA chunk. */
4401 sc->sc_fw_chunk_done = 1;
4405 if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
4406 handled |= IWM_CSR_INT_BIT_RF_KILL;
4407 if (iwm_check_rfkill(sc)) {
4408 device_printf(sc->sc_dev,
4409 "%s: rfkill switch, disabling interface\n",
4416 * The Linux driver uses periodic interrupts to avoid races.
4417 * We cargo-cult like it's going out of fashion.
4419 if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
4420 handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
4421 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
4422 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
4424 IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
/* RX work: (elided) iwm_notif_intr() drains the ring here. */
4428 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
4429 handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
4430 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
4434 /* enable periodic interrupt, see above */
4435 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
4436 IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
4437 IWM_CSR_INT_PERIODIC_ENA);
4440 if (__predict_false(r1 & ~handled))
4441 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
4442 "%s: unhandled interrupts: %x\n", __func__, r1);
/* Re-enable the interrupt mask we cleared on entry. */
4446 iwm_restore_interrupts(sc);
4453 * Autoconf glue-sniffing
/* PCI vendor/device IDs for the adapters this driver supports. */
4455 #define PCI_VENDOR_INTEL 0x8086
4456 #define PCI_PRODUCT_INTEL_WL_3160_1 0x08b3
4457 #define PCI_PRODUCT_INTEL_WL_3160_2 0x08b4
4458 #define PCI_PRODUCT_INTEL_WL_7260_1 0x08b1
4459 #define PCI_PRODUCT_INTEL_WL_7260_2 0x08b2
4460 #define PCI_PRODUCT_INTEL_WL_7265_1 0x095a
4461 #define PCI_PRODUCT_INTEL_WL_7265_2 0x095b
/* Probe table mapping device IDs to marketing names (see iwm_probe). */
4463 static const struct iwm_devices {
4467 { PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
4468 { PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
4469 { PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
4470 { PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
4471 { PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
4472 { PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
/*
 * device_probe method: match the PCI vendor/device pair against the
 * iwm_devices table and set the device description on a hit.
 * NOTE(review): the no-match return (presumably ENXIO) is elided.
 */
4476 iwm_probe(device_t dev)
4480 for (i = 0; i < nitems(iwm_devices); i++)
4481 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
4482 pci_get_device(dev) == iwm_devices[i].device) {
4483 device_set_desc(dev, iwm_devices[i].name);
4484 return (BUS_PROBE_DEFAULT);
/*
 * Select per-chip parameters: firmware image name and whether the
 * host-interrupt-operation-mode workaround is required.
 * NOTE(review): break statements and the return value are elided.
 */
4491 iwm_dev_check(device_t dev)
4493 struct iwm_softc *sc;
4495 sc = device_get_softc(dev);
4497 switch (pci_get_device(dev)) {
4498 case PCI_PRODUCT_INTEL_WL_3160_1:
4499 case PCI_PRODUCT_INTEL_WL_3160_2:
4500 sc->sc_fwname = "iwm3160fw";
4501 sc->host_interrupt_operation_mode = 1;
4503 case PCI_PRODUCT_INTEL_WL_7260_1:
4504 case PCI_PRODUCT_INTEL_WL_7260_2:
4505 sc->sc_fwname = "iwm7260fw";
4506 sc->host_interrupt_operation_mode = 1;
4508 case PCI_PRODUCT_INTEL_WL_7265_1:
4509 case PCI_PRODUCT_INTEL_WL_7265_2:
4510 sc->sc_fwname = "iwm7265fw";
/* 7265 does not need the interrupt-operation-mode workaround. */
4511 sc->host_interrupt_operation_mode = 0;
4514 device_printf(dev, "unknown adapter type\n");
/*
 * PCI-level attach: clear the retry-timeout register, enable bus
 * mastering, map the memory BAR, allocate an MSI (falling back to INTx),
 * and install the interrupt handler.  NOTE(review): sampled listing —
 * 'rid' initialization and several error returns are elided.
 */
4520 iwm_pci_attach(device_t dev)
4522 struct iwm_softc *sc;
4523 int count, error, rid;
4526 sc = device_get_softc(dev);
4528 /* Clear device-specific "PCI retry timeout" register (41h). */
4529 reg = pci_read_config(dev, 0x40, sizeof(reg));
4530 pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
4532 /* Enable bus-mastering and hardware bug workaround. */
4533 pci_enable_busmaster(dev);
4534 reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
4536 if (reg & PCIM_STATUS_INTxSTATE) {
4537 reg &= ~PCIM_STATUS_INTxSTATE;
4539 pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
4542 sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
4544 if (sc->sc_mem == NULL) {
4545 device_printf(sc->sc_dev, "can't map mem space\n");
4548 sc->sc_st = rman_get_bustag(sc->sc_mem);
4549 sc->sc_sh = rman_get_bushandle(sc->sc_mem);
4551 /* Install interrupt handler. */
/* Prefer a single MSI vector; rid stays 0 (legacy INTx) on failure. */
4554 if (pci_alloc_msi(dev, &count) == 0)
4556 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
4557 (rid != 0 ? 0 : RF_SHAREABLE));
4558 if (sc->sc_irq == NULL) {
4559 device_printf(dev, "can't map interrupt\n");
4562 error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
4563 NULL, iwm_intr, sc, &sc->sc_ih)
4564 if (sc->sc_ih == NULL) {
4565 device_printf(dev, "can't establish interrupt");
4568 sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
/*
 * Undo iwm_pci_attach(): tear down the interrupt handler, release the
 * IRQ resource and MSI allocation, and unmap the memory BAR.
 */
4574 iwm_pci_detach(device_t dev)
4576 struct iwm_softc *sc = device_get_softc(dev);
4578 if (sc->sc_irq != NULL) {
4579 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
4580 bus_release_resource(dev, SYS_RES_IRQ,
4581 rman_get_rid(sc->sc_irq), sc->sc_irq);
4582 pci_release_msi(dev);
4584 if (sc->sc_mem != NULL)
4585 bus_release_resource(dev, SYS_RES_MEMORY,
4586 rman_get_rid(sc->sc_mem), sc->sc_mem);
/*
 * device_attach method: set up software state (send queue, watchdog,
 * taskqueue), attach PCI resources, allocate all DMA memory (firmware
 * buffer, keep-warm page, ICT table, TX scheduler, TX/RX rings), fill in
 * basic net80211 capabilities, and defer the rest to iwm_preinit() via a
 * config intrhook.  NOTE(review): sampled listing — lock init, several
 * 'goto fail' statements, and some capability flags are elided.
 */
4592 iwm_attach(device_t dev)
4594 struct iwm_softc *sc = device_get_softc(dev);
4595 struct ieee80211com *ic = &sc->sc_ic;
4601 mbufq_init(&sc->sc_snd, ifqmaxlen);
4602 callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
4603 TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
4604 sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
4605 taskqueue_thread_enqueue, &sc->sc_tq);
4606 error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
4608 device_printf(dev, "can't start threads, error %d\n",
4614 error = iwm_pci_attach(dev);
/* -1 == no synchronous command response outstanding. */
4618 sc->sc_wantresp = -1;
4620 /* Check device type */
4621 error = iwm_dev_check(dev);
4625 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
4628 * We now start fiddling with the hardware
4630 sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
4631 if (iwm_prepare_card_hw(sc) != 0) {
4632 device_printf(dev, "could not initialize hardware\n");
4636 /* Allocate DMA memory for firmware transfers. */
4637 if ((error = iwm_alloc_fwmem(sc)) != 0) {
4638 device_printf(dev, "could not allocate memory for firmware\n");
4642 /* Allocate "Keep Warm" page. */
4643 if ((error = iwm_alloc_kw(sc)) != 0) {
4644 device_printf(dev, "could not allocate keep warm page\n");
4648 /* We use ICT interrupts */
4649 if ((error = iwm_alloc_ict(sc)) != 0) {
4650 device_printf(dev, "could not allocate ICT table\n");
4654 /* Allocate TX scheduler "rings". */
4655 if ((error = iwm_alloc_sched(sc)) != 0) {
4656 device_printf(dev, "could not allocate TX scheduler rings\n");
4660 /* Allocate TX rings */
4661 for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
4662 if ((error = iwm_alloc_tx_ring(sc,
4663 &sc->txq[txq_i], txq_i)) != 0) {
4665 "could not allocate TX ring %d\n",
4671 /* Allocate RX ring. */
4672 if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
4673 device_printf(dev, "could not allocate RX ring\n");
4677 /* Clear pending interrupts. */
4678 IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
4681 ic->ic_name = device_get_nameunit(sc->sc_dev);
4682 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
4683 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
4685 /* Set device capabilities. */
4688 IEEE80211_C_WPA | /* WPA/RSN */
4690 IEEE80211_C_SHSLOT | /* short slot time supported */
4691 IEEE80211_C_SHPREAMBLE /* short preamble supported */
4692 // IEEE80211_C_BGSCAN /* capable of bg scanning */
4694 for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
4695 sc->sc_phyctxt[i].id = i;
4696 sc->sc_phyctxt[i].color = 0;
4697 sc->sc_phyctxt[i].ref = 0;
4698 sc->sc_phyctxt[i].channel = NULL;
4702 sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
/* Firmware load needs working interrupts; defer to an intrhook. */
4703 sc->sc_preinit_hook.ich_func = iwm_preinit;
4704 sc->sc_preinit_hook.ich_arg = sc;
4705 if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
4706 device_printf(dev, "config_intrhook_establish failed\n");
4711 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
4712 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
4713 CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
4716 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
4717 "<-%s\n", __func__);
4721 /* Free allocated memory if something failed during attachment. */
4723 iwm_detach_local(sc, 0);
/*
 * WME/EDCA update callback.  Currently a stub that only logs the call;
 * the firmware's queue parameters are not reprogrammed here.
 */
4729 iwm_update_edca(struct ieee80211com *ic)
4731 struct iwm_softc *sc = ic->ic_softc;
4733 device_printf(sc->sc_dev, "%s: called\n", __func__);
/*
 * Deferred second attach stage, run from the config intrhook once
 * interrupts work: run the init firmware to read the NVM, then complete
 * net80211 attachment (channel map, vap methods, radiotap).  On failure
 * it tears everything down with iwm_detach_local().
 * NOTE(review): sampled listing — locking and some error checks elided.
 */
4738 iwm_preinit(void *arg)
4740 struct iwm_softc *sc = arg;
4741 device_t dev = sc->sc_dev;
4742 struct ieee80211com *ic = &sc->sc_ic;
4745 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
4746 "->%s\n", __func__);
4749 if ((error = iwm_start_hw(sc)) != 0) {
4750 device_printf(dev, "could not initialize hardware\n");
/* Run the "init" ucode once to pull NVM/calibration, then power down. */
4755 error = iwm_run_init_mvm_ucode(sc, 1);
4756 iwm_stop_device(sc);
4762 "revision: 0x%x, firmware %d.%d (API ver. %d)\n",
4763 sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
4764 IWM_UCODE_MAJOR(sc->sc_fwver),
4765 IWM_UCODE_MINOR(sc->sc_fwver),
4766 IWM_UCODE_API(sc->sc_fwver),
4768 /* not all hardware can do 5GHz band */
4769 if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
4770 memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
4771 sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
4774 iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
4778 * At this point we've committed - if we fail to do setup,
4779 * we now also have to tear down the net80211 state.
4781 ieee80211_ifattach(ic);
4782 ic->ic_vap_create = iwm_vap_create;
4783 ic->ic_vap_delete = iwm_vap_delete;
4784 ic->ic_raw_xmit = iwm_raw_xmit;
4785 ic->ic_node_alloc = iwm_node_alloc;
4786 ic->ic_scan_start = iwm_scan_start;
4787 ic->ic_scan_end = iwm_scan_end;
4788 ic->ic_update_mcast = iwm_update_mcast;
4789 ic->ic_getradiocaps = iwm_init_channel_map;
4790 ic->ic_set_channel = iwm_set_channel;
4791 ic->ic_scan_curchan = iwm_scan_curchan;
4792 ic->ic_scan_mindwell = iwm_scan_mindwell;
4793 ic->ic_wme.wme_update = iwm_update_edca;
4794 ic->ic_parent = iwm_parent;
4795 ic->ic_transmit = iwm_transmit;
4796 iwm_radiotap_attach(sc);
4798 ieee80211_announce(ic);
4800 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
4801 "<-%s\n", __func__);
4802 config_intrhook_disestablish(&sc->sc_preinit_hook);
/* Error path: release the intrhook and free everything allocated. */
4806 config_intrhook_disestablish(&sc->sc_preinit_hook);
4807 iwm_detach_local(sc, 0);
4811 * Attach the interface to 802.11 radiotap.
/*
 * Register the driver's TX and RX radiotap headers with net80211 so
 * monitor-mode captures carry radio metadata.
 */
4814 iwm_radiotap_attach(struct iwm_softc *sc)
4816 struct ieee80211com *ic = &sc->sc_ic;
4818 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
4819 "->%s begin\n", __func__);
4820 ieee80211_radiotap_attach(ic,
4821 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
4822 IWM_TX_RADIOTAP_PRESENT,
4823 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
4824 IWM_RX_RADIOTAP_PRESENT);
4825 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
4826 "->%s end\n", __func__);
/*
 * Create the (single) VAP: allocate the driver-private iwm_vap wrapper,
 * let net80211 set it up, then interpose our newstate handler while
 * remembering the original so iwm_newstate can chain to it.
 * NOTE(review): sampled listing — vap assignment from ivp and the return
 * statement are elided.
 */
4829 static struct ieee80211vap *
4830 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
4831 enum ieee80211_opmode opmode, int flags,
4832 const uint8_t bssid[IEEE80211_ADDR_LEN],
4833 const uint8_t mac[IEEE80211_ADDR_LEN])
4835 struct iwm_vap *ivp;
4836 struct ieee80211vap *vap;
4838 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
4840 ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
4842 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
4843 vap->iv_bmissthreshold = 10; /* override default */
4844 /* Override with driver methods. */
4845 ivp->iv_newstate = vap->iv_newstate;
4846 vap->iv_newstate = iwm_newstate;
4848 ieee80211_ratectl_init(vap);
4849 /* Complete setup. */
4850 ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
4852 ic->ic_opmode = opmode;
/*
 * Destroy a VAP: reverse iwm_vap_create — deinit rate control, detach
 * from net80211, and free the driver-private wrapper.
 */
4858 iwm_vap_delete(struct ieee80211vap *vap)
4860 struct iwm_vap *ivp = IWM_VAP(vap);
4862 ieee80211_ratectl_deinit(vap);
4863 ieee80211_vap_detach(vap);
4864 free(ivp, M_80211_VAP);
/*
 * net80211 ic_scan_start hook: kick off a firmware-driven scan on the
 * 2 GHz band.  If a scan is already in flight (sc_scanband nonzero) the
 * request is skipped; on firmware error the scan is cancelled.
 * NOTE(review): sampled listing — locking and braces elided.
 */
4868 iwm_scan_start(struct ieee80211com *ic)
4870 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4871 struct iwm_softc *sc = ic->ic_softc;
4874 if (sc->sc_scanband)
4877 error = iwm_mvm_scan_request(sc, IEEE80211_CHAN_2GHZ, 0, NULL, 0);
4879 device_printf(sc->sc_dev, "could not initiate scan\n");
4881 ieee80211_cancel_scan(vap);
/*
 * Empty net80211 callback stubs: scan end / multicast update / channel
 * change / per-channel dwell hooks are all handled by the firmware (or
 * not needed), so these bodies are intentionally empty.
 */
4887 iwm_scan_end(struct ieee80211com *ic)
4892 iwm_update_mcast(struct ieee80211com *ic)
4897 iwm_set_channel(struct ieee80211com *ic)
4902 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
4907 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
/*
 * Serialized (re)initialization: waits for IWM_FLAG_BUSY to clear, marks
 * the softc busy, then (elided) initializes or stops the device depending
 * on whether any VAP is running, and wakes the next waiter.
 */
4913 iwm_init_task(void *arg1)
4915 struct iwm_softc *sc = arg1;
/* Single-threaded init: sleep until no other init/stop is in progress. */
4918 while (sc->sc_flags & IWM_FLAG_BUSY)
4919 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
4920 sc->sc_flags |= IWM_FLAG_BUSY;
4922 if (sc->sc_ic.ic_nrunning > 0)
4924 sc->sc_flags &= ~IWM_FLAG_BUSY;
4925 wakeup(&sc->sc_flags);
/*
 * device_resume method: re-clear the PCI retry-timeout register (the BIOS
 * may have restored it), re-run the init task, and resume the 802.11
 * stack if iwm_suspend set IWM_FLAG_DORESUME.
 */
4930 iwm_resume(device_t dev)
4932 struct iwm_softc *sc = device_get_softc(dev);
4936 /* Clear device-specific "PCI retry timeout" register (41h). */
4937 reg = pci_read_config(dev, 0x40, sizeof(reg));
4938 pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
4939 iwm_init_task(device_get_softc(dev));
4942 if (sc->sc_flags & IWM_FLAG_DORESUME) {
4943 sc->sc_flags &= ~IWM_FLAG_DORESUME;
4949 ieee80211_resume_all(&sc->sc_ic);
/*
 * device_suspend method: if the interface was running, suspend the
 * 802.11 stack and flag IWM_FLAG_DORESUME so iwm_resume knows to bring
 * it back.  NOTE(review): the conditional stop of the device is elided.
 */
4955 iwm_suspend(device_t dev)
4958 struct iwm_softc *sc = device_get_softc(dev);
4960 do_stop = !! (sc->sc_ic.ic_nrunning > 0);
4962 ieee80211_suspend_all(&sc->sc_ic);
4967 sc->sc_flags |= IWM_FLAG_DORESUME;
/*
 * Common teardown used by both detach and attach-failure paths: drain
 * the taskqueue and watchdog, stop the hardware, optionally detach
 * net80211 state (do_net80211), free all rings and DMA memory, release
 * PCI resources, drain the send queue, and destroy the lock.
 * NOTE(review): sampled listing — some conditional free calls (sched,
 * RX ring) are elided between the visible lines.
 */
4975 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
4977 struct iwm_fw_info *fw = &sc->sc_fw;
4978 device_t dev = sc->sc_dev;
4982 taskqueue_drain_all(sc->sc_tq);
4983 taskqueue_free(sc->sc_tq);
4985 callout_drain(&sc->sc_watchdog_to);
4986 iwm_stop_device(sc);
/* Only detach net80211 when ieee80211_ifattach() actually ran. */
4988 ieee80211_ifdetach(&sc->sc_ic);
4990 /* Free descriptor rings */
4991 for (i = 0; i < nitems(sc->txq); i++)
4992 iwm_free_tx_ring(sc, &sc->txq[i]);
4995 if (fw->fw_fp != NULL)
4996 iwm_fw_info_free(fw);
4998 /* Free scheduler */
5000 if (sc->ict_dma.vaddr != NULL)
5002 if (sc->kw_dma.vaddr != NULL)
5004 if (sc->fw_dma.vaddr != NULL)
5007 /* Finished with the hardware - detach things */
5008 iwm_pci_detach(dev);
5010 mbufq_drain(&sc->sc_snd);
5011 IWM_LOCK_DESTROY(sc);
/* device_detach method: full teardown including net80211 state. */
5017 iwm_detach(device_t dev)
5019 struct iwm_softc *sc = device_get_softc(dev);
5021 return (iwm_detach_local(sc, 1));
/* newbus glue: device method table, driver descriptor, and module
 * registration with dependencies on firmware(9), pci, and wlan. */
5024 static device_method_t iwm_pci_methods[] = {
5025 /* Device interface */
5026 DEVMETHOD(device_probe, iwm_probe),
5027 DEVMETHOD(device_attach, iwm_attach),
5028 DEVMETHOD(device_detach, iwm_detach),
5029 DEVMETHOD(device_suspend, iwm_suspend),
5030 DEVMETHOD(device_resume, iwm_resume),
5035 static driver_t iwm_pci_driver = {
5038 sizeof (struct iwm_softc)
5041 static devclass_t iwm_devclass;
5043 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
5044 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
5045 MODULE_DEPEND(iwm, pci, 1, 1, 1);
5046 MODULE_DEPEND(iwm, wlan, 1, 1, 1);