1 /* $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $ */
4 * Copyright (c) 2014 genua mbh <info@genua.de>
5 * Copyright (c) 2014 Fixup Software Ltd.
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22 * which were used as the reference documentation for this implementation.
24 * Driver version we are currently based off of is
25 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
27 ***********************************************************************
29 * This file is provided under a dual BSD/GPLv2 license. When using or
30 * redistributing this file, you may do so under either license.
34 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
36 * This program is free software; you can redistribute it and/or modify
37 * it under the terms of version 2 of the GNU General Public License as
38 * published by the Free Software Foundation.
40 * This program is distributed in the hope that it will be useful, but
41 * WITHOUT ANY WARRANTY; without even the implied warranty of
42 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
43 * General Public License for more details.
45 * You should have received a copy of the GNU General Public License
46 * along with this program; if not, write to the Free Software
47 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
50 * The full GNU General Public License is included in this distribution
51 * in the file called COPYING.
53 * Contact Information:
54 * Intel Linux Wireless <ilw@linux.intel.com>
55 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
60 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61 * All rights reserved.
63 * Redistribution and use in source and binary forms, with or without
64 * modification, are permitted provided that the following conditions
67 * * Redistributions of source code must retain the above copyright
68 * notice, this list of conditions and the following disclaimer.
69 * * Redistributions in binary form must reproduce the above copyright
70 * notice, this list of conditions and the following disclaimer in
71 * the documentation and/or other materials provided with the
73 * * Neither the name Intel Corporation nor the names of its
74 * contributors may be used to endorse or promote products derived
75 * from this software without specific prior written permission.
77 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
91 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
93 * Permission to use, copy, modify, and distribute this software for any
94 * purpose with or without fee is hereby granted, provided that the above
95 * copyright notice and this permission notice appear in all copies.
97 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
105 #include <sys/cdefs.h>
106 __FBSDID("$FreeBSD$");
108 #include "opt_wlan.h"
110 #include <sys/param.h>
112 #include <sys/conf.h>
113 #include <sys/endian.h>
114 #include <sys/firmware.h>
115 #include <sys/kernel.h>
116 #include <sys/malloc.h>
117 #include <sys/mbuf.h>
118 #include <sys/mutex.h>
119 #include <sys/module.h>
120 #include <sys/proc.h>
121 #include <sys/rman.h>
122 #include <sys/socket.h>
123 #include <sys/sockio.h>
124 #include <sys/sysctl.h>
125 #include <sys/linker.h>
127 #include <machine/bus.h>
128 #include <machine/endian.h>
129 #include <machine/resource.h>
131 #include <dev/pci/pcivar.h>
132 #include <dev/pci/pcireg.h>
137 #include <net/if_var.h>
138 #include <net/if_arp.h>
139 #include <net/if_dl.h>
140 #include <net/if_media.h>
141 #include <net/if_types.h>
143 #include <netinet/in.h>
144 #include <netinet/in_systm.h>
145 #include <netinet/if_ether.h>
146 #include <netinet/ip.h>
148 #include <net80211/ieee80211_var.h>
149 #include <net80211/ieee80211_regdomain.h>
150 #include <net80211/ieee80211_ratectl.h>
151 #include <net80211/ieee80211_radiotap.h>
153 #include <dev/iwm/if_iwmreg.h>
154 #include <dev/iwm/if_iwmvar.h>
155 #include <dev/iwm/if_iwm_debug.h>
156 #include <dev/iwm/if_iwm_util.h>
157 #include <dev/iwm/if_iwm_binding.h>
158 #include <dev/iwm/if_iwm_phy_db.h>
159 #include <dev/iwm/if_iwm_mac_ctxt.h>
160 #include <dev/iwm/if_iwm_phy_ctxt.h>
161 #include <dev/iwm/if_iwm_time_event.h>
162 #include <dev/iwm/if_iwm_power.h>
163 #include <dev/iwm/if_iwm_scan.h>
165 #include <dev/iwm/if_iwm_pcie_trans.h>
166 #include <dev/iwm/if_iwm_led.h>
168 #define IWM_NVM_HW_SECTION_NUM_FAMILY_7000 0
169 #define IWM_NVM_HW_SECTION_NUM_FAMILY_8000 10
171 /* lower blocks contain EEPROM image and calibration data */
172 #define IWM_OTP_LOW_IMAGE_SIZE_FAMILY_7000 (16 * 512 * sizeof(uint16_t)) /* 16 KB */
173 #define IWM_OTP_LOW_IMAGE_SIZE_FAMILY_8000 (32 * 512 * sizeof(uint16_t)) /* 32 KB */
175 #define IWM7260_FW "iwm7260fw"
176 #define IWM3160_FW "iwm3160fw"
177 #define IWM7265_FW "iwm7265fw"
178 #define IWM7265D_FW "iwm7265Dfw"
179 #define IWM8000_FW "iwm8000Cfw"
/*
 * Per-device configuration tables.  Each iwm_cfg names the firmware image
 * to load and (via the *_COMMON macros) the device family, OTP/EEPROM
 * image size and NVM hardware-section number shared by that family.
 */
181 #define IWM_DEVICE_7000_COMMON \
182 .device_family = IWM_DEVICE_FAMILY_7000, \
183 .eeprom_size = IWM_OTP_LOW_IMAGE_SIZE_FAMILY_7000, \
184 .nvm_hw_section_num = IWM_NVM_HW_SECTION_NUM_FAMILY_7000
/* 7260: needs the host-interrupt-operation-mode workaround. */
186 const struct iwm_cfg iwm7260_cfg = {
187 .fw_name = IWM7260_FW,
188 IWM_DEVICE_7000_COMMON,
189 .host_interrupt_operation_mode = 1,
/* 3160: same workaround as the 7260. */
192 const struct iwm_cfg iwm3160_cfg = {
193 .fw_name = IWM3160_FW,
194 IWM_DEVICE_7000_COMMON,
195 .host_interrupt_operation_mode = 1,
/* 3165: uses the 7265 firmware image (see XXX note below). */
198 const struct iwm_cfg iwm3165_cfg = {
199 /* XXX IWM7265D_FW doesn't seem to work properly yet */
200 .fw_name = IWM7265_FW,
201 IWM_DEVICE_7000_COMMON,
202 .host_interrupt_operation_mode = 0,
205 const struct iwm_cfg iwm7265_cfg = {
206 .fw_name = IWM7265_FW,
207 IWM_DEVICE_7000_COMMON,
208 .host_interrupt_operation_mode = 0,
/* 7265D: deliberately falls back to the plain 7265 image for now. */
211 const struct iwm_cfg iwm7265d_cfg = {
212 /* XXX IWM7265D_FW doesn't seem to work properly yet */
213 .fw_name = IWM7265_FW,
214 IWM_DEVICE_7000_COMMON,
215 .host_interrupt_operation_mode = 0,
218 #define IWM_DEVICE_8000_COMMON \
219 .device_family = IWM_DEVICE_FAMILY_8000, \
220 .eeprom_size = IWM_OTP_LOW_IMAGE_SIZE_FAMILY_8000, \
221 .nvm_hw_section_num = IWM_NVM_HW_SECTION_NUM_FAMILY_8000
/* 8260: first of the 8000 family supported here. */
223 const struct iwm_cfg iwm8260_cfg = {
224 .fw_name = IWM8000_FW,
225 IWM_DEVICE_8000_COMMON,
226 .host_interrupt_operation_mode = 0,
/*
 * Channel maps as laid out in the device NVM: 2 GHz channels 1-14
 * followed by the 5 GHz channels.  The _Static_asserts below guarantee
 * the driver-side IWM_NUM_CHANNELS* bitmap sizes can cover these lists.
 */
229 const uint8_t iwm_nvm_channels[] = {
231 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
233 36, 40, 44, 48, 52, 56, 60, 64,
234 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
235 149, 153, 157, 161, 165
237 _Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
238 "IWM_NUM_CHANNELS is too small");
/* 8000-family parts expose a wider 5 GHz channel list. */
240 const uint8_t iwm_nvm_channels_8000[] = {
242 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
244 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
245 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
246 149, 153, 157, 161, 165, 169, 173, 177, 181
248 _Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
249 "IWM_NUM_CHANNELS_8000 is too small");
251 #define IWM_NUM_2GHZ_CHANNELS 14
252 #define IWM_N_HW_ADDR_MASK 0xF
255 * XXX For now, there's simply a fixed set of rate table entries
256 * that are populated.
/*
 * Rate table: first entry of each pair is the rate in units of 500 kbps
 * (so 2 == 1 Mbps), second is the PLCP value the firmware expects.
 * Entries 0-3 are CCK, 4 onward are OFDM (see IWM_RIDX_* below).
 */
258 const struct iwm_rate {
262 { 2, IWM_RATE_1M_PLCP },
263 { 4, IWM_RATE_2M_PLCP },
264 { 11, IWM_RATE_5M_PLCP },
265 { 22, IWM_RATE_11M_PLCP },
266 { 12, IWM_RATE_6M_PLCP },
267 { 18, IWM_RATE_9M_PLCP },
268 { 24, IWM_RATE_12M_PLCP },
269 { 36, IWM_RATE_18M_PLCP },
270 { 48, IWM_RATE_24M_PLCP },
271 { 72, IWM_RATE_36M_PLCP },
272 { 96, IWM_RATE_48M_PLCP },
273 { 108, IWM_RATE_54M_PLCP },
/* Index helpers into iwm_rates[]: CCK occupies [0,4), OFDM [4,max]. */
275 #define IWM_RIDX_CCK 0
276 #define IWM_RIDX_OFDM 4
277 #define IWM_RIDX_MAX (nitems(iwm_rates)-1)
278 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
279 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
281 struct iwm_nvm_section {
286 static int iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
287 static int iwm_firmware_store_section(struct iwm_softc *,
289 const uint8_t *, size_t);
290 static int iwm_set_default_calib(struct iwm_softc *, const void *);
291 static void iwm_fw_info_free(struct iwm_fw_info *);
292 static int iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
293 static void iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
294 static int iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
295 bus_size_t, bus_size_t);
296 static void iwm_dma_contig_free(struct iwm_dma_info *);
297 static int iwm_alloc_fwmem(struct iwm_softc *);
298 static int iwm_alloc_sched(struct iwm_softc *);
299 static int iwm_alloc_kw(struct iwm_softc *);
300 static int iwm_alloc_ict(struct iwm_softc *);
301 static int iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
302 static void iwm_disable_rx_dma(struct iwm_softc *);
303 static void iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
304 static void iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
305 static int iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
307 static void iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
308 static void iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
309 static void iwm_enable_interrupts(struct iwm_softc *);
310 static void iwm_restore_interrupts(struct iwm_softc *);
311 static void iwm_disable_interrupts(struct iwm_softc *);
312 static void iwm_ict_reset(struct iwm_softc *);
313 static int iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
314 static void iwm_stop_device(struct iwm_softc *);
315 static void iwm_mvm_nic_config(struct iwm_softc *);
316 static int iwm_nic_rx_init(struct iwm_softc *);
317 static int iwm_nic_tx_init(struct iwm_softc *);
318 static int iwm_nic_init(struct iwm_softc *);
319 static int iwm_enable_txq(struct iwm_softc *, int, int, int);
320 static int iwm_post_alive(struct iwm_softc *);
321 static int iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
322 uint16_t, uint8_t *, uint16_t *);
323 static int iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
324 uint16_t *, uint32_t);
325 static uint32_t iwm_eeprom_channel_flags(uint16_t);
326 static void iwm_add_channel_band(struct iwm_softc *,
327 struct ieee80211_channel[], int, int *, int, size_t,
329 static void iwm_init_channel_map(struct ieee80211com *, int, int *,
330 struct ieee80211_channel[]);
331 static struct iwm_nvm_data *
332 iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
333 const uint16_t *, const uint16_t *,
334 const uint16_t *, const uint16_t *,
336 static void iwm_free_nvm_data(struct iwm_nvm_data *);
337 static void iwm_set_hw_address_family_8000(struct iwm_softc *,
338 struct iwm_nvm_data *,
341 static int iwm_get_sku(const struct iwm_softc *, const uint16_t *,
343 static int iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
344 static int iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
346 static int iwm_get_n_hw_addrs(const struct iwm_softc *,
348 static void iwm_set_radio_cfg(const struct iwm_softc *,
349 struct iwm_nvm_data *, uint32_t);
350 static struct iwm_nvm_data *
351 iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
352 static int iwm_nvm_init(struct iwm_softc *);
353 static int iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
354 const uint8_t *, uint32_t);
355 static int iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
356 const uint8_t *, uint32_t);
357 static int iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
358 static int iwm_load_cpu_sections_8000(struct iwm_softc *,
359 struct iwm_fw_sects *, int , int *);
360 static int iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
361 static int iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
362 static int iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
363 static int iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
364 static int iwm_send_phy_cfg_cmd(struct iwm_softc *);
365 static int iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
366 enum iwm_ucode_type);
367 static int iwm_run_init_mvm_ucode(struct iwm_softc *, int);
368 static int iwm_rx_addbuf(struct iwm_softc *, int, int);
369 static int iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
370 static int iwm_mvm_get_signal_strength(struct iwm_softc *,
371 struct iwm_rx_phy_info *);
372 static void iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
373 struct iwm_rx_packet *,
374 struct iwm_rx_data *);
375 static int iwm_get_noise(struct iwm_softc *sc,
376 const struct iwm_mvm_statistics_rx_non_phy *);
377 static void iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
378 struct iwm_rx_data *);
379 static int iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
380 struct iwm_rx_packet *,
382 static void iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
383 struct iwm_rx_data *);
384 static void iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
386 static void iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
389 static const struct iwm_rate *
390 iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
391 struct mbuf *, struct iwm_tx_cmd *);
392 static int iwm_tx(struct iwm_softc *, struct mbuf *,
393 struct ieee80211_node *, int);
394 static int iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
395 const struct ieee80211_bpf_params *);
396 static int iwm_mvm_flush_tx_path(struct iwm_softc *sc,
397 uint32_t tfd_msk, uint32_t flags);
398 static int iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
399 struct iwm_mvm_add_sta_cmd_v7 *,
401 static int iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
403 static int iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
404 static int iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
405 static int iwm_mvm_add_int_sta_common(struct iwm_softc *,
406 struct iwm_int_sta *,
407 const uint8_t *, uint16_t, uint16_t);
408 static int iwm_mvm_add_aux_sta(struct iwm_softc *);
409 static int iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
410 static int iwm_auth(struct ieee80211vap *, struct iwm_softc *);
411 static int iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
412 static int iwm_release(struct iwm_softc *, struct iwm_node *);
413 static struct ieee80211_node *
414 iwm_node_alloc(struct ieee80211vap *,
415 const uint8_t[IEEE80211_ADDR_LEN]);
416 static void iwm_setrates(struct iwm_softc *, struct iwm_node *);
417 static int iwm_media_change(struct ifnet *);
418 static int iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
419 static void iwm_endscan_cb(void *, int);
420 static void iwm_mvm_fill_sf_command(struct iwm_softc *,
421 struct iwm_sf_cfg_cmd *,
422 struct ieee80211_node *);
423 static int iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
424 static int iwm_send_bt_init_conf(struct iwm_softc *);
425 static int iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
426 static void iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
427 static int iwm_init_hw(struct iwm_softc *);
428 static void iwm_init(struct iwm_softc *);
429 static void iwm_start(struct iwm_softc *);
430 static void iwm_stop(struct iwm_softc *);
431 static void iwm_watchdog(void *);
432 static void iwm_parent(struct ieee80211com *);
435 iwm_desc_lookup(uint32_t);
436 static void iwm_nic_error(struct iwm_softc *);
437 static void iwm_nic_umac_error(struct iwm_softc *);
439 static void iwm_notif_intr(struct iwm_softc *);
440 static void iwm_intr(void *);
441 static int iwm_attach(device_t);
442 static int iwm_is_valid_ether_addr(uint8_t *);
443 static void iwm_preinit(void *);
444 static int iwm_detach_local(struct iwm_softc *sc, int);
445 static void iwm_init_task(void *);
446 static void iwm_radiotap_attach(struct iwm_softc *);
447 static struct ieee80211vap *
448 iwm_vap_create(struct ieee80211com *,
449 const char [IFNAMSIZ], int,
450 enum ieee80211_opmode, int,
451 const uint8_t [IEEE80211_ADDR_LEN],
452 const uint8_t [IEEE80211_ADDR_LEN]);
453 static void iwm_vap_delete(struct ieee80211vap *);
454 static void iwm_scan_start(struct ieee80211com *);
455 static void iwm_scan_end(struct ieee80211com *);
456 static void iwm_update_mcast(struct ieee80211com *);
457 static void iwm_set_channel(struct ieee80211com *);
458 static void iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
459 static void iwm_scan_mindwell(struct ieee80211_scan_state *);
460 static int iwm_detach(device_t);
/*
 * Validate the firmware's crypto-scheme TLV.  Rejects a payload too
 * short to hold the list header or the advertised number of entries;
 * nothing is stored because the driver always uses software crypto.
 */
467 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
469 const struct iwm_fw_cscheme_list *l = (const void *)data;
/* Length check: header first, then header + l->size entries. */
471 if (dlen < sizeof(*l) ||
472 dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
475 /* we don't actually store anything for now, always use s/w crypto */
/*
 * Record one firmware section (from a TLV) into sc_fw.fw_sects[type].
 * The section payload starts with a 32-bit device load offset; the
 * stored fws_data/fws_len describe only the bytes after that offset.
 * Bounds-checks the ucode type, minimum length and section count.
 */
481 iwm_firmware_store_section(struct iwm_softc *sc,
482 enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
484 struct iwm_fw_sects *fws;
485 struct iwm_fw_onesect *fwone;
/* Reject unknown ucode types and payloads shorter than the offset word. */
487 if (type >= IWM_UCODE_TYPE_MAX)
489 if (dlen < sizeof(uint32_t))
492 fws = &sc->sc_fw.fw_sects[type];
/* No room for another section of this type. */
493 if (fws->fw_count >= IWM_UCODE_SECT_MAX)
496 fwone = &fws->fw_sect[fws->fw_count];
498 /* first 32bit are device load offset */
499 memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
/* Remainder of the payload is the section image itself. */
502 fwone->fws_data = data + sizeof(uint32_t);
503 fwone->fws_len = dlen - sizeof(uint32_t);
/* Scan-channel count assumed until a TLV_N_SCAN_CHANNELS overrides it. */
510 #define IWM_DEFAULT_SCAN_CHANNELS 40
512 /* iwlwifi: iwl-drv.c */
/* Payload layout of the IWM_UCODE_TLV_DEF_CALIB firmware TLV. */
513 struct iwm_tlv_calib_data {
515 struct iwm_tlv_calib_ctrl calib;
/*
 * Store the default calibration triggers from a DEF_CALIB TLV into
 * sc_default_calib[], indexed by the ucode type named in the payload.
 * Rejects (with a console message) an out-of-range ucode type.
 */
519 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
521 const struct iwm_tlv_calib_data *def_calib = data;
522 uint32_t ucode_type = le32toh(def_calib->ucode_type);
/* ucode_type indexes sc_default_calib[]; validate before use. */
524 if (ucode_type >= IWM_UCODE_TYPE_MAX) {
525 device_printf(sc->sc_dev,
526 "Wrong ucode_type %u for default "
527 "calibration.\n", ucode_type);
/* Triggers are kept in firmware byte order (no le32toh here). */
531 sc->sc_default_calib[ucode_type].flow_trigger =
532 def_calib->calib.flow_trigger;
533 sc->sc_default_calib[ucode_type].event_trigger =
534 def_calib->calib.event_trigger;
/*
 * Release the firmware image and clear the parsed section table.
 * fw_status is deliberately left alone so callers can manage the
 * NONE/INPROGRESS/DONE state machine themselves.
 */
540 iwm_fw_info_free(struct iwm_fw_info *fw)
542 firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
544 /* don't touch fw->fw_status */
545 memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
/*
 * Load the firmware image via firmware(9) and parse its TLV stream,
 * populating sc_fw (sections, capabilities, version string, PHY SKU).
 * Serializes concurrent loads through fw->fw_status: waits while another
 * load is INPROGRESS, then marks itself INPROGRESS until done/failed.
 */
549 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
551 struct iwm_fw_info *fw = &sc->sc_fw;
552 const struct iwm_tlv_ucode_header *uhdr;
553 struct iwm_ucode_tlv tlv;
554 enum iwm_ucode_tlv_type tlv_type;
555 const struct firmware *fwp;
/* Already parsed and not an INIT re-load: nothing to do. */
560 if (fw->fw_status == IWM_FW_STATUS_DONE &&
561 ucode_type != IWM_UCODE_TYPE_INIT)
/* Wait for a concurrent loader, then claim the slot. */
564 while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
565 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
566 fw->fw_status = IWM_FW_STATUS_INPROGRESS;
/* Drop any previously-loaded image before re-reading. */
568 if (fw->fw_fp != NULL)
569 iwm_fw_info_free(fw);
572 * Load firmware into driver memory.
576 fwp = firmware_get(sc->cfg->fw_name);
579 device_printf(sc->sc_dev,
580 "could not read firmware %s (error %d)\n",
581 sc->cfg->fw_name, error);
586 /* (Re-)Initialize default values. */
587 sc->sc_capaflags = 0;
588 sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
589 memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
590 memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
593 * Parse firmware contents
/* TLV images start with a zero word followed by the ucode magic. */
596 uhdr = (const void *)fw->fw_fp->data;
597 if (*(const uint32_t *)fw->fw_fp->data != 0
598 || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
599 device_printf(sc->sc_dev, "invalid firmware %s\n",
/* Provisional version string; TLV_FW_VERSION may replace it below. */
605 snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
606 IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
607 IWM_UCODE_MINOR(le32toh(uhdr->ver)),
608 IWM_UCODE_API(le32toh(uhdr->ver)));
610 len = fw->fw_fp->datasize - sizeof(*uhdr);
/*
 * Walk the TLV stream: copy each header (may be unaligned), then
 * dispatch on its type.  Payloads are 4-byte padded (see roundup below).
 */
612 while (len >= sizeof(tlv)) {
614 const void *tlv_data;
616 memcpy(&tlv, data, sizeof(tlv));
617 tlv_len = le32toh(tlv.length);
618 tlv_type = le32toh(tlv.type);
625 device_printf(sc->sc_dev,
626 "firmware too short: %zu bytes\n",
632 switch ((int)tlv_type) {
633 case IWM_UCODE_TLV_PROBE_MAX_LEN:
634 if (tlv_len < sizeof(uint32_t)) {
635 device_printf(sc->sc_dev,
636 "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
642 sc->sc_capa_max_probe_len
643 = le32toh(*(const uint32_t *)tlv_data);
644 /* limit it to something sensible */
645 if (sc->sc_capa_max_probe_len >
646 IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
647 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
648 "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
649 "ridiculous\n", __func__);
654 case IWM_UCODE_TLV_PAN:
656 device_printf(sc->sc_dev,
657 "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
663 sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
665 case IWM_UCODE_TLV_FLAGS:
666 if (tlv_len < sizeof(uint32_t)) {
667 device_printf(sc->sc_dev,
668 "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
675 * Apparently there can be many flags, but Linux driver
676 * parses only the first one, and so do we.
678 * XXX: why does this override IWM_UCODE_TLV_PAN?
679 * Intentional or a bug? Observations from
680 * current firmware file:
681 * 1) TLV_PAN is parsed first
682 * 2) TLV_FLAGS contains TLV_FLAGS_PAN
683 * ==> this resets TLV_PAN to itself... hnnnk
685 sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
687 case IWM_UCODE_TLV_CSCHEME:
688 if ((error = iwm_store_cscheme(sc,
689 tlv_data, tlv_len)) != 0) {
690 device_printf(sc->sc_dev,
691 "%s: iwm_store_cscheme(): returned %d\n",
697 case IWM_UCODE_TLV_NUM_OF_CPU: {
699 if (tlv_len != sizeof(uint32_t)) {
700 device_printf(sc->sc_dev,
701 "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) < sizeof(uint32_t)\n",
707 num_cpu = le32toh(*(const uint32_t *)tlv_data);
708 if (num_cpu < 1 || num_cpu > 2) {
709 device_printf(sc->sc_dev,
710 "%s: Driver supports only 1 or 2 CPUs\n",
/* Section TLVs: stash the image for the matching ucode type. */
717 case IWM_UCODE_TLV_SEC_RT:
718 if ((error = iwm_firmware_store_section(sc,
719 IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0) {
720 device_printf(sc->sc_dev,
721 "%s: IWM_UCODE_TYPE_REGULAR: iwm_firmware_store_section() failed; %d\n",
727 case IWM_UCODE_TLV_SEC_INIT:
728 if ((error = iwm_firmware_store_section(sc,
729 IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0) {
730 device_printf(sc->sc_dev,
731 "%s: IWM_UCODE_TYPE_INIT: iwm_firmware_store_section() failed; %d\n",
737 case IWM_UCODE_TLV_SEC_WOWLAN:
738 if ((error = iwm_firmware_store_section(sc,
739 IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0) {
740 device_printf(sc->sc_dev,
741 "%s: IWM_UCODE_TYPE_WOW: iwm_firmware_store_section() failed; %d\n",
747 case IWM_UCODE_TLV_DEF_CALIB:
748 if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
749 device_printf(sc->sc_dev,
750 "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
753 (int) sizeof(struct iwm_tlv_calib_data));
757 if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
758 device_printf(sc->sc_dev,
759 "%s: iwm_set_default_calib() failed: %d\n",
765 case IWM_UCODE_TLV_PHY_SKU:
766 if (tlv_len != sizeof(uint32_t)) {
768 device_printf(sc->sc_dev,
769 "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
/* Extract valid TX/RX antenna masks from the PHY config word. */
774 sc->sc_fw.phy_config =
775 le32toh(*(const uint32_t *)tlv_data);
776 sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
777 IWM_FW_PHY_CFG_TX_CHAIN) >>
778 IWM_FW_PHY_CFG_TX_CHAIN_POS;
779 sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
780 IWM_FW_PHY_CFG_RX_CHAIN) >>
781 IWM_FW_PHY_CFG_RX_CHAIN_POS;
784 case IWM_UCODE_TLV_API_CHANGES_SET: {
785 const struct iwm_ucode_api *api;
786 if (tlv_len != sizeof(*api)) {
790 api = (const struct iwm_ucode_api *)tlv_data;
791 /* Flags may exceed 32 bits in future firmware. */
792 if (le32toh(api->api_index) > 0) {
793 device_printf(sc->sc_dev,
794 "unsupported API index %d\n",
795 le32toh(api->api_index));
798 sc->sc_ucode_api = le32toh(api->api_flags);
802 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
803 const struct iwm_ucode_capa *capa;
805 if (tlv_len != sizeof(*capa)) {
809 capa = (const struct iwm_ucode_capa *)tlv_data;
810 idx = le32toh(capa->api_index);
811 if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
812 device_printf(sc->sc_dev,
813 "unsupported API index %d\n", idx);
/* Set one bit in sc_enabled_capa per capability flag in this word. */
816 for (i = 0; i < 32; i++) {
817 if ((le32toh(capa->api_capa) & (1U << i)) == 0)
819 setbit(sc->sc_enabled_capa, i + (32 * idx));
824 case 48: /* undocumented TLV */
825 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
826 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
827 /* ignore, not used by current driver */
830 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
831 if ((error = iwm_firmware_store_section(sc,
832 IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
837 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
838 if (tlv_len != sizeof(uint32_t)) {
842 sc->sc_capa_n_scan_channels =
843 le32toh(*(const uint32_t *)tlv_data);
845 
846 case IWM_UCODE_TLV_FW_VERSION:
847 if (tlv_len != sizeof(uint32_t) * 3) {
/* Overrides the provisional version string built from the header. */
851 snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
853 le32toh(((const uint32_t *)tlv_data)[0]),
854 le32toh(((const uint32_t *)tlv_data)[1]),
855 le32toh(((const uint32_t *)tlv_data)[2]));
859 device_printf(sc->sc_dev,
860 "%s: unknown firmware section %d, abort\n",
/* TLV payloads are padded to 4-byte boundaries in the stream. */
866 len -= roundup(tlv_len, 4);
867 data += roundup(tlv_len, 4);
870 KASSERT(error == 0, ("unhandled error"));
874 device_printf(sc->sc_dev, "firmware parse error %d, "
875 "section type %d\n", error, tlv_type);
/* PM command support is mandatory for this driver. */
878 if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
879 device_printf(sc->sc_dev,
880 "device uses unsupported power ops\n");
/* Failure path: reset status and drop the image before returning. */
886 fw->fw_status = IWM_FW_STATUS_NONE;
887 if (fw->fw_fp != NULL)
888 iwm_fw_info_free(fw);
890 fw->fw_status = IWM_FW_STATUS_DONE;
897 * DMA resource routines
/*
 * bus_dmamap_load() callback: the mapping must be one contiguous
 * segment, whose bus address is written back through *arg.
 */
901 iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
905 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
906 *(bus_addr_t *)arg = segs[0].ds_addr;
/*
 * Allocate a single physically-contiguous, zeroed, coherent DMA buffer
 * of `size` bytes with the given alignment (32-bit addressable).
 * Fills dma->tag/map/vaddr/paddr; on any failure falls through to
 * iwm_dma_contig_free() to unwind partial state.
 */
910 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
911 bus_size_t size, bus_size_t alignment)
/* One segment, 32-bit DMA address space, caller-specified alignment. */
920 error = bus_dma_tag_create(tag, alignment,
921 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
922 1, size, 0, NULL, NULL, &dma->tag);
926 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
927 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
/* Resolve the bus address via the iwm_dma_map_addr callback. */
931 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
932 iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
934 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
939 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
/* Error path: release whatever was set up above. */
944 iwm_dma_contig_free(dma);
/*
 * Tear down a buffer created by iwm_dma_contig_alloc().  Safe to call
 * on a partially-initialized iwm_dma_info: each step is guarded by a
 * NULL check so it doubles as the allocation-failure unwind path.
 */
950 iwm_dma_contig_free(struct iwm_dma_info *dma)
952 if (dma->vaddr != NULL) {
953 bus_dmamap_sync(dma->tag, dma->map,
954 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
955 bus_dmamap_unload(dma->tag, dma->map);
956 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
959 if (dma->tag != NULL) {
960 bus_dma_tag_destroy(dma->tag);
965 /* fwmem is used to load firmware onto the card */
/*
 * Allocate the staging buffer used to push firmware chunks to the
 * device (sc->fw_dma, sc_fwdmasegsz bytes, 16-byte aligned).
 */
967 iwm_alloc_fwmem(struct iwm_softc *sc)
969 /* Must be aligned on a 16-byte boundary. */
970 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
971 sc->sc_fwdmasegsz, 16);
974 /* tx scheduler rings. not used? */
/*
 * Allocate the TX scheduler byte-count tables, one per TX queue
 * (sc->sched_dma, 1 KB aligned as the hardware requires).
 */
976 iwm_alloc_sched(struct iwm_softc *sc)
978 /* TX scheduler rings must be aligned on a 1KB boundary. */
979 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
980 nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
983 /* keep-warm page is used internally by the card. see iwl-fh.h for more info */
/* Allocate the 4 KB "keep warm" page (sc->kw_dma), page-aligned. */
985 iwm_alloc_kw(struct iwm_softc *sc)
987 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
990 /* interrupt cause table */
/*
 * Allocate the interrupt cause table (sc->ict_dma), aligned so the
 * hardware can address it via IWM_ICT_PADDR_SHIFT.
 */
992 iwm_alloc_ict(struct iwm_softc *sc)
994 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
995 IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
/*
 * Allocate everything an RX ring needs: the descriptor array, the
 * status area, a DMA tag for receive buffers, a spare map for buffer
 * rotation in iwm_rx_addbuf(), and one mapped buffer per ring slot.
 * Any failure jumps to `fail`, which frees the whole ring.
 */
999 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1006 /* Allocate RX descriptors (256-byte aligned). */
/* Each descriptor is a 32-bit buffer address. */
1007 size = IWM_RX_RING_COUNT * sizeof(uint32_t);
1008 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1010 device_printf(sc->sc_dev,
1011 "could not allocate RX ring DMA memory\n");
1014 ring->desc = ring->desc_dma.vaddr;
1016 /* Allocate RX status area (16-byte aligned). */
1017 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1018 sizeof(*ring->stat), 16);
1020 device_printf(sc->sc_dev,
1021 "could not allocate RX status DMA memory\n");
1024 ring->stat = ring->stat_dma.vaddr;
1026 /* Create RX buffer DMA tag. */
/* One segment per RX buffer, IWM_RBUF_SIZE max, 32-bit addressable. */
1027 error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1028 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1029 IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
1031 device_printf(sc->sc_dev,
1032 "%s: could not create RX buf DMA tag, error %d\n",
1037 /* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
1038 error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
1040 device_printf(sc->sc_dev,
1041 "%s: could not create RX buf DMA map, error %d\n",
1046 * Allocate and map RX buffers.
/* One DMA map + one initial mbuf per ring entry. */
1048 for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1049 struct iwm_rx_data *data = &ring->data[i];
1050 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1052 device_printf(sc->sc_dev,
1053 "%s: could not create RX buf DMA map, error %d\n",
1059 if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
/* Common error exit: release everything allocated so far. */
1065 fail: iwm_free_rx_ring(sc, ring);
/*
 * Stop RX DMA on the device.  Best-effort: if the NIC lock cannot be
 * taken the stop is silently skipped (see the XXX notes below).
 */
1070 iwm_disable_rx_dma(struct iwm_softc *sc)
1072 /* XXX conditional nic locks are stupid */
1073 /* XXX print out if we can't lock the NIC? */
1074 if (iwm_nic_lock(sc)) {
1075 /* XXX handle if RX stop doesn't finish? */
1076 (void) iwm_pcie_rx_stop(sc);
/*
 * Reset RX ring software state and zero the shared status area so the
 * driver's notion of the ring index matches the hardware's after reset.
 */
1082 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1084 /* Reset the ring state */
1088 * The hw rx ring index in shared memory must also be cleared,
1089 * otherwise the discrepancy can cause reprocessing chaos.
1091 memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
/*
 * Free all RX ring resources: descriptor/status DMA blocks, every
 * per-slot mbuf mapping and map, the spare map, and the buffer tag.
 * NULL checks make this safe on a partially-constructed ring, so it
 * also serves as the iwm_alloc_rx_ring() failure path.
 */
1095 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1099 iwm_dma_contig_free(&ring->desc_dma);
1100 iwm_dma_contig_free(&ring->stat_dma);
1102 for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1103 struct iwm_rx_data *data = &ring->data[i];
/* Unload any mbuf still attached to this slot before destroying. */
1105 if (data->m != NULL) {
1106 bus_dmamap_sync(ring->data_dmat, data->map,
1107 BUS_DMASYNC_POSTREAD);
1108 bus_dmamap_unload(ring->data_dmat, data->map);
1112 if (data->map != NULL) {
1113 bus_dmamap_destroy(ring->data_dmat, data->map);
1117 if (ring->spare_map != NULL) {
1118 bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1119 ring->spare_map = NULL;
1121 if (ring->data_dmat != NULL) {
1122 bus_dma_tag_destroy(ring->data_dmat);
1123 ring->data_dmat = NULL;
/*
 * Allocate one TX ring (descriptors, command buffers, DMA maps).
 * qid selects the hardware queue; only queues up to and including
 * IWM_MVM_CMD_QUEUE get command space (see comment below).  On any
 * failure the 'fail' path frees whatever was already allocated.
 */
1128 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1140 /* Allocate TX descriptors (256-byte aligned). */
1141 size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1142 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1144 device_printf(sc->sc_dev,
1145 "could not allocate TX ring DMA memory\n");
1148 ring->desc = ring->desc_dma.vaddr;
1151 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
1152 * to allocate commands space for other rings.
1154 if (qid > IWM_MVM_CMD_QUEUE)
1157 size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1158 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1160 device_printf(sc->sc_dev,
1161 "could not allocate TX cmd DMA memory\n");
1164 ring->cmd = ring->cmd_dma.vaddr;
1166 /* FW commands may require more mapped space than packets. */
1167 if (qid == IWM_MVM_CMD_QUEUE) {
1168 maxsize = IWM_RBUF_SIZE;
1172 nsegments = IWM_MAX_SCATTER - 2;
1175 error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1176 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
1177 nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
1179 device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
/*
 * Walk the command DMA area and record, per slot, the physical
 * address of its command buffer and of the scratch field inside
 * the TX command (cmd header + offset of 'scratch').
 */
1183 paddr = ring->cmd_dma.paddr;
1184 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1185 struct iwm_tx_data *data = &ring->data[i];
1187 data->cmd_paddr = paddr;
1188 data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1189 + offsetof(struct iwm_tx_cmd, scratch);
1190 paddr += sizeof(struct iwm_device_cmd);
1192 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1194 device_printf(sc->sc_dev,
1195 "could not create TX buf DMA map\n");
/* Sanity: we must have consumed exactly the command DMA area. */
1199 KASSERT(paddr == ring->cmd_dma.paddr + size,
1200 ("invalid physical address"));
1203 fail: iwm_free_tx_ring(sc, ring);
/*
 * Drop all in-flight mbufs from a TX ring, zero its descriptors and
 * clear the queue's bit in qfullmsk.  The ring's DMA resources are
 * kept allocated (contrast with iwm_free_tx_ring()).
 */
1208 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1212 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1213 struct iwm_tx_data *data = &ring->data[i];
1215 if (data->m != NULL) {
1216 bus_dmamap_sync(ring->data_dmat, data->map,
1217 BUS_DMASYNC_POSTWRITE);
1218 bus_dmamap_unload(ring->data_dmat, data->map);
1223 /* Clear TX descriptors. */
1224 memset(ring->desc, 0, ring->desc_dma.size);
1225 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1226 BUS_DMASYNC_PREWRITE);
/* Queue is now empty; it can no longer be "full". */
1227 sc->qfullmsk &= ~(1 << ring->qid);
/*
 * Fully release a TX ring: descriptor and command DMA memory, every
 * per-slot map (unloading any mbuf still attached), then the DMA tag.
 * Safe on partially-initialized rings; also used as the failure path
 * of iwm_alloc_tx_ring().
 */
1233 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1237 iwm_dma_contig_free(&ring->desc_dma);
1238 iwm_dma_contig_free(&ring->cmd_dma);
1240 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1241 struct iwm_tx_data *data = &ring->data[i];
1243 if (data->m != NULL) {
1244 bus_dmamap_sync(ring->data_dmat, data->map,
1245 BUS_DMASYNC_POSTWRITE);
1246 bus_dmamap_unload(ring->data_dmat, data->map);
1250 if (data->map != NULL) {
1251 bus_dmamap_destroy(ring->data_dmat, data->map);
1255 if (ring->data_dmat != NULL) {
/* Destroy the tag only after every map made from it is gone. */
1256 bus_dma_tag_destroy(ring->data_dmat);
1257 ring->data_dmat = NULL;
1262 * High-level hardware frobbing routines
/*
 * Enable the default interrupt set and remember it in sc_intmask so
 * iwm_restore_interrupts() can re-arm the same mask later.
 */
1266 iwm_enable_interrupts(struct iwm_softc *sc)
1268 sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1269 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
/* Re-arm the interrupt mask last saved in sc_intmask. */
1273 iwm_restore_interrupts(struct iwm_softc *sc)
1275 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
/*
 * Mask all interrupt sources, then acknowledge anything already
 * pending in both the main and FH interrupt status registers.
 */
1279 iwm_disable_interrupts(struct iwm_softc *sc)
1281 /* disable interrupts */
1282 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1284 /* acknowledge all interrupts */
1285 IWM_WRITE(sc, IWM_CSR_INT, ~0);
1286 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
/*
 * (Re)initialize the Interrupt Cause Table: zero the table, point the
 * hardware at its (4KB-aligned) physical address, switch the driver
 * into ICT mode, and re-enable interrupts.
 */
1290 iwm_ict_reset(struct iwm_softc *sc)
1292 iwm_disable_interrupts(sc);
1294 /* Reset ICT table. */
1295 memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1298 /* Set physical address of ICT table (4KB aligned). */
1299 IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1300 IWM_CSR_DRAM_INT_TBL_ENABLE
1301 | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1302 | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1303 | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1305 /* Switch to ICT interrupt mode in driver. */
1306 sc->sc_flags |= IWM_FLAG_USE_ICT;
1308 /* Re-enable interrupts. */
1309 IWM_WRITE(sc, IWM_CSR_INT, ~0);
1310 iwm_enable_interrupts(sc);
1313 /* iwlwifi pcie/trans.c */
1316 * Since this .. hard-resets things, it's time to actually
1317 * mark the first vap (if any) as having no mac context.
1318 * It's annoying, but since the driver is potentially being
1319 * stop/start'ed whilst active (thanks openbsd port!) we
1320 * have to correctly track this.
/*
 * Hard-stop the device: mask interrupts, mark the first vap's MAC
 * context as not uploaded, halt TX/RX DMA, reset all rings, power down
 * the busmaster DMA clocks and finally soft-reset the on-board
 * processor.  RF-kill interrupt is re-enabled at the end so state
 * changes are still noticed while the device is down.
 */
1323 iwm_stop_device(struct iwm_softc *sc)
1325 struct ieee80211com *ic = &sc->sc_ic;
1326 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1330 /* tell the device to stop sending interrupts */
1331 iwm_disable_interrupts(sc);
1334 * FreeBSD-local: mark the first vap as not-uploaded,
1335 * so the next transition through auth/assoc
1336 * will correctly populate the MAC context.
1339 struct iwm_vap *iv = IWM_VAP(vap);
1340 iv->is_uploaded = 0;
1343 /* device going down, Stop using ICT table */
1344 sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1346 /* stop tx and rx. tx and rx bits, as usual, are from if_iwn */
1348 iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1350 if (iwm_nic_lock(sc)) {
1351 /* Stop each Tx DMA channel */
1352 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1354 IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1355 mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1358 /* Wait for DMA channels to be idle */
1359 if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1361 device_printf(sc->sc_dev,
1362 "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1363 IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1367 iwm_disable_rx_dma(sc);
1370 iwm_reset_rx_ring(sc, &sc->rxq);
1372 /* Reset all TX rings. */
1373 for (qid = 0; qid < nitems(sc->txq); qid++)
1374 iwm_reset_tx_ring(sc, &sc->txq[qid]);
1377 * Power-down device's busmaster DMA clocks
1379 iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1382 /* Make sure (redundant) we've released our request to stay awake */
1383 IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1384 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1386 /* Stop the device, and put it in low power state */
1389 /* Upon stop, the APM issues an interrupt if HW RF kill is set.
1390 * Clean again the interrupt here
1392 iwm_disable_interrupts(sc);
1393 /* stop and reset the on-board processor */
1394 IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1397 * Even if we stop the HW, we still want the RF kill
1400 iwm_enable_rfkill_int(sc);
1401 iwm_check_rfkill(sc);
1404 /* iwlwifi: mvm/ops.c */
/*
 * Program the HW_IF_CONFIG register from the firmware PHY configuration
 * (radio type/step/dash) and the hardware revision (MAC step/dash),
 * then apply the 7000-family early-power-off workaround.
 */
1406 iwm_mvm_nic_config(struct iwm_softc *sc)
1408 uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1409 uint32_t reg_val = 0;
1410 uint32_t phy_config = iwm_mvm_get_phy_config(sc);
/* Decompose the PHY config word into its radio fields. */
1412 radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1413 IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1414 radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1415 IWM_FW_PHY_CFG_RADIO_STEP_POS;
1416 radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1417 IWM_FW_PHY_CFG_RADIO_DASH_POS;
1420 reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1421 IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1422 reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1423 IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1425 /* radio configuration */
1426 reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1427 reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1428 reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1430 IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1432 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1433 "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1434 radio_cfg_step, radio_cfg_dash);
1437 * W/A : NIC is stuck in a reset state after Early PCIe power off
1438 * (PCIe power is lost before PERST# is asserted), causing ME FW
1439 * to lose ownership and not being able to obtain it back.
1441 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1442 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1443 IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1444 ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
/*
 * Initialize the RX side of the NIC: clear status, stop/reprogram the
 * RX DMA engine with ring/status physical addresses, set channel-0
 * config flags and interrupt coalescing, and prime the write pointer.
 * Requires the NIC lock; bails out if it cannot be taken.
 */
1449 iwm_nic_rx_init(struct iwm_softc *sc)
1451 if (!iwm_nic_lock(sc))
1455 * Initialize RX ring. This is from the iwn driver.
1457 memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1460 iwm_disable_rx_dma(sc);
/* Reset channel-0 read/write pointers before reprogramming. */
1461 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1462 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1463 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1464 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1466 /* Set physical address of RX ring (256-byte aligned). */
1468 IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1470 /* Set physical address of RX status (16-byte aligned). */
1472 IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1475 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1476 IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
1477 IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | /* HW bug */
1478 IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
1479 IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
1480 (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1481 IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
1482 IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1484 IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1486 /* W/A for interrupt coalescing bug in 7260 and 3160 */
1487 if (sc->cfg->host_interrupt_operation_mode)
1488 IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1491 * Thus sayeth el jefe (iwlwifi) via a comment:
1493 * This value should initially be 0 (before preparing any
1494 * RBs), should be 8 after preparing the first 8 RBs (for example)
1496 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
/*
 * Initialize the TX side: deactivate the scheduler, program the
 * "keep warm" page and each ring's descriptor base address, then
 * enable auto-active mode in the scheduler.  Needs the NIC lock.
 */
1504 iwm_nic_tx_init(struct iwm_softc *sc)
1508 if (!iwm_nic_lock(sc))
1511 /* Deactivate TX scheduler. */
1512 iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1514 /* Set physical address of "keep warm" page (16-byte aligned). */
1515 IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1517 /* Initialize TX rings. */
1518 for (qid = 0; qid < nitems(sc->txq); qid++) {
1519 struct iwm_tx_ring *txq = &sc->txq[qid];
1521 /* Set physical address of TX ring (256-byte aligned). */
1522 IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1523 txq->desc_dma.paddr >> 8);
1524 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1525 "%s: loading ring %d descriptors (%p) at %lx\n",
1528 (unsigned long) (txq->desc_dma.paddr >> 8));
1531 iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
/*
 * Top-level NIC bring-up: NIC config, then RX and TX initialization,
 * then enable shadow registers.  Returns the first error encountered.
 */
1539 iwm_nic_init(struct iwm_softc *sc)
1544 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1547 iwm_mvm_nic_config(sc);
1549 if ((error = iwm_nic_rx_init(sc)) != 0)
1553 * Ditto for TX, from iwn
1555 if ((error = iwm_nic_tx_init(sc)) != 0)
1558 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1559 "%s: shadow registers enabled\n", __func__);
1560 IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
/* Map net80211 access categories to hardware TX FIFO numbers. */
1565 const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
/*
 * Activate a TX queue and bind it to a FIFO.  The command queue is
 * configured directly via scheduler PRPH registers; other queues are
 * configured by sending an IWM_SCD_QUEUE_CFG command to the firmware.
 * Requires (and takes) the NIC lock.
 */
1573 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1575 if (!iwm_nic_lock(sc)) {
1576 device_printf(sc->sc_dev,
1577 "%s: cannot enable txq %d\n",
/* Reset the queue's hardware write pointer. */
1583 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1585 if (qid == IWM_MVM_CMD_QUEUE) {
1586 /* unactivate before configuration */
1587 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1588 (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1589 | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1591 iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1593 iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1595 iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1596 /* Set scheduler window size and frame limit. */
1598 sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1600 ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1601 IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1602 ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1603 IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
/* Mark the queue active and route it to the requested FIFO. */
1605 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1606 (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1607 (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1608 (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1609 IWM_SCD_QUEUE_STTS_REG_MSK);
1611 struct iwm_scd_txq_cfg_cmd cmd;
1616 memset(&cmd, 0, sizeof(cmd));
1617 cmd.scd_queue = qid;
1619 cmd.sta_id = sta_id;
1622 cmd.window = IWM_FRAME_LIMIT;
1624 error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1627 device_printf(sc->sc_dev,
1628 "cannot enable txq %d\n", qid);
1632 if (!iwm_nic_lock(sc))
1636 iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1637 iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1641 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1642 __func__, qid, fifo);
/*
 * Finish initialization after the firmware's ALIVE notification:
 * verify the scheduler SRAM base, clear scheduler context memory,
 * point the scheduler at its DRAM rings, enable the command queue,
 * start all TX DMA channels, and enable L1-Active where applicable.
 */
1648 iwm_post_alive(struct iwm_softc *sc)
1654 if (!iwm_nic_lock(sc))
1657 base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1658 if (sc->sched_base != base) {
1659 device_printf(sc->sc_dev,
1660 "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1661 __func__, sc->sched_base, base);
1666 /* Clear TX scheduler state in SRAM. */
1667 nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1668 IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
1670 error = iwm_write_mem(sc,
1671 sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1676 /* Set physical address of TX scheduler rings (1KB aligned). */
1677 iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1679 iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1683 /* enable command channel */
1684 error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
1688 if (!iwm_nic_lock(sc))
/* Activate the TX scheduler for all queues. */
1691 iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1693 /* Enable DMA channels. */
1694 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1695 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1696 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1697 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1700 IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1701 IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1703 /* Enable L1-Active */
1704 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
1705 iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1706 IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1715 * NVM read access and content parsing. We do not support
1716 * external NVM or writing NVM.
1720 /* Default NVM size to read */
1721 #define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
/* op_code values for struct iwm_nvm_access_cmd */
1723 #define IWM_NVM_WRITE_OPCODE 1
1724 #define IWM_NVM_READ_OPCODE 0
1726 /* load nvm chunk response */
1728 IWM_READ_NVM_CHUNK_SUCCEED = 0,
1729 IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
/*
 * Read one chunk of an NVM section via the IWM_NVM_ACCESS_CMD firmware
 * command.  On success the chunk is copied into data+offset and *len
 * is updated (visible here via bytes_read).  A NOT_VALID_ADDRESS reply
 * at a non-zero offset is treated as the normal end-of-section case.
 */
1733 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1734 uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1736 struct iwm_nvm_access_cmd nvm_access_cmd = {
1737 .offset = htole16(offset),
1738 .length = htole16(length),
1739 .type = htole16(section),
1740 .op_code = IWM_NVM_READ_OPCODE,
1742 struct iwm_nvm_access_resp *nvm_resp;
1743 struct iwm_rx_packet *pkt;
1744 struct iwm_host_cmd cmd = {
1745 .id = IWM_NVM_ACCESS_CMD,
1746 .flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1747 .data = { &nvm_access_cmd, },
1749 int ret, bytes_read, offset_read;
1752 cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1754 ret = iwm_send_cmd(sc, &cmd);
1756 device_printf(sc->sc_dev,
1757 "Could not send NVM_ACCESS command (error=%d)\n", ret);
1763 /* Extract NVM response */
1764 nvm_resp = (void *)pkt->data;
1765 ret = le16toh(nvm_resp->status);
1766 bytes_read = le16toh(nvm_resp->length);
1767 offset_read = le16toh(nvm_resp->offset);
1768 resp_data = nvm_resp->data;
1770 if ((offset != 0) &&
1771 (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1773 * meaning of NOT_VALID_ADDRESS:
1774 * driver try to read chunk from address that is
1775 * multiple of 2K and got an error since addr is empty.
1776 * meaning of (offset != 0): driver already
1777 * read valid data from another chunk so this case
1780 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1781 "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
1786 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1787 "NVM access command failed with status %d\n", ret);
/* Sanity-check the response against what we asked for. */
1793 if (offset_read != offset) {
1794 device_printf(sc->sc_dev,
1795 "NVM ACCESS response with invalid offset %d\n",
1801 if (bytes_read > length) {
1802 device_printf(sc->sc_dev,
1803 "NVM ACCESS response with too much data "
1804 "(%d bytes requested, %d bytes received)\n",
1805 length, bytes_read);
1810 /* Write data to NVM */
1811 memcpy(data + offset, resp_data, bytes_read);
/* Response buffer must be released on every exit path. */
1815 iwm_free_resp(sc, &cmd);
1820 * Reads an NVM section completely.
1821 * NICs prior to 7000 family don't have a real NVM, but just read
1822 * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1823 * by uCode, we need to manually check in this case that we don't
1824 * overflow and try to read more than the EEPROM size.
1825 * For 7000 family NICs, we supply the maximal size we can read, and
1826 * the uCode fills the response with as much data as we can,
1827 * without overflowing, so no check is needed.
/*
 * Read an entire NVM section chunk-by-chunk until a short read signals
 * the end.  size_read is the running total across sections, used to
 * guard against overrunning the configured EEPROM size.
 */
1830 iwm_nvm_read_section(struct iwm_softc *sc,
1831 uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1833 uint16_t seglen, length, offset = 0;
1836 /* Set nvm section read length */
1837 length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1841 /* Read the NVM until exhausted (reading less than requested) */
1842 while (seglen == length) {
1843 /* Check no memory assumptions fail and cause an overflow */
1844 if ((size_read + offset + length) >
1845 sc->cfg->eeprom_size) {
1846 device_printf(sc->sc_dev,
1847 "EEPROM size is too small for NVM\n");
1851 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1853 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1854 "Cannot read NVM from section %d offset %d, length %d\n",
1855 section, offset, length);
1861 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1862 "NVM section %d read completed\n", section);
1868 * BEGIN IWM_NVM_PARSE
1871 /* iwlwifi/iwl-nvm-parse.c */
1873 /* NVM offsets (in words) definitions */
/* Word offsets into the pre-8000-family NVM image. */
1874 enum iwm_nvm_offsets {
1875 /* NVM HW-Section offset (in words) definitions */
1878 /* NVM SW-Section offset (in words) definitions */
1879 IWM_NVM_SW_SECTION = 0x1C0,
1880 IWM_NVM_VERSION = 0,
1884 IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1886 /* NVM calibration section offset (in words) definitions */
1887 IWM_NVM_CALIB_SECTION = 0x2B8,
1888 IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
/* Word offsets into the 8000-family NVM image (layout differs from 7000). */
1891 enum iwm_8000_nvm_offsets {
1892 /* NVM HW-Section offset (in words) definitions */
1893 IWM_HW_ADDR0_WFPM_8000 = 0x12,
1894 IWM_HW_ADDR1_WFPM_8000 = 0x16,
1895 IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1896 IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1897 IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1899 /* NVM SW-Section offset (in words) definitions */
1900 IWM_NVM_SW_SECTION_8000 = 0x1C0,
1901 IWM_NVM_VERSION_8000 = 0,
1902 IWM_RADIO_CFG_8000 = 0,
1904 IWM_N_HW_ADDRS_8000 = 3,
1906 /* NVM REGULATORY -Section offset (in words) definitions */
1907 IWM_NVM_CHANNELS_8000 = 0,
1908 IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1909 IWM_NVM_LAR_OFFSET_8000 = 0x507,
1910 IWM_NVM_LAR_ENABLED_8000 = 0x7,
1912 /* NVM calibration section offset (in words) definitions */
1913 IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1914 IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1917 /* SKU Capabilities (actual values from NVM definition) */
1919 IWM_NVM_SKU_CAP_BAND_24GHZ = (1 << 0),
1920 IWM_NVM_SKU_CAP_BAND_52GHZ = (1 << 1),
1921 IWM_NVM_SKU_CAP_11N_ENABLE = (1 << 2),
1922 IWM_NVM_SKU_CAP_11AC_ENABLE = (1 << 3),
1925 /* radio config bits (actual values from NVM definition) */
1926 #define IWM_NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */
1927 #define IWM_NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
1928 #define IWM_NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
1929 #define IWM_NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
1930 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
1931 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
/* 8000-family uses a wider 32-bit radio-config layout. */
1933 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x) (x & 0xF)
1934 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x) ((x >> 4) & 0xF)
1935 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x) ((x >> 8) & 0xF)
1936 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x) ((x >> 12) & 0xFFF)
1937 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x) ((x >> 24) & 0xF)
1938 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x) ((x >> 28) & 0xF)
/* Default TX power limit (dBm) — NOTE(review): units presumed, confirm */
1940 #define DEFAULT_MAX_TX_POWER 16
1943 * enum iwm_nvm_channel_flags - channel flags in NVM
1944 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1945 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1946 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1947 * @IWM_NVM_CHANNEL_RADAR: radar detection required
1948 * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1949 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1950 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1951 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1952 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1953 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
/* Per-channel flag bits read from the NVM (documented above). */
1955 enum iwm_nvm_channel_flags {
1956 IWM_NVM_CHANNEL_VALID = (1 << 0),
1957 IWM_NVM_CHANNEL_IBSS = (1 << 1),
1958 IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1959 IWM_NVM_CHANNEL_RADAR = (1 << 4),
1960 IWM_NVM_CHANNEL_DFS = (1 << 7),
1961 IWM_NVM_CHANNEL_WIDE = (1 << 8),
1962 IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1963 IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1964 IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1968 * Translate EEPROM flags to net80211.
/*
 * Convert NVM per-channel flag bits into net80211 channel flags:
 * non-ACTIVE -> passive scan only, non-IBSS -> no adhoc, and RADAR
 * channels get both DFS and no-adhoc.
 */
1971 iwm_eeprom_channel_flags(uint16_t ch_flags)
1976 if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1977 nflags |= IEEE80211_CHAN_PASSIVE;
1978 if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1979 nflags |= IEEE80211_CHAN_NOADHOC;
1980 if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1981 nflags |= IEEE80211_CHAN_DFS;
1983 nflags |= IEEE80211_CHAN_NOADHOC;
/*
 * Register with net80211 the channels in [ch_idx, ch_num) from the
 * NVM channel table, skipping entries without the VALID flag.  The
 * channel-number table used depends on the device family.
 */
1990 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1991 int maxchans, int *nchans, int ch_idx, size_t ch_num,
1992 const uint8_t bands[])
1994 const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
2000 for (; ch_idx < ch_num; ch_idx++) {
2001 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2002 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2003 ieee = iwm_nvm_channels[ch_idx];
2005 ieee = iwm_nvm_channels_8000[ch_idx];
2007 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2008 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2009 "Ch. %d Flags %x [%sGHz] - No traffic\n",
2011 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2016 nflags = iwm_eeprom_channel_flags(ch_flags);
2017 error = ieee80211_add_channel(chans, maxchans, nchans,
2018 ieee, 0, 0, nflags, bands);
2022 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2023 "Ch. %d Flags %x [%sGHz] - Added\n",
2025 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
/*
 * net80211 callback: build the device's channel list.  2 GHz channels
 * 1-13 are added as 11b/g, channel 14 as 11b only, and the 5 GHz 11a
 * channels only when the SKU enables the 52 GHz band.
 */
2031 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2032 struct ieee80211_channel chans[])
2034 struct iwm_softc *sc = ic->ic_softc;
2035 struct iwm_nvm_data *data = sc->nvm_data;
2036 uint8_t bands[IEEE80211_MODE_BYTES];
2039 memset(bands, 0, sizeof(bands));
2040 /* 1-13: 11b/g channels. */
2041 setbit(bands, IEEE80211_MODE_11B);
2042 setbit(bands, IEEE80211_MODE_11G);
2043 iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2044 IWM_NUM_2GHZ_CHANNELS - 1, bands);
2046 /* 14: 11b channel only. */
2047 clrbit(bands, IEEE80211_MODE_11G);
2048 iwm_add_channel_band(sc, chans, maxchans, nchans,
2049 IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2051 if (data->sku_cap_band_52GHz_enable) {
/* Table length differs between 7000 and 8000 families. */
2052 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2053 ch_num = nitems(iwm_nvm_channels);
2055 ch_num = nitems(iwm_nvm_channels_8000);
2056 memset(bands, 0, sizeof(bands));
2057 setbit(bands, IEEE80211_MODE_11A);
2058 iwm_add_channel_band(sc, chans, maxchans, nchans,
2059 IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
/*
 * 8000-family MAC address discovery: prefer the MAC-override NVM
 * section; if that address is the reserved/broadcast/multicast or
 * otherwise invalid value, fall back to the OTP address in the WFMP
 * PRPH registers (which store the bytes reversed).  Zeroes the address
 * if nothing usable is found.
 */
2064 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2065 const uint16_t *mac_override, const uint16_t *nvm_hw)
2067 const uint8_t *hw_addr;
2070 static const uint8_t reserved_mac[] = {
2071 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2074 hw_addr = (const uint8_t *)(mac_override +
2075 IWM_MAC_ADDRESS_OVERRIDE_8000);
2078 * Store the MAC address from MAO section.
2079 * No byte swapping is required in MAO section
2081 IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2084 * Force the use of the OTP MAC address in case of reserved MAC
2085 * address in the NVM, or if address is given but invalid.
2087 if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2088 !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2089 iwm_is_valid_ether_addr(data->hw_addr) &&
2090 !IEEE80211_IS_MULTICAST(data->hw_addr))
2093 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2094 "%s: mac address from nvm override section invalid\n",
2099 /* read the mac address from WFMP registers */
2100 uint32_t mac_addr0 =
2101 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2102 uint32_t mac_addr1 =
2103 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
/* Registers hold the address bytes in reverse order. */
2105 hw_addr = (const uint8_t *)&mac_addr0;
2106 data->hw_addr[0] = hw_addr[3];
2107 data->hw_addr[1] = hw_addr[2];
2108 data->hw_addr[2] = hw_addr[1];
2109 data->hw_addr[3] = hw_addr[0];
2111 hw_addr = (const uint8_t *)&mac_addr1;
2112 data->hw_addr[4] = hw_addr[1];
2113 data->hw_addr[5] = hw_addr[0];
2118 device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2119 memset(data->hw_addr, 0, sizeof(data->hw_addr));
/*
 * Fetch the SKU capability word: 16-bit from the SW section for
 * pre-8000 parts, 32-bit from the PHY_SKU section on 8000-family.
 */
2123 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2124 const uint16_t *phy_sku)
2126 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2127 return le16_to_cpup(nvm_sw + IWM_SKU);
2129 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
/* Fetch the NVM version field (16-bit pre-8000, 32-bit on 8000). */
2133 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2135 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2136 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION)
2138 return le32_to_cpup((const uint32_t *)(nvm_sw +
2139 IWM_NVM_VERSION_8000));
/*
 * Fetch the radio-config word: 16-bit from the SW section pre-8000,
 * 32-bit from the PHY_SKU section on 8000-family.
 */
2143 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2144 const uint16_t *phy_sku)
2146 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2147 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2149 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
/*
 * Fetch the number of reserved MAC addresses; the 8000-family value is
 * 32-bit and masked with IWM_N_HW_ADDR_MASK.
 */
2153 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2157 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2158 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2160 n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2162 return n_hw_addr & IWM_N_HW_ADDR_MASK;
/*
 * Decode the radio-config word into nvm_data fields using the bit
 * layout appropriate for the device family (8000 additionally carries
 * valid TX/RX antenna masks).
 */
2166 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2169 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2170 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2171 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2172 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2173 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2177 /* set the radio configuration for family 8000 */
2178 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2179 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2180 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2181 data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2182 data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2183 data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
/*
 * Resolve the device MAC address into data->hw_addr.  Pre-8000 parts
 * store it byte-swapped per 16-bit word in the HW section; 8000-family
 * delegates to iwm_set_hw_address_family_8000().  Fails (non-zero,
 * per the visible error print) if no valid address results.
 */
2187 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2188 const uint16_t *nvm_hw, const uint16_t *mac_override)
2190 #ifdef notyet /* for FAMILY 9000 */
2191 if (cfg->mac_addr_from_csr) {
2192 iwm_set_hw_address_from_csr(sc, data);
2195 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2196 const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2198 /* The byte order is little endian 16 bit, meaning 214365 */
2199 data->hw_addr[0] = hw_addr[1];
2200 data->hw_addr[1] = hw_addr[0];
2201 data->hw_addr[2] = hw_addr[3];
2202 data->hw_addr[3] = hw_addr[2];
2203 data->hw_addr[4] = hw_addr[5];
2204 data->hw_addr[5] = hw_addr[4];
2206 iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2209 if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2210 device_printf(sc->sc_dev, "no valid mac address was found\n");
/*
 * Parse the raw NVM sections into a freshly allocated iwm_nvm_data.
 * The allocation is sized for the family's channel-flag table, which
 * is copied from the SW section (7000) or the REGULATORY section
 * (8000).  Returns NULL on allocation failure or if no valid MAC
 * address can be determined.  Caller owns the result and frees it
 * with iwm_free_nvm_data().
 */
2217 static struct iwm_nvm_data *
2218 iwm_parse_nvm_data(struct iwm_softc *sc,
2219 const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2220 const uint16_t *nvm_calib, const uint16_t *mac_override,
2221 const uint16_t *phy_sku, const uint16_t *regulatory)
2223 struct iwm_nvm_data *data;
2224 uint32_t sku, radio_cfg;
2226 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2227 data = malloc(sizeof(*data) +
2228 IWM_NUM_CHANNELS * sizeof(uint16_t),
2229 M_DEVBUF, M_NOWAIT | M_ZERO);
2231 data = malloc(sizeof(*data) +
2232 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2233 M_DEVBUF, M_NOWAIT | M_ZERO);
2238 data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2240 radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2241 iwm_set_radio_cfg(sc, data, radio_cfg);
2243 sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2244 data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2245 data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2246 data->sku_cap_11n_enable = 0;
2248 data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2250 /* If no valid mac address was found - bail out */
2251 if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2252 free(data, M_DEVBUF);
2256 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2257 memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2258 IWM_NUM_CHANNELS * sizeof(uint16_t));
/*
 * Fixed: the '&regulatory' address-of expression had been mangled
 * into the '(R)' mojibake character (an '&reg' HTML-entity
 * substitution), which does not compile.  'regulatory' is the
 * REGULATORY-section pointer from the parameter list above.
 */
2260 memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2261 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
/* Release an iwm_nvm_data allocated by iwm_parse_nvm_data(). */
2268 iwm_free_nvm_data(struct iwm_nvm_data *data)
2271 free(data, M_DEVBUF);
/*
 * Validate that the family's mandatory NVM sections are present, then
 * hand the section pointers to iwm_parse_nvm_data().  7000 requires
 * SW + HW sections; 8000 requires SW + REGULATORY, at least one of
 * HW/MAC_OVERRIDE, and PHY_SKU.  Unknown families panic.
 */
2274 static struct iwm_nvm_data *
2275 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2277 const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2279 /* Checking for required sections */
2280 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2281 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2282 !sections[sc->cfg->nvm_hw_section_num].data) {
2283 device_printf(sc->sc_dev,
2284 "Can't parse empty OTP/NVM sections\n");
2287 } else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2288 /* SW and REGULATORY sections are mandatory */
2289 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2290 !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2291 device_printf(sc->sc_dev,
2292 "Can't parse empty OTP/NVM sections\n");
2295 /* MAC_OVERRIDE or at least HW section must exist */
2296 if (!sections[sc->cfg->nvm_hw_section_num].data &&
2297 !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2298 device_printf(sc->sc_dev,
2299 "Can't parse mac_address, empty sections\n");
2303 /* PHY_SKU section is mandatory in B0 */
2304 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2305 device_printf(sc->sc_dev,
2306 "Can't parse phy_sku in B0, empty sections\n");
2310 panic("unknown device family %d\n", sc->cfg->device_family);
/* Cast each raw section buffer to the 16-bit-word view the parser uses. */
2313 hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2314 sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2315 calib = (const uint16_t *)
2316 sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2317 regulatory = (const uint16_t *)
2318 sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2319 mac_override = (const uint16_t *)
2320 sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2321 phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2323 return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2324 phy_sku, regulatory);
/*
 * Read every NVM section from the device into heap copies, parse them
 * into sc->nvm_data, then free the per-section buffers.  A shared
 * read buffer of eeprom_size is reused for each section read.
 */
2328 iwm_nvm_init(struct iwm_softc *sc)
2330 struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2331 int i, ret, section;
2332 uint32_t size_read = 0;
2333 uint8_t *nvm_buffer, *temp;
2336 memset(nvm_sections, 0, sizeof(nvm_sections));
2338 if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2341 /* load NVM values from nic */
2342 /* Read From FW NVM */
2343 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2345 nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2348 for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2349 /* we override the constness for initial read */
2350 ret = iwm_nvm_read_section(sc, section, nvm_buffer,
/* Keep a private copy of the section; nvm_buffer is reused. */
2355 temp = malloc(len, M_DEVBUF, M_NOWAIT);
2360 memcpy(temp, nvm_buffer, len);
2362 nvm_sections[section].data = temp;
2363 nvm_sections[section].length = len;
2366 device_printf(sc->sc_dev, "OTP is blank\n");
2367 free(nvm_buffer, M_DEVBUF);
2369 sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2372 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2373 "nvm version = %x\n", sc->nvm_data->nvm_version);
2375 for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2376 if (nvm_sections[i].data != NULL)
2377 free(nvm_sections[i].data, M_DEVBUF);
2384 * Firmware loading gunk. This is kind of a weird hybrid between the
2385 * iwn driver and the Linux iwlwifi driver.
/*
 * iwm_firmware_load_sect - DMA one firmware section to the device.
 *
 * Splits the section into chunks no larger than IWM_FH_MEM_TB_MAX_LENGTH
 * and hands each chunk to iwm_firmware_load_chunk() at the appropriate
 * device offset.  Error handling between chunks is elided in this view.
 */
2389 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
2390 const uint8_t *section, uint32_t byte_cnt)
2393 uint32_t chunk_sz, offset;
/* Cap each transfer at the FH transfer-buffer maximum. */
2395 chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
2397 for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
2399 const uint8_t *data;
2401 addr = dst_addr + offset;
/* Final chunk may be shorter than chunk_sz. */
2402 len = MIN(chunk_sz, byte_cnt - offset);
2403 data = section + offset;
2405 error = iwm_firmware_load_chunk(sc, addr, data, len);
/*
 * iwm_firmware_load_chunk - push one firmware chunk through the
 * service DMA channel and wait for the "chunk done" interrupt.
 *
 * Copies the chunk into the pre-allocated fw_dma bounce buffer,
 * programs the FH TX channel registers for a single-TB transfer, then
 * sleeps (up to 1s per iteration) until the interrupt handler sets
 * sc_fw_chunk_done.  Chunks targeting the extended LMPM address range
 * are bracketed by setting/clearing the extended-address-space bit.
 */
2414 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2415 const uint8_t *chunk, uint32_t byte_cnt)
2417 struct iwm_dma_info *dma = &sc->fw_dma;
2420 /* Copy firmware chunk into pre-allocated DMA-safe memory. */
2421 memcpy(dma->vaddr, chunk, byte_cnt);
2422 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
/* Enable extended addressing while loading into the extended range. */
2424 if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2425 dst_addr <= IWM_FW_MEM_EXTENDED_END) {
2426 iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2427 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
/* Cleared by the interrupt handler when the DMA completes. */
2430 sc->sc_fw_chunk_done = 0;
2432 if (!iwm_nic_lock(sc))
/* Pause the channel while (re)programming it. */
2435 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2436 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2437 IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
/* Program source DMA address (low bits, then high bits + length). */
2439 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2440 dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2441 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2442 (iwm_get_dma_hi_addr(dma->paddr)
2443 << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
/* Single transfer buffer, valid TFD. */
2444 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2445 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2446 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2447 IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
/* Kick off the transfer; interrupt fires at end-of-TFD. */
2448 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2449 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2450 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2451 IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2455 /* wait 1s for this segment to load */
2456 while (!sc->sc_fw_chunk_done)
2457 if ((error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz)) != 0)
2460 if (!sc->sc_fw_chunk_done) {
2461 device_printf(sc->sc_dev,
2462 "fw chunk addr 0x%x len %d failed to load\n",
2463 dst_addr, byte_cnt);
/* Undo the extended-address-space bit set above. */
2466 if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2467 dst_addr <= IWM_FW_MEM_EXTENDED_END && iwm_nic_lock(sc)) {
2468 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2469 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
/*
 * iwm_load_cpu_sections_8000 - load one CPU's firmware sections
 * (8000-family devices).
 *
 * Iterates fw_sect[] starting at *first_ucode_section, loading each
 * section until a CPU1/CPU2 or paging separator is reached, and
 * reports each loaded section to the ucode via the FH load-status
 * register.  On return *first_ucode_section is advanced so the caller
 * can load the next CPU's sections.
 */
2477 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
2478 int cpu, int *first_ucode_section)
2481 int i, error = 0, sec_num = 0x1;
2482 uint32_t val, last_read_idx = 0;
2489 *first_ucode_section = 0;
2492 (*first_ucode_section)++;
2495 for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
2497 data = fws->fw_sect[i].fws_data;
2498 dlen = fws->fw_sect[i].fws_len;
2499 offset = fws->fw_sect[i].fws_devoff;
2502 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2504 * PAGING_SEPARATOR_SECTION delimiter - separate between
2505 * CPU2 non paged to CPU2 paging sec.
/* Stop at the end of this CPU's sections. */
2507 if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2508 offset == IWM_PAGING_SEPARATOR_SECTION)
2511 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2512 "LOAD FIRMWARE chunk %d offset 0x%x len %d for cpu %d\n",
2513 i, offset, dlen, cpu);
/* Sections must fit in the firmware DMA segment. */
2515 if (dlen > sc->sc_fwdmasegsz) {
2516 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2517 "chunk %d too large (%d bytes)\n", i, dlen);
2520 error = iwm_firmware_load_sect(sc, offset, data, dlen);
2523 device_printf(sc->sc_dev,
2524 "could not load firmware chunk %d (error %d)\n",
2529 /* Notify the ucode of the loaded section number and status */
2530 if (iwm_nic_lock(sc)) {
2531 val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
/* OR in this section's bit; sec_num accumulates a bitmask. */
2532 val = val | (sec_num << shift_param);
2533 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2534 sec_num = (sec_num << 1) | 0x1;
2538 * The firmware won't load correctly without this delay.
2544 *first_ucode_section = last_read_idx;
/* Tell the ucode all of this CPU's sections are present. */
2546 if (iwm_nic_lock(sc)) {
2548 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2550 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
/*
 * iwm_load_firmware_8000 - load a secured firmware image on
 * 8000-family devices: release the CPU reset, then load the CPU1
 * sections followed by the CPU2 sections.
 */
2558 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2560 struct iwm_fw_sects *fws;
2562 int first_ucode_section;
2564 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "loading ucode type %d\n",
2567 fws = &sc->sc_fw.fw_sects[ucode_type];
2569 /* configure the ucode to be ready to get the secured image */
2570 /* release CPU reset */
2571 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
2573 /* load to FW the binary Secured sections of CPU1 */
2574 error = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
2578 /* load to FW the binary sections of CPU2 */
/* first_ucode_section was advanced past CPU1's sections above. */
2579 return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
/*
 * iwm_load_firmware_7000 - load all firmware sections for the given
 * ucode type on 7000-family devices, then release the CPU from reset.
 */
2583 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2585 struct iwm_fw_sects *fws;
/* Interrupt flag is set by the ISR once the ucode is alive. */
2591 sc->sc_uc.uc_intr = 0;
2593 fws = &sc->sc_fw.fw_sects[ucode_type];
2594 for (i = 0; i < fws->fw_count; i++) {
2595 data = fws->fw_sect[i].fws_data;
2596 dlen = fws->fw_sect[i].fws_len;
2597 offset = fws->fw_sect[i].fws_devoff;
2598 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2599 "LOAD FIRMWARE type %d offset %u len %d\n",
2600 ucode_type, offset, dlen);
/* Sections must fit in the firmware DMA segment. */
2601 if (dlen > sc->sc_fwdmasegsz) {
2602 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2603 "chunk %d too large (%d bytes)\n", i, dlen);
2606 error = iwm_firmware_load_sect(sc, offset, data, dlen);
2609 device_printf(sc->sc_dev,
2610 "could not load firmware chunk %u of %u "
2611 "(error=%d)\n", i, fws->fw_count, error);
/* Take the CPU out of reset so the loaded image starts running. */
2616 IWM_WRITE(sc, IWM_CSR_RESET, 0);
/*
 * iwm_load_firmware - dispatch to the family-specific loader, then
 * wait (up to ~1s in hz/10 slices) for the ucode "alive" interrupt.
 * On failure on 8000-family parts, dump both CPU status registers.
 */
2622 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2626 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2627 error = iwm_load_firmware_8000(sc, ucode_type);
2629 error = iwm_load_firmware_7000(sc, ucode_type);
2633 /* wait for the firmware to load */
2634 for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
2635 error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
2637 if (error || !sc->sc_uc.uc_ok) {
2638 device_printf(sc->sc_dev, "could not load firmware\n");
2639 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2640 device_printf(sc->sc_dev, "cpu1 status: 0x%x\n",
2641 iwm_read_prph(sc, IWM_SB_CPU_1_STATUS));
2642 device_printf(sc->sc_dev, "cpu2 status: 0x%x\n",
2643 iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
2648 * Give the firmware some time to initialize.
2649 * Accessing it too early causes errors.
/* Nothing wakes this sleep; it is a plain 1s delay. */
2651 msleep(&w, &sc->sc_mtx, 0, "iwmfwinit", hz);
2656 /* iwlwifi: pcie/trans.c */
/*
 * iwm_start_fw - bring up the NIC, clear rfkill handshake bits,
 * enable host interrupts, and load the requested firmware image.
 */
2658 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
/* Ack any pending interrupts before init. */
2662 IWM_WRITE(sc, IWM_CSR_INT, ~0);
2664 if ((error = iwm_nic_init(sc)) != 0) {
2665 device_printf(sc->sc_dev, "unable to init nic\n");
2669 /* make sure rfkill handshake bits are cleared */
2670 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2671 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2672 IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2674 /* clear (again), then enable host interrupts */
2675 IWM_WRITE(sc, IWM_CSR_INT, ~0);
2676 iwm_enable_interrupts(sc);
2678 /* really make sure rfkill handshake bits are cleared */
2679 /* maybe we should write a few times more? just to make sure */
2680 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2681 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2683 /* Load the given image to the HW */
2684 return iwm_load_firmware(sc, ucode_type);
/*
 * iwm_send_tx_ant_cfg - tell the firmware which TX antennas are
 * valid, via a synchronous IWM_TX_ANT_CONFIGURATION_CMD.
 */
2688 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2690 struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2691 .valid = htole32(valid_tx_ant),
2694 return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2695 IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2698 /* iwlwifi: mvm/fw.c */
/*
 * iwm_send_phy_cfg_cmd - send the PHY configuration and the default
 * calibration triggers for the currently-running ucode type to the
 * firmware (synchronous command).
 */
2700 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2702 struct iwm_phy_cfg_cmd phy_cfg_cmd;
2703 enum iwm_ucode_type ucode_type = sc->sc_uc_current;
2705 /* Set parameters */
2706 phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
/* Calibration triggers differ per ucode image (init vs. regular). */
2707 phy_cfg_cmd.calib_control.event_trigger =
2708 sc->sc_default_calib[ucode_type].event_trigger;
2709 phy_cfg_cmd.calib_control.flow_trigger =
2710 sc->sc_default_calib[ucode_type].flow_trigger;
2712 IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2713 "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2714 return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2715 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
/*
 * iwm_mvm_load_ucode_wait_alive - read the firmware image for the
 * requested ucode type, start it, and wait for the post-alive setup.
 * On a failed start, sc_uc_current is rolled back to the previous
 * ucode type.
 */
2719 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2720 enum iwm_ucode_type ucode_type)
2722 enum iwm_ucode_type old_type = sc->sc_uc_current;
2725 if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2726 device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
/* Record the new type before starting so the ISR sees it. */
2731 sc->sc_uc_current = ucode_type;
2732 error = iwm_start_fw(sc, ucode_type);
2734 device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2735 sc->sc_uc_current = old_type;
2739 error = iwm_post_alive(sc);
2741 device_printf(sc->sc_dev, "iwm_fw_alive: failed %d\n", error);
2751 * follows iwlwifi/fw.c
/*
 * iwm_run_init_mvm_ucode - load and run the INIT ucode image.
 *
 * Loads the init firmware, reads the NVM (and copies the MAC address
 * out of it), then — unless 'justnvm' is set — configures BT coex,
 * the smart FIFO, TX antennas, and the PHY, and finally waits up to
 * 2s for the init-complete notification.
 */
2754 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2758 /* do not operate with rfkill switch turned on */
2759 if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2760 device_printf(sc->sc_dev,
2761 "radio is disabled by hardware switch\n");
/* Set by the init-complete notification handler. */
2765 sc->sc_init_complete = 0;
2766 if ((error = iwm_mvm_load_ucode_wait_alive(sc,
2767 IWM_UCODE_TYPE_INIT)) != 0) {
2768 device_printf(sc->sc_dev, "failed to load init firmware\n");
2773 if ((error = iwm_nvm_init(sc)) != 0) {
2774 device_printf(sc->sc_dev, "failed to read nvm\n");
/* Publish the NVM MAC address to net80211. */
2777 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
2782 if ((error = iwm_send_bt_init_conf(sc)) != 0) {
2783 device_printf(sc->sc_dev,
2784 "failed to send bt coex configuration: %d\n", error);
2788 /* Init Smart FIFO. */
2789 error = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
2794 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2795 "%s: phy_txant=0x%08x, nvm_valid_tx_ant=0x%02x, valid=0x%02x\n",
2797 ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
2798 >> IWM_FW_PHY_CFG_TX_CHAIN_POS),
2799 sc->nvm_data->valid_tx_ant,
2800 iwm_fw_valid_tx_ant(sc));
2803 /* Send TX valid antennas before triggering calibrations */
2804 error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
2806 device_printf(sc->sc_dev,
2807 "failed to send antennas before calibration: %d\n", error);
2812 * Send phy configurations command to init uCode
2813 * to start the 16.0 uCode init image internal calibrations.
2815 if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
2816 device_printf(sc->sc_dev,
2817 "%s: failed to run internal calibration: %d\n",
2823 * Nothing to do but wait for the init complete notification
/* 2s timeout per wakeup; loop until the flag is set or we fail. */
2826 while (!sc->sc_init_complete) {
2827 error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
2828 0, "iwminit", 2*hz);
2830 device_printf(sc->sc_dev, "init complete failed: %d\n",
2831 sc->sc_init_complete);
2836 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "init %scomplete\n",
2837 sc->sc_init_complete ? "" : "not ");
2846 /* (re)stock rx ring, called at init-time and at runtime */
/*
 * iwm_rx_addbuf - allocate a jumbo cluster mbuf, DMA-map it via the
 * spare map, swap the spare map with the slot's map, and write the
 * 256-byte-aligned bus address (>> 8) into the RX descriptor.
 */
2848 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
2850 struct iwm_rx_ring *ring = &sc->rxq;
2851 struct iwm_rx_data *data = &ring->data[idx];
2853 bus_dmamap_t dmamap = NULL;
2854 bus_dma_segment_t seg;
2857 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
2861 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
/* Load into the spare map first so the slot stays valid on failure. */
2862 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
2863 &seg, &nsegs, BUS_DMA_NOWAIT);
2865 device_printf(sc->sc_dev,
2866 "%s: can't map mbuf, error %d\n", __func__, error);
2870 if (data->m != NULL)
2871 bus_dmamap_unload(ring->data_dmat, data->map);
2873 /* Swap ring->spare_map with data->map */
2875 data->map = ring->spare_map;
2876 ring->spare_map = dmamap;
2878 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
2881 /* Update RX descriptor. */
/* Hardware requires 256-byte alignment; descriptor stores addr >> 8. */
2882 KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
2883 ring->desc[idx] = htole32(seg.ds_addr >> 8);
2884 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2885 BUS_DMASYNC_PREWRITE);
2893 /* iwlwifi: mvm/rx.c */
2894 #define IWM_RSSI_OFFSET 50
/*
 * iwm_mvm_calc_rssi - legacy RSSI calculation (pre energy-API fw).
 *
 * Extracts per-antenna AGC and in-band RSSI values from the PHY info,
 * converts each to dBm (rssi - offset - agc) and returns the maximum
 * of the two antennas.
 */
2896 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2898 int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
2899 uint32_t agc_a, agc_b;
2902 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
2903 agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
2904 agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
2906 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
2907 rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
2908 rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
2911 * dBm = rssi dB - agc dB - constant.
2912 * Higher AGC (higher radio gain) means lower signal.
2914 rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
2915 rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
2916 max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
2918 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2919 "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
2920 rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
2922 return max_rssi_dbm;
2925 /* iwlwifi: mvm/rx.c */
2927 * iwm_mvm_get_signal_strength - use new rx PHY INFO API
2928 * values are reported by the fw as positive values - need to negate
2929 * to obtain their dBM. Account for missing antennas by replacing 0
2930 * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
2933 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2935 int energy_a, energy_b, energy_c, max_energy;
2938 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
2939 energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
2940 IWM_RX_INFO_ENERGY_ANT_A_POS;
/* 0 means "antenna absent" — substitute an impossible -256 dBm. */
2941 energy_a = energy_a ? -energy_a : -256;
2942 energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
2943 IWM_RX_INFO_ENERGY_ANT_B_POS;
2944 energy_b = energy_b ? -energy_b : -256;
2945 energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
2946 IWM_RX_INFO_ENERGY_ANT_C_POS;
2947 energy_c = energy_c ? -energy_c : -256;
/* Report the strongest (least negative) of the three antennas. */
2948 max_energy = MAX(energy_a, energy_b);
2949 max_energy = MAX(max_energy, energy_c);
2951 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2952 "energy In A %d B %d C %d , and max %d\n",
2953 energy_a, energy_b, energy_c, max_energy);
/*
 * iwm_mvm_rx_rx_phy_cmd - handle an RX PHY-stats notification: stash
 * the PHY info so the following MPDU notification can use it.
 */
2959 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
2960 struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2962 struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
2964 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
2965 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
/* Consumed later by iwm_mvm_rx_rx_mpdu(). */
2967 memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
2971 * Retrieve the average noise (in dBm) among receivers.
/*
 * Averages the beacon-silence RSSI over the (up to 3) antennas and
 * converts to dBm with a -107 offset; -127 if no antenna reported.
 * NOTE(review): the accumulation of 'total'/'nbant' inside the loop
 * is elided in this view — confirm against the full source.
 */
2974 iwm_get_noise(struct iwm_softc *sc,
2975 const struct iwm_mvm_statistics_rx_non_phy *stats)
2977 int i, total, nbant, noise;
2979 total = nbant = noise = 0;
2980 for (i = 0; i < 3; i++) {
/* Low byte holds the per-antenna silence RSSI. */
2981 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
2982 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
2993 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
2994 __func__, nbant, total);
2996 /* There should be at least one antenna but check anyway. */
2997 return (nbant == 0) ? -127 : (total / nbant) - 107;
2999 /* For now, just hard-code it to -96 to be safe */
3005 * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3007 * Handles the actual data of the Rx packet from the fw
/*
 * Validates the frame (dsp size, CRC/overrun status), computes RSSI
 * using whichever API the firmware supports, restocks the RX ring,
 * fills an ieee80211_rx_stats, optionally feeds radiotap, and hands
 * the mbuf to net80211 (with or without a node reference).
 */
3010 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
3011 struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3013 struct ieee80211com *ic = &sc->sc_ic;
3014 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3015 struct ieee80211_frame *wh;
3016 struct ieee80211_node *ni;
3017 struct ieee80211_rx_stats rxs;
3019 struct iwm_rx_phy_info *phy_info;
3020 struct iwm_rx_mpdu_res_start *rx_res;
3022 uint32_t rx_pkt_status;
3025 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
/* PHY info was cached by the preceding PHY notification. */
3027 phy_info = &sc->sc_last_phy_info;
3028 rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3029 wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3030 len = le16toh(rx_res->byte_count);
/* Status word follows the frame payload. */
3031 rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
/* Point the mbuf at the 802.11 frame inside the RX buffer. */
3034 m->m_data = pkt->data + sizeof(*rx_res);
3035 m->m_pkthdr.len = m->m_len = len;
3037 if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3038 device_printf(sc->sc_dev,
3039 "dsp size out of range [0,20]: %d\n",
3040 phy_info->cfg_phy_cnt);
/* Drop frames with bad CRC or FIFO overrun. */
3044 if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3045 !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3046 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3047 "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
/* Newer firmware reports energy; older needs the AGC calculation. */
3051 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
3052 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3054 rssi = iwm_mvm_calc_rssi(sc, phy_info);
3057 /* Note: RSSI is absolute (ie a -ve value) */
3058 if (rssi < IWM_MIN_DBM)
3060 else if (rssi > IWM_MAX_DBM)
3063 /* Map it to relative value */
3064 rssi = rssi - sc->sc_noise;
3066 /* replenish ring for the buffer we're going to feed to the sharks */
3067 if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3068 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3073 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3074 "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3076 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3078 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3079 "%s: phy_info: channel=%d, flags=0x%08x\n",
3081 le16toh(phy_info->channel),
3082 le16toh(phy_info->phy_flags));
3085 * Populate an RX state struct with the provided information.
3087 bzero(&rxs, sizeof(rxs));
3088 rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3089 rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3090 rxs.c_ieee = le16toh(phy_info->channel);
/* NOTE(review): htole16 argument parenthesization looks odd here —
 * le16toh(phy_info->phy_flags & ...) masks after byteswap on the
 * already-LE field; verify against upstream. */
3091 if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3092 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3094 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3097 /* rssi is in 1/2db units */
3098 rxs.c_rssi = rssi * 2;
3099 rxs.c_nf = sc->sc_noise;
3100 if (ieee80211_add_rx_params(m, &rxs) == 0) {
3102 ieee80211_free_node(ni);
3106 if (ieee80211_radiotap_active_vap(vap)) {
3107 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3110 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3111 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3112 tap->wr_chan_freq = htole16(rxs.c_freq);
3113 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3114 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3115 tap->wr_dbm_antsignal = (int8_t)rssi;
3116 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3117 tap->wr_tsft = phy_info->system_timestamp;
/* Map firmware rate codes to radiotap 500kb/s units. */
3118 switch (phy_info->rate) {
3120 case 10: tap->wr_rate = 2; break;
3121 case 20: tap->wr_rate = 4; break;
3122 case 55: tap->wr_rate = 11; break;
3123 case 110: tap->wr_rate = 22; break;
3125 case 0xd: tap->wr_rate = 12; break;
3126 case 0xf: tap->wr_rate = 18; break;
3127 case 0x5: tap->wr_rate = 24; break;
3128 case 0x7: tap->wr_rate = 36; break;
3129 case 0x9: tap->wr_rate = 48; break;
3130 case 0xb: tap->wr_rate = 72; break;
3131 case 0x1: tap->wr_rate = 96; break;
3132 case 0x3: tap->wr_rate = 108; break;
3133 /* Unknown rate: should not happen. */
3134 default: tap->wr_rate = 0;
/* With a node: normal input path; without: "input all" path. */
3140 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3141 ieee80211_input_mimo(ni, m);
3142 ieee80211_free_node(ni);
3144 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3145 ieee80211_input_mimo_all(ic, m);
3152 counter_u64_add(ic->ic_ierrors, 1);
/*
 * iwm_mvm_rx_tx_cmd_single - process a single-frame TX completion.
 *
 * Translates the firmware TX status into an
 * ieee80211_ratectl_tx_status, feeds it to the rate-control module,
 * and returns non-zero if the transmission failed.
 */
3156 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3157 struct iwm_node *in)
3159 struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3160 struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
3161 struct ieee80211_node *ni = &in->in_ni;
3162 int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
/* This handler only supports non-aggregated (single-frame) TX. */
3164 KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3166 /* Update rate control statistics. */
3167 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3169 (int) le16toh(tx_resp->status.status),
3170 (int) le16toh(tx_resp->status.sequence),
3171 tx_resp->frame_count,
3172 tx_resp->bt_kill_count,
3173 tx_resp->failure_rts,
3174 tx_resp->failure_frame,
3175 le32toh(tx_resp->initial_rate),
3176 (int) le16toh(tx_resp->wireless_media_time));
3178 txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
3179 IEEE80211_RATECTL_STATUS_LONG_RETRY;
3180 txs->short_retries = tx_resp->failure_rts;
3181 txs->long_retries = tx_resp->failure_frame;
/* Map firmware failure codes onto net80211 ratectl statuses. */
3182 if (status != IWM_TX_STATUS_SUCCESS &&
3183 status != IWM_TX_STATUS_DIRECT_DONE) {
3185 case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
3186 txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
3188 case IWM_TX_STATUS_FAIL_LONG_LIMIT:
3189 txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
3191 case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
3192 txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
3195 txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
3199 txs->status = IEEE80211_RATECTL_TX_SUCCESS;
3201 ieee80211_ratectl_tx_complete(ni, txs);
/* Non-zero return means the frame failed. */
3203 return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
/*
 * iwm_mvm_rx_tx_cmd - TX-completion notification handler.
 *
 * Looks up the originating TX ring slot from the command header,
 * processes the completion, unmaps and completes the mbuf back to
 * net80211, and restarts transmission if the queue-full mask clears.
 */
3207 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
3208 struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3210 struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3211 int idx = cmd_hdr->idx;
3212 int qid = cmd_hdr->qid;
3213 struct iwm_tx_ring *ring = &sc->txq[qid];
3214 struct iwm_tx_data *txd = &ring->data[idx];
3215 struct iwm_node *in = txd->in;
3216 struct mbuf *m = txd->m;
3219 KASSERT(txd->done == 0, ("txd not done"));
3220 KASSERT(txd->in != NULL, ("txd without node"));
3221 KASSERT(txd->m != NULL, ("txd without mbuf"));
3223 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
/* A completion means the hardware is alive; reset the watchdog. */
3225 sc->sc_tx_timer = 0;
3227 status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3229 /* Unmap and free mbuf. */
3230 bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3231 bus_dmamap_unload(ring->data_dmat, txd->map);
3233 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3234 "free txd %p, in %p\n", txd, txd->in);
/* Hand the mbuf (and its node ref) back to net80211. */
3239 ieee80211_tx_complete(&in->in_ni, m, status);
3241 if (--ring->queued < IWM_TX_RING_LOMARK) {
3242 sc->qfullmsk &= ~(1 << ring->qid);
3243 if (sc->qfullmsk == 0) {
3245 * Well, we're in interrupt context, but then again
3246 * I guess net80211 does all sorts of stunts in
3247 * interrupt context, so maybe this is no biggie.
3259 * Process a "command done" firmware notification. This is where we wakeup
3260 * processes waiting for a synchronous command completion.
/*
 * Ignores notifications that are not on the command queue; otherwise
 * frees any mbuf-mapped command payload and wakes the sleeper parked
 * on the corresponding ring descriptor.
 */
3264 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3266 struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3267 struct iwm_tx_data *data;
3269 if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3270 return; /* Not a command ack. */
3273 /* XXX wide commands? */
3274 IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3275 "cmd notification type 0x%x qid %d idx %d\n",
3276 pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3278 data = &ring->data[pkt->hdr.idx];
3280 /* If the command was mapped in an mbuf, free it. */
3281 if (data->m != NULL) {
3282 bus_dmamap_sync(ring->data_dmat, data->map,
3283 BUS_DMASYNC_POSTWRITE);
3284 bus_dmamap_unload(ring->data_dmat, data->map);
/* Matches the msleep() in the synchronous command sender. */
3288 wakeup(&ring->desc[pkt->hdr.idx]);
3293 * necessary only for block ack mode
/*
 * iwm_update_sched - update the TX scheduler byte-count table for a
 * queue slot.  Writes sta_id and (padded) frame length into the
 * shared DMA table; low indices are duplicated past
 * IWM_TFD_QUEUE_SIZE_MAX as the hardware expects.
 */
3296 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3299 struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3302 scd_bc_tbl = sc->sched_dma.vaddr;
3304 len += 8; /* magic numbers came naturally from paris */
/* With the DW byte-count table, length is stored in dwords. */
3305 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
3306 len = roundup(len, 4) / 4;
/* Station id in the top 4 bits, length in the rest. */
3308 w_val = htole16(sta_id << 12 | len);
3310 /* Update TX scheduler. */
3311 scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3312 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3313 BUS_DMASYNC_PREWRITE);
3315 /* I really wonder what this is ?!? */
3316 if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3317 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3318 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3319 BUS_DMASYNC_PREWRITE);
3325 * Take an 802.11 (non-n) rate, find the relevant rate
3326 * table entry. return the index into in_ridx[].
3328 * The caller then uses that index back into in_ridx
3329 * to figure out the rate index programmed /into/
3330 * the firmware for this given node.
3333 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
/* Linear scan of the node's programmed rate set. */
3339 for (i = 0; i < nitems(in->in_ridx); i++) {
3340 r = iwm_rates[in->in_ridx[i]].rate;
/* Not found: log and fall back. */
3345 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3346 "%s: couldn't find an entry for rate=%d\n",
3350 /* XXX Return the first */
3351 /* XXX TODO: have it return the /lowest/ */
/*
 * iwm_tx_rateidx_global_lookup - find the index of 'rate' in the
 * global iwm_rates[] table; logs (and falls back) when absent.
 */
3356 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3360 for (i = 0; i < nitems(iwm_rates); i++) {
3361 if (iwm_rates[i].rate == rate)
3365 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3366 "%s: couldn't find an entry for rate=%d\n",
3373 * Fill in the rate related information for a transmit command.
/*
 * Chooses a TX rate index based on frame type (mgmt/mcast/fixed/
 * EAPOL/data), fills in retry limits and rate flags in the TX
 * command, and returns the selected iwm_rates[] entry.  For data
 * frames, the rate comes from the node's rate-control state.
 */
3375 static const struct iwm_rate *
3376 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3377 struct mbuf *m, struct iwm_tx_cmd *tx)
3379 struct ieee80211_node *ni = &in->in_ni;
3380 struct ieee80211_frame *wh;
3381 const struct ieee80211_txparam *tp = ni->ni_txparms;
3382 const struct iwm_rate *rinfo;
3384 int ridx, rate_flags;
3386 wh = mtod(m, struct ieee80211_frame *);
3387 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3389 tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3390 tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
/* Management frames use the configured management rate. */
3392 if (type == IEEE80211_FC0_TYPE_MGT) {
3393 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3394 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3395 "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3396 } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3397 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3398 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3399 "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3400 } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3401 ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3402 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3403 "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
/* EAPOL frames go out at the reliable management rate. */
3404 } else if (m->m_flags & M_EAPOL) {
3405 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3406 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3407 "%s: EAPOL\n", __func__);
3408 } else if (type == IEEE80211_FC0_TYPE_DATA) {
3411 /* for data frames, use RS table */
3412 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3413 /* XXX pass pktlen */
3414 (void) ieee80211_ratectl_rate(ni, NULL, 0);
3415 i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3416 ridx = in->in_ridx[i];
3418 /* This is the index into the programmed table */
3419 tx->initial_rate_index = i;
3420 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3422 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3423 "%s: start with i=%d, txrate %d\n",
3424 __func__, i, iwm_rates[ridx].rate);
/* Fallback for anything else: management rate. */
3426 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3427 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DEFAULT (%d)\n",
3428 __func__, tp->mgmtrate);
3431 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3432 "%s: frame type=%d txrate %d\n",
3433 __func__, type, iwm_rates[ridx].rate);
3435 rinfo = &iwm_rates[ridx];
3437 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3440 !! (IWM_RIDX_IS_CCK(ridx))
3443 /* XXX TODO: hard-coded TX antenna? */
3444 rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3445 if (IWM_RIDX_IS_CCK(ridx))
3446 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3447 tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3454 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3456 struct ieee80211com *ic = &sc->sc_ic;
3457 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3458 struct iwm_node *in = IWM_NODE(ni);
3459 struct iwm_tx_ring *ring;
3460 struct iwm_tx_data *data;
3461 struct iwm_tfd *desc;
3462 struct iwm_device_cmd *cmd;
3463 struct iwm_tx_cmd *tx;
3464 struct ieee80211_frame *wh;
3465 struct ieee80211_key *k = NULL;
3467 const struct iwm_rate *rinfo;
3470 bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3473 int i, totlen, error, pad;
3475 wh = mtod(m, struct ieee80211_frame *);
3476 hdrlen = ieee80211_anyhdrsize(wh);
3477 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3479 ring = &sc->txq[ac];
3480 desc = &ring->desc[ring->cur];
3481 memset(desc, 0, sizeof(*desc));
3482 data = &ring->data[ring->cur];
3484 /* Fill out iwm_tx_cmd to send to the firmware */
3485 cmd = &ring->cmd[ring->cur];
3486 cmd->hdr.code = IWM_TX_CMD;
3488 cmd->hdr.qid = ring->qid;
3489 cmd->hdr.idx = ring->cur;
3491 tx = (void *)cmd->data;
3492 memset(tx, 0, sizeof(*tx));
3494 rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
3496 /* Encrypt the frame if need be. */
3497 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3498 /* Retrieve key for TX && do software encryption. */
3499 k = ieee80211_crypto_encap(ni, m);
3504 /* 802.11 header may have moved. */
3505 wh = mtod(m, struct ieee80211_frame *);
3508 if (ieee80211_radiotap_active_vap(vap)) {
3509 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3512 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3513 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3514 tap->wt_rate = rinfo->rate;
3516 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3517 ieee80211_radiotap_tx(vap, m);
3521 totlen = m->m_pkthdr.len;
3524 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3525 flags |= IWM_TX_CMD_FLG_ACK;
3528 if (type == IEEE80211_FC0_TYPE_DATA
3529 && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3530 && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3531 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3534 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3535 type != IEEE80211_FC0_TYPE_DATA)
3536 tx->sta_id = sc->sc_aux_sta.sta_id;
3538 tx->sta_id = IWM_STATION_ID;
3540 if (type == IEEE80211_FC0_TYPE_MGT) {
3541 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3543 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3544 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3545 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3546 } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3547 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3549 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3552 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3556 /* First segment length must be a multiple of 4. */
3557 flags |= IWM_TX_CMD_FLG_MH_PAD;
3558 pad = 4 - (hdrlen & 3);
3562 tx->driver_txop = 0;
3563 tx->next_frame_len = 0;
3565 tx->len = htole16(totlen);
3566 tx->tid_tspec = tid;
3567 tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3569 /* Set physical address of "scratch area". */
3570 tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3571 tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3573 /* Copy 802.11 header in TX command. */
3574 memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3576 flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3579 tx->tx_flags |= htole32(flags);
3581 /* Trim 802.11 header. */
3583 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3584 segs, &nsegs, BUS_DMA_NOWAIT);
3586 if (error != EFBIG) {
3587 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3592 /* Too many DMA segments, linearize mbuf. */
3593 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3595 device_printf(sc->sc_dev,
3596 "%s: could not defrag mbuf\n", __func__);
3602 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3603 segs, &nsegs, BUS_DMA_NOWAIT);
3605 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3615 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3616 "sending txd %p, in %p\n", data, data->in);
3617 KASSERT(data->in != NULL, ("node is NULL"));
3619 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3620 "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3621 ring->qid, ring->cur, totlen, nsegs,
3622 le32toh(tx->tx_flags),
3623 le32toh(tx->rate_n_flags),
3624 tx->initial_rate_index
3627 /* Fill TX descriptor. */
3628 desc->num_tbs = 2 + nsegs;
3630 desc->tbs[0].lo = htole32(data->cmd_paddr);
3631 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3633 desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3634 desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3635 ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3636 + hdrlen + pad - TB0_SIZE) << 4);
3638 /* Other DMA segments are for data payload. */
3639 for (i = 0; i < nsegs; i++) {
3641 desc->tbs[i+2].lo = htole32(seg->ds_addr);
3642 desc->tbs[i+2].hi_n_len = \
3643 htole16(iwm_get_dma_hi_addr(seg->ds_addr))
3644 | ((seg->ds_len) << 4);
3647 bus_dmamap_sync(ring->data_dmat, data->map,
3648 BUS_DMASYNC_PREWRITE);
3649 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3650 BUS_DMASYNC_PREWRITE);
3651 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3652 BUS_DMASYNC_PREWRITE);
3655 iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3659 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3660 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3662 /* Mark TX ring as full if we reach a certain threshold. */
3663 if (++ring->queued > IWM_TX_RING_HIMARK) {
3664 sc->qfullmsk |= 1 << ring->qid;
/*
 * net80211 ic_raw_xmit entry point: transmit a single frame for 'ni'.
 * Refuses to transmit when the hardware is not initialized.
 * NOTE(review): intermediate source lines are elided in this extract
 * (locking, m_freem/return paths are not visible here).
 */
3671 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3672 const struct ieee80211_bpf_params *params)
3674 struct ieee80211com *ic = ni->ni_ic;
3675 struct iwm_softc *sc = ic->ic_softc;
3678 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3679 "->%s begin\n", __func__);
/* Hardware must be up before we can queue anything to the firmware. */
3681 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3683 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3684 "<-%s not RUNNING\n", __func__);
/*
 * NOTE(review): both branches invoke iwm_tx() identically in the
 * visible text, i.e. 'params' is effectively ignored — presumably
 * intentional at this driver revision, but confirm against the
 * full source before relying on raw-xmit parameter handling.
 */
3690 if (params == NULL) {
3691 error = iwm_tx(sc, m, ni, 0);
3693 error = iwm_tx(sc, m, ni, 0);
/* Arm the TX watchdog (ticks down in iwm_watchdog()). */
3695 sc->sc_tx_timer = 5;
3706 * Note that there are transports that buffer frames before they reach
3707 * the firmware. This means that after flush_tx_path is called, the
3708 * queue might not be empty. The race-free way to handle this is to:
3709 * 1) set the station as draining
3710 * 2) flush the Tx path
3711 * 3) wait for the transport queues to be empty
/*
 * Ask the firmware to flush the TX path for the queues in 'tfd_msk'.
 * 'flags' is passed straight through to the command-send layer
 * (sync/async). Logs on failure; returns the send status.
 */
3714 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3717 struct iwm_tx_path_flush_cmd flush_cmd = {
3718 .queues_ctl = htole32(tfd_msk),
3719 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3722 ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3723 sizeof(flush_cmd), &flush_cmd);
3725 device_printf(sc->sc_dev,
3726 "Flushing tx queue failed: %d\n", ret);
/*
 * Thin wrapper: send an IWM_ADD_STA command synchronously and return
 * the firmware status in *status.
 */
3735 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3736 struct iwm_mvm_add_sta_cmd_v7 *cmd, int *status)
3738 return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
3742 /* send station add/update command to firmware */
/*
 * Build and send an ADD_STA command for the BSS station.  'update'
 * selects modify (1) vs. add (0).  Returns 0 only when the firmware
 * reports IWM_ADD_STA_SUCCESS.
 */
3744 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3746 struct iwm_mvm_add_sta_cmd_v7 add_sta_cmd;
3750 memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3752 add_sta_cmd.sta_id = IWM_STATION_ID;
3753 add_sta_cmd.mac_id_n_color
3754 = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3755 IWM_DEFAULT_COLOR));
/* Enable one TFD queue per WME access category for this station. */
3758 for (ac = 0; ac < WME_NUM_AC; ac++) {
3759 add_sta_cmd.tfd_queue_msk |=
3760 htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
3762 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3764 add_sta_cmd.add_modify = update ? 1 : 0;
3765 add_sta_cmd.station_flags_msk
3766 |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
/* Start with all TIDs disabled for TX; aggregation setup comes later. */
3767 add_sta_cmd.tid_disable_tx = htole16(0xffff);
3769 add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
3771 status = IWM_ADD_STA_SUCCESS;
3772 ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3777 case IWM_ADD_STA_SUCCESS:
3781 device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
/* Add the BSS station to the firmware (initial add). */
3789 iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
3791 return iwm_mvm_sta_send_to_fw(sc, in, 0);
/* Update (modify) the already-added BSS station in the firmware. */
3795 iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
3797 return iwm_mvm_sta_send_to_fw(sc, in, 1);
/*
 * Add an "internal" station (e.g. the aux station used for scanning).
 * 'addr' may be NULL, in which case no MAC address is copied into the
 * command.  Returns 0 on IWM_ADD_STA_SUCCESS.
 */
3801 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3802 const uint8_t *addr, uint16_t mac_id, uint16_t color)
3804 struct iwm_mvm_add_sta_cmd_v7 cmd;
3808 memset(&cmd, 0, sizeof(cmd));
3809 cmd.sta_id = sta->sta_id;
3810 cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3812 cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
/* All TIDs disabled for TX on internal stations. */
3813 cmd.tid_disable_tx = htole16(0xffff);
3816 IEEE80211_ADDR_COPY(cmd.addr, addr);
3818 ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3823 case IWM_ADD_STA_SUCCESS:
3824 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3825 "%s: Internal station added.\n", __func__);
3828 device_printf(sc->sc_dev,
3829 "%s: Add internal station failed, status=0x%x\n",
/*
 * Create the auxiliary station used for off-channel activity (scanning):
 * enable its TX queue, then add the internal station.  On failure the
 * aux station state is zeroed again.
 */
3838 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3842 sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
3843 sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
3845 ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
3849 ret = iwm_mvm_add_int_sta_common(sc,
3850 &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
3853 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
/*
 * Recompute and send the per-binding time-quota command.  When 'in' is
 * non-NULL its PHY context (binding) gets an active interface counted;
 * the session quota is split evenly across active bindings, with the
 * remainder given to the first binding.
 */
3866 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
3868 struct iwm_time_quota_cmd cmd;
3869 int i, idx, ret, num_active_macs, quota, quota_rem;
3870 int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3871 int n_ifs[IWM_MAX_BINDINGS] = {0, };
3874 memset(&cmd, 0, sizeof(cmd));
3876 /* currently, PHY ID == binding ID */
3878 id = in->in_phyctxt->id;
3879 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3880 colors[id] = in->in_phyctxt->color;
3887 * The FW's scheduling session consists of
3888 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3889 * equally between all the bindings that require quota
3891 num_active_macs = 0;
3892 for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3893 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3894 num_active_macs += n_ifs[i];
3899 if (num_active_macs) {
3900 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3901 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3904 for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3908 cmd.quotas[idx].id_and_color =
3909 htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
/* Inactive bindings get a zero quota entry. */
3911 if (n_ifs[i] <= 0) {
3912 cmd.quotas[idx].quota = htole32(0);
3913 cmd.quotas[idx].max_duration = htole32(0);
3915 cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3916 cmd.quotas[idx].max_duration = htole32(0);
3921 /* Give the remainder of the session to the first binding */
3922 cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3924 ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3927 device_printf(sc->sc_dev,
3928 "%s: Failed to send quota: %d\n", __func__, ret);
3937 * ieee80211 routines
3941 * Change to AUTH state in 80211 state machine. Roughly matches what
3942 * Linux does in bss_info_changed().
/*
 * Move the device into the AUTH state: configure the Smart Fifo,
 * multicast filter, MAC/PHY contexts, binding, and station, then start
 * a session-protection time event so the firmware stays on channel
 * during association.  The "uploaded" path updates existing contexts;
 * otherwise they are added fresh (see the long comment below).
 */
3945 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3947 struct ieee80211_node *ni;
3948 struct iwm_node *in;
3949 struct iwm_vap *iv = IWM_VAP(vap);
3954 * XXX i have a feeling that the vap node is being
3955 * freed from underneath us. Grr.
/* Take a reference on the bss node for the duration of this call. */
3957 ni = ieee80211_ref_node(vap->iv_bss);
3959 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3960 "%s: called; vap=%p, bss ni=%p\n",
3967 error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
3971 error = iwm_allow_mcast(vap, sc);
3973 device_printf(sc->sc_dev,
3974 "%s: failed to set multicast\n", __func__);
3979 * This is where it deviates from what Linux does.
3981 * Linux iwlwifi doesn't reset the nic each time, nor does it
3982 * call ctxt_add() here. Instead, it adds it during vap creation,
3983 * and always does a mac_ctx_changed().
3985 * The openbsd port doesn't attempt to do that - it reset things
3986 * at odd states and does the add here.
3988 * So, until the state handling is fixed (ie, we never reset
3989 * the NIC except for a firmware failure, which should drag
3990 * the NIC back to IDLE, re-setup and re-add all the mac/phy
3991 * contexts that are required), let's do a dirty hack here.
3993 if (iv->is_uploaded) {
3994 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3995 device_printf(sc->sc_dev,
3996 "%s: failed to update MAC\n", __func__);
3999 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4000 in->in_ni.ni_chan, 1, 1)) != 0) {
4001 device_printf(sc->sc_dev,
4002 "%s: failed update phy ctxt\n", __func__);
4005 in->in_phyctxt = &sc->sc_phyctxt[0];
4007 if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
4008 device_printf(sc->sc_dev,
4009 "%s: binding update cmd\n", __func__);
4012 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4013 device_printf(sc->sc_dev,
4014 "%s: failed to update sta\n", __func__);
/* Not uploaded yet: add contexts/binding/station from scratch. */
4018 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4019 device_printf(sc->sc_dev,
4020 "%s: failed to add MAC\n", __func__);
4023 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4024 in->in_ni.ni_chan, 1, 1)) != 0) {
4025 device_printf(sc->sc_dev,
4026 "%s: failed add phy ctxt!\n", __func__);
4030 in->in_phyctxt = &sc->sc_phyctxt[0];
4032 if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
4033 device_printf(sc->sc_dev,
4034 "%s: binding add cmd\n", __func__);
4037 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4038 device_printf(sc->sc_dev,
4039 "%s: failed to add sta\n", __func__);
4045 * Prevent the FW from wandering off channel during association
4046 * by "protecting" the session with a time event.
4048 /* XXX duration is in units of TU, not MS */
4049 duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4050 iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
/* Drop the node reference taken above. */
4055 ieee80211_free_node(ni);
/*
 * Move to ASSOC state: refresh the firmware station entry and the MAC
 * context for the vap.  Returns 0 on success or the first error seen.
 */
4060 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
4062 struct iwm_node *in = IWM_NODE(vap->iv_bss);
4065 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4066 device_printf(sc->sc_dev,
4067 "%s: failed to update STA\n", __func__);
4072 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4073 device_printf(sc->sc_dev,
4074 "%s: failed to update MAC\n", __func__);
/*
 * Tear down the RUN-state firmware context.  As the comment below
 * explains, the "proper" incremental teardown wedges the device, so
 * this drains pending TX, flushes the firmware TX path, and then does
 * a full device stop.
 */
4082 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
4087 * Ok, so *technically* the proper set of calls for going
4088 * from RUN back to SCAN is:
4090 * iwm_mvm_power_mac_disable(sc, in);
4091 * iwm_mvm_mac_ctxt_changed(sc, in);
4092 * iwm_mvm_rm_sta(sc, in);
4093 * iwm_mvm_update_quotas(sc, NULL);
4094 * iwm_mvm_mac_ctxt_changed(sc, in);
4095 * iwm_mvm_binding_remove_vif(sc, in);
4096 * iwm_mvm_mac_ctxt_remove(sc, in);
4098 * However, that freezes the device no matter which permutations
4099 * and modifications are attempted. Obviously, this driver is missing
4100 * something since it works in the Linux driver, but figuring out what
4101 * is missing is a little more complicated. Now, since we're going
4102 * back to nothing anyway, we'll just do a complete device reset.
4103 * Up yours, device!
4106 * Just using 0xf for the queues mask is fine as long as we only
4107 * get here from RUN state.
4110 mbufq_drain(&sc->sc_snd);
4111 iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
4113 * We seem to get away with just synchronously sending the
4114 * IWM_TXPATH_FLUSH command.
4116 // iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
4117 iwm_stop_device(sc);
/*
 * NOTE(review): the incremental teardown below appears to be dead or
 * conditionally-compiled code (lines are elided in this extract);
 * note the second, unchecked iwm_mvm_rm_sta() call — confirm against
 * the full source.
 */
4126 iwm_mvm_power_mac_disable(sc, in);
4128 if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4129 device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
4133 if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
4134 device_printf(sc->sc_dev, "sta remove fail %d\n", error);
4137 error = iwm_mvm_rm_sta(sc, in);
4139 iwm_mvm_update_quotas(sc, NULL);
4140 if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4141 device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
4144 iwm_mvm_binding_remove_vif(sc, in);
4146 iwm_mvm_mac_ctxt_remove(sc, in);
/*
 * net80211 node allocator: return a zeroable iwm_node large enough to
 * embed the generic ieee80211_node.
 */
4152 static struct ieee80211_node *
4153 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4155 return malloc(sizeof (struct iwm_node), M_80211_NODE,
/*
 * Build the firmware link-quality (LQ) rate table for the BSS node.
 * Maps each negotiated 802.11 rate to a hardware rate index (highest
 * rate first), then fills in->in_lq's rs_table with PLCP/antenna/CCK
 * encoded entries, padding the tail with the lowest selected rate.
 * Legacy rates only — explicitly not 11n-aware (see XXX below).
 */
4160 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
4162 struct ieee80211_node *ni = &in->in_ni;
4163 struct iwm_lq_cmd *lq = &in->in_lq;
4164 int nrates = ni->ni_rates.rs_nrates;
4165 int i, ridx, tab = 0;
/* Sanity: bail on more rates than the LQ table can hold, or zero. */
4168 if (nrates > nitems(lq->rs_table)) {
4169 device_printf(sc->sc_dev,
4170 "%s: node supports %d rates, driver handles "
4171 "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4175 device_printf(sc->sc_dev,
4176 "%s: node supports 0 rates, odd!\n", __func__);
4181 * XXX .. and most of iwm_node is not initialised explicitly;
4182 * it's all just 0x0 passed to the firmware.
4185 /* first figure out which rates we should support */
4186 /* XXX TODO: this isn't 11n aware /at all/ */
4187 memset(&in->in_ridx, -1, sizeof(in->in_ridx));
4188 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4189 "%s: nrates=%d\n", __func__, nrates);
4192 * Loop over nrates and populate in_ridx from the highest
4193 * rate to the lowest rate. Remember, in_ridx[] has
4194 * IEEE80211_RATE_MAXSIZE entries!
4196 for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
4197 int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;
4199 /* Map 802.11 rate to HW rate index. */
4200 for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
4201 if (iwm_rates[ridx].rate == rate)
4203 if (ridx > IWM_RIDX_MAX) {
4204 device_printf(sc->sc_dev,
4205 "%s: WARNING: device rate for %d not found!\n",
4208 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4209 "%s: rate: i: %d, rate=%d, ridx=%d\n",
4214 in->in_ridx[i] = ridx;
4218 /* then construct a lq_cmd based on those */
4219 memset(lq, 0, sizeof(*lq));
4220 lq->sta_id = IWM_STATION_ID;
4222 /* For HT, always enable RTS/CTS to avoid excessive retries. */
4223 if (ni->ni_flags & IEEE80211_NODE_HT)
4224 lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4227 * are these used? (we don't do SISO or MIMO)
4228 * need to set them to non-zero, though, or we get an error.
4230 lq->single_stream_ant_msk = 1;
4231 lq->dual_stream_ant_msk = 1;
4234 * Build the actual rate selection table.
4235 * The lowest bits are the rates. Additionally,
4236 * CCK needs bit 9 to be set. The rest of the bits
4237 * we add to the table select the tx antenna
4238 * Note that we add the rates in the highest rate first
4239 * (opposite of ni_rates).
4242 * XXX TODO: this should be looping over the min of nrates
4243 * and LQ_MAX_RETRY_NUM. Sigh.
4245 for (i = 0; i < nrates; i++) {
/* Pick a TX antenna from the valid-antenna mask. */
4250 txant = iwm_mvm_get_valid_tx_ant(sc);
4251 nextant = 1<<(ffs(txant)-1);
4254 nextant = iwm_mvm_get_valid_tx_ant(sc);
4257 * Map the rate id into a rate index into
4258 * our hardware table containing the
4259 * configuration to use for this rate.
4261 ridx = in->in_ridx[i];
4262 tab = iwm_rates[ridx].plcp;
4263 tab |= nextant << IWM_RATE_MCS_ANT_POS;
4264 if (IWM_RIDX_IS_CCK(ridx))
4265 tab |= IWM_RATE_MCS_CCK_MSK;
4266 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4267 "station rate i=%d, rate=%d, hw=%x\n",
4268 i, iwm_rates[ridx].rate, tab);
4269 lq->rs_table[i] = htole32(tab);
4271 /* then fill the rest with the lowest possible rate */
4272 for (i = nrates; i < nitems(lq->rs_table); i++) {
4273 KASSERT(tab != 0, ("invalid tab"));
4274 lq->rs_table[i] = htole32(tab);
/*
 * ifmedia change callback: let net80211 process the change; if it
 * reports ENETRESET and interfaces are running, the hardware is
 * restarted (restart code elided in this extract).
 */
4279 iwm_media_change(struct ifnet *ifp)
4281 struct ieee80211vap *vap = ifp->if_softc;
4282 struct ieee80211com *ic = vap->iv_ic;
4283 struct iwm_softc *sc = ic->ic_softc;
4286 error = ieee80211_media_change(ifp);
4287 if (error != ENETRESET)
4291 if (ic->ic_nrunning > 0) {
/*
 * net80211 state-machine hook.  Handles driver-side work for each
 * transition (auth/assoc/run setup, teardown on leaving RUN), forcing
 * a detour through INIT where the hardware cannot make the requested
 * transition directly, then chains to the saved net80211 handler.
 */
4301 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4303 struct iwm_vap *ivp = IWM_VAP(vap);
4304 struct ieee80211com *ic = vap->iv_ic;
4305 struct iwm_softc *sc = ic->ic_softc;
4306 struct iwm_node *in;
4309 IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4310 "switching state %s -> %s\n",
4311 ieee80211_state_name[vap->iv_state],
4312 ieee80211_state_name[nstate]);
4313 IEEE80211_UNLOCK(ic);
/* Stop the scan LED blink when leaving SCAN. */
4316 if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
4317 iwm_led_blink_stop(sc);
4319 /* disable beacon filtering if we're hopping out of RUN */
4320 if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
4321 iwm_mvm_disable_beacon_filter(sc);
4323 if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4326 if (nstate == IEEE80211_S_INIT) {
4329 error = ivp->iv_newstate(vap, nstate, arg);
4330 IEEE80211_UNLOCK(ic);
4332 iwm_release(sc, NULL);
4339 * It's impossible to directly go RUN->SCAN. If we iwm_release()
4340 * above then the card will be completely reinitialized,
4341 * so the driver must do everything necessary to bring the card
4342 * from INIT to SCAN.
4344 * Additionally, upon receiving deauth frame from AP,
4345 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
4346 * state. This will also fail with this driver, so bring the FSM
4347 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
4349 * XXX TODO: fix this for FreeBSD!
4351 if (nstate == IEEE80211_S_SCAN ||
4352 nstate == IEEE80211_S_AUTH ||
4353 nstate == IEEE80211_S_ASSOC) {
4354 IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4355 "Force transition to INIT; MGT=%d\n", arg);
4358 /* Always pass arg as -1 since we can't Tx right now. */
4360 * XXX arg is just ignored anyway when transitioning
4361 * to IEEE80211_S_INIT.
4363 vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
4364 IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4365 "Going INIT->SCAN\n");
4366 nstate = IEEE80211_S_SCAN;
4367 IEEE80211_UNLOCK(ic);
4373 case IEEE80211_S_INIT:
4376 case IEEE80211_S_AUTH:
4377 if ((error = iwm_auth(vap, sc)) != 0) {
4378 device_printf(sc->sc_dev,
4379 "%s: could not move to auth state: %d\n",
4385 case IEEE80211_S_ASSOC:
4386 if ((error = iwm_assoc(vap, sc)) != 0) {
4387 device_printf(sc->sc_dev,
4388 "%s: failed to associate: %d\n", __func__,
4394 case IEEE80211_S_RUN:
4396 struct iwm_host_cmd cmd = {
4398 .len = { sizeof(in->in_lq), },
4399 .flags = IWM_CMD_SYNC,
4402 /* Update the association state, now we have it all */
4403 /* (eg associd comes in at this point */
4404 error = iwm_assoc(vap, sc);
4406 device_printf(sc->sc_dev,
4407 "%s: failed to update association state: %d\n",
/* RUN: enable power management, beacon filter, quotas, rates. */
4413 in = IWM_NODE(vap->iv_bss);
4414 iwm_mvm_power_mac_update_mode(sc, in);
4415 iwm_mvm_enable_beacon_filter(sc, in);
4416 iwm_mvm_update_quotas(sc, in);
4417 iwm_setrates(sc, in);
/* Push the LQ (link quality) table built by iwm_setrates(). */
4419 cmd.data[0] = &in->in_lq;
4420 if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
4421 device_printf(sc->sc_dev,
4422 "%s: IWM_LQ_CMD failed\n", __func__);
4425 iwm_mvm_led_enable(sc);
/* Chain to the net80211 handler saved at vap attach time. */
4435 return (ivp->iv_newstate(vap, nstate, arg));
/*
 * Taskqueue callback run when a firmware scan completes: notify
 * net80211 that the scan on the first vap is done.
 */
4439 iwm_endscan_cb(void *arg, int pending)
4441 struct iwm_softc *sc = arg;
4442 struct ieee80211com *ic = &sc->sc_ic;
4444 IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4448 ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4452 * Aging and idle timeouts for the different possible scenarios
4453 * in default configuration
/*
 * Smart Fifo timeout table used when NOT associated to a single BSS
 * (one {aging, idle} pair per SF scenario).  Values are pre-swapped
 * with htole32 so they can be memcpy'd straight into the command.
 */
4455 static const uint32_t
4456 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4458 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
4459 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
4462 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
4463 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
4466 htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
4467 htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
4470 htole32(IWM_SF_BA_AGING_TIMER_DEF),
4471 htole32(IWM_SF_BA_IDLE_TIMER_DEF)
4474 htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
4475 htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
4480 * Aging and idle timeouts for the different possible scenarios
4481 * in single BSS MAC configuration.
/* Smart Fifo timeout table for the single-BSS (associated) case. */
4483 static const uint32_t
4484 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4486 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
4487 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
4490 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
4491 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
4494 htole32(IWM_SF_MCAST_AGING_TIMER),
4495 htole32(IWM_SF_MCAST_IDLE_TIMER)
4498 htole32(IWM_SF_BA_AGING_TIMER),
4499 htole32(IWM_SF_BA_IDLE_TIMER)
4502 htole32(IWM_SF_TX_RE_AGING_TIMER),
4503 htole32(IWM_SF_TX_RE_IDLE_TIMER)
/*
 * Populate a Smart Fifo configuration command: choose the FULL_ON
 * watermark from the peer's HT/MIMO capability ('ni' NULL means
 * unassociated), set long-delay timeouts, and copy the appropriate
 * full-on timeout table (associated vs. default).
 */
4508 iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
4509 struct ieee80211_node *ni)
4511 int i, j, watermark;
4513 sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
4516 * If we are in association flow - check antenna configuration
4517 * capabilities of the AP station, and choose the watermark accordingly.
4520 if (ni->ni_flags & IEEE80211_NODE_HT) {
/* Spatial-stream count inferred from the peer's RX MCS set. */
4522 if (ni->ni_rxmcs[2] != 0)
4523 watermark = IWM_SF_W_MARK_MIMO3;
4524 else if (ni->ni_rxmcs[1] != 0)
4525 watermark = IWM_SF_W_MARK_MIMO2;
4528 watermark = IWM_SF_W_MARK_SISO;
4530 watermark = IWM_SF_W_MARK_LEGACY;
4532 /* default watermark value for unassociated mode. */
4534 watermark = IWM_SF_W_MARK_MIMO2;
4536 sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
4538 for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
4539 for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
4540 sf_cmd->long_delay_timeouts[i][j] =
4541 htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
/* Tables are already little-endian; copy verbatim. */
4546 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
4547 sizeof(iwm_sf_full_timeout));
4549 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
4550 sizeof(iwm_sf_full_timeout_def));
/*
 * Build and send the Smart Fifo configuration command for the
 * requested state (INIT_OFF or FULL_ON); other states are rejected.
 * The command is sent asynchronously.
 */
4555 iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4557 struct ieee80211com *ic = &sc->sc_ic;
4558 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4559 struct iwm_sf_cfg_cmd sf_cmd = {
4560 .state = htole32(IWM_SF_FULL_ON),
/* Family-8000 parts need the dummy-notification workaround bit. */
4564 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4565 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4567 switch (new_state) {
4569 case IWM_SF_INIT_OFF:
4570 iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4572 case IWM_SF_FULL_ON:
4573 iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4576 IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4577 "Invalid state: %d. not sending Smart Fifo cmd\n",
4582 ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4583 sizeof(sf_cmd), &sf_cmd);
/*
 * Send the initial Bluetooth-coexistence configuration to firmware.
 * NOTE(review): no zero-initialization of bt_cmd is visible here —
 * lines are elided in this extract; confirm against the full source.
 */
4588 iwm_send_bt_init_conf(struct iwm_softc *sc)
4590 struct iwm_bt_coex_cmd bt_cmd;
4592 bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4593 bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4595 return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
/*
 * Send an MCC (mobile country code) update for regulatory domain
 * 'alpha2' (two ASCII letters) and parse the firmware's response.
 * Handles both the v1 and v2 response layouts depending on the
 * LAR_SUPPORT_V2 capability bit.
 */
4600 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4602 struct iwm_mcc_update_cmd mcc_cmd;
4603 struct iwm_host_cmd hcmd = {
4604 .id = IWM_MCC_UPDATE_CMD,
4605 .flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4606 .data = { &mcc_cmd },
4610 struct iwm_rx_packet *pkt;
4611 struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4612 struct iwm_mcc_update_resp *mcc_resp;
4616 int resp_v2 = isset(sc->sc_enabled_capa,
4617 IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4619 memset(&mcc_cmd, 0, sizeof(mcc_cmd));
/* Country code packed big-endian-style into one 16-bit field. */
4620 mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4621 if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4622 isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
4623 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4625 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
/* v2-capable firmware takes the larger command structure. */
4628 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4630 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4632 IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4633 "send MCC update to FW with '%c%c' src = %d\n",
4634 alpha2[0], alpha2[1], mcc_cmd.source_id);
4636 ret = iwm_send_cmd(sc, &hcmd);
4641 pkt = hcmd.resp_pkt;
4643 /* Extract MCC response */
4645 mcc_resp = (void *)pkt->data;
4646 mcc = mcc_resp->mcc;
4647 n_channels = le32toh(mcc_resp->n_channels);
4649 mcc_resp_v1 = (void *)pkt->data;
4650 mcc = mcc_resp_v1->mcc;
4651 n_channels = le32toh(mcc_resp_v1->n_channels);
4654 /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4656 mcc = 0x3030; /* "00" - world */
4658 IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4659 "regulatory domain '%c%c' (%d channels available)\n",
4660 mcc >> 8, mcc & 0xff, n_channels);
/* Release the response buffer held by IWM_CMD_WANT_SKB. */
4662 iwm_free_resp(sc, &hcmd);
/*
 * Send the thermal-management TX backoff value to the firmware;
 * failures are only logged.
 */
4668 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4670 struct iwm_host_cmd cmd = {
4671 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4672 .len = { sizeof(uint32_t), },
4673 .data = { &backoff, },
4676 if (iwm_send_cmd(sc, &cmd) != 0) {
4677 device_printf(sc->sc_dev,
4678 "failed to change thermal tx backoff\n");
/*
 * Full hardware bring-up sequence: run the INIT ucode, restart the HW,
 * load the regular firmware, then configure BT coex, antennas, PHY DB,
 * the aux station, PHY contexts, thermal backoff, power, LAR/MCC,
 * UMAC scan, and the per-AC TX queues.  On any failure the device is
 * stopped again (error path at the bottom).
 */
4683 iwm_init_hw(struct iwm_softc *sc)
4685 struct ieee80211com *ic = &sc->sc_ic;
4688 if ((error = iwm_start_hw(sc)) != 0) {
4689 printf("iwm_start_hw: failed %d\n", error);
4693 if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4694 printf("iwm_run_init_mvm_ucode: failed %d\n", error);
4699 * should stop and start HW since that INIT
4702 iwm_stop_device(sc);
4703 if ((error = iwm_start_hw(sc)) != 0) {
4704 device_printf(sc->sc_dev, "could not initialize hardware\n");
4708 /* omstart, this time with the regular firmware */
4709 error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
4711 device_printf(sc->sc_dev, "could not load firmware\n");
4715 if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4716 device_printf(sc->sc_dev, "bt init conf failed\n");
4720 error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
4722 device_printf(sc->sc_dev, "antenna config failed\n");
4726 /* Send phy db control command and then phy db calibration */
4727 if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4730 if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4731 device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4735 /* Add auxiliary station for scanning */
4736 if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4737 device_printf(sc->sc_dev, "add_aux_sta failed\n");
4741 for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4743 * The channel used here isn't relevant as it's
4744 * going to be overwritten in the other flows.
4745 * For now use the first channel we have.
4747 if ((error = iwm_mvm_phy_ctxt_add(sc,
4748 &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4752 /* Initialize tx backoffs to the minimum. */
4753 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4754 iwm_mvm_tt_tx_backoff(sc, 0);
4756 error = iwm_mvm_power_update_device(sc);
/* With LAR support, push a "ZZ" (unknown) regulatory domain. */
4760 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
4761 if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4765 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4766 if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4770 /* Enable Tx queues. */
4771 for (ac = 0; ac < WME_NUM_AC; ac++) {
4772 error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4773 iwm_mvm_ac_to_tx_fifo[ac]);
4778 if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4779 device_printf(sc->sc_dev, "failed to disable beacon filter\n");
/* Error path: power the device back down. */
4786 iwm_stop_device(sc);
4790 /* Allow multicast from our BSSID. */
/*
 * Build and send an IWM_MCAST_FILTER_CMD that passes multicast frames
 * from our own BSSID.  The command buffer is heap-allocated (rounded
 * up to 4 bytes) and freed before returning the send status.
 */
4792 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4794 struct ieee80211_node *ni = vap->iv_bss;
4795 struct iwm_mcast_filter_cmd *cmd;
4799 size = roundup(sizeof(*cmd), 4);
4800 cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4803 cmd->filter_own = 1;
4807 IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4809 error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4810 IWM_CMD_SYNC, size, cmd);
4811 free(cmd, M_DEVBUF);
/*
 * Bring the interface up: no-op if already initialized; otherwise run
 * the full iwm_init_hw() sequence, mark the softc running, and start
 * the watchdog callout.
 */
4821 iwm_init(struct iwm_softc *sc)
4825 if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4828 sc->sc_generation++;
4829 sc->sc_flags &= ~IWM_FLAG_STOPPED;
4831 if ((error = iwm_init_hw(sc)) != 0) {
4832 printf("iwm_init_hw failed %d\n", error);
4838 * Ok, firmware loaded and we are jogging
4840 sc->sc_flags |= IWM_FLAG_HW_INITED;
4841 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
/*
 * net80211 ic_transmit hook: reject frames while the HW is down,
 * otherwise enqueue onto the driver send queue (drained by iwm_start).
 */
4845 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4847 struct iwm_softc *sc;
4853 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4857 error = mbufq_enqueue(&sc->sc_snd, m);
4868 * Dequeue packets from sendq and call send.
/*
 * Drain the driver send queue into iwm_tx() while no TX ring is full
 * (qfullmsk == 0).  On a TX failure the error counter is bumped and
 * the node reference released; each successful pass re-arms the TX
 * watchdog.
 */
4871 iwm_start(struct iwm_softc *sc)
4873 struct ieee80211_node *ni;
4877 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4878 while (sc->qfullmsk == 0 &&
4879 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
/* Node reference stashed in the mbuf pkthdr by net80211. */
4880 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4881 if (iwm_tx(sc, m, ni, ac) != 0) {
4882 if_inc_counter(ni->ni_vap->iv_ifp,
4883 IFCOUNTER_OERRORS, 1);
4884 ieee80211_free_node(ni);
4887 sc->sc_tx_timer = 15;
4889 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
/*
 * Bring the interface down: clear the running flag, bump the
 * generation counter (invalidates in-flight work), stop the LED blink
 * and TX watchdog, and power off the device.
 */
4893 iwm_stop(struct iwm_softc *sc)
4896 sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4897 sc->sc_flags |= IWM_FLAG_STOPPED;
4898 sc->sc_generation++;
4899 iwm_led_blink_stop(sc);
4900 sc->sc_tx_timer = 0;
4901 iwm_stop_device(sc);
/*
 * Per-second watchdog callout: when the TX timer expires the device
 * is considered hung — restart the whole 802.11 stack and count an
 * output error.  Re-arms itself every second.
 */
4905 iwm_watchdog(void *arg)
4907 struct iwm_softc *sc = arg;
4908 struct ieee80211com *ic = &sc->sc_ic;
4910 if (sc->sc_tx_timer > 0) {
4911 if (--sc->sc_tx_timer == 0) {
4912 device_printf(sc->sc_dev, "device timeout\n");
4916 ieee80211_restart_all(ic);
4917 counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4921 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
/*
 * net80211 ic_parent hook: bring the hardware up when interfaces start
 * running and it is not yet initialized, down when none remain, then
 * kick the stack's start routines.
 */
4925 iwm_parent(struct ieee80211com *ic)
4927 struct iwm_softc *sc = ic->ic_softc;
4931 if (ic->ic_nrunning > 0) {
4932 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4936 } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4940 ieee80211_start_all(ic);
4944 * The interrupt side of things
4948 * error dumping routines are from iwlwifi/mvm/utils.c
4952 * Note: This structure is read from the device with IO accesses,
4953 * and the reading already does the endian conversion. As it is
4954 * read with uint32_t-sized accesses, any members with a different size
4955 * need to be ordered correctly though!
/*
 * LMAC firmware error log layout (LOG_ERROR_TABLE_API_S_VER_3),
 * read out of device memory after a firmware crash.
 */
4957 struct iwm_error_event_table {
4958 uint32_t valid; /* (nonzero) valid, (0) log is empty */
4959 uint32_t error_id; /* type of error */
4960 uint32_t trm_hw_status0; /* TRM HW status */
4961 uint32_t trm_hw_status1; /* TRM HW status */
4962 uint32_t blink2; /* branch link */
4963 uint32_t ilink1; /* interrupt link */
4964 uint32_t ilink2; /* interrupt link */
4965 uint32_t data1; /* error-specific data */
4966 uint32_t data2; /* error-specific data */
4967 uint32_t data3; /* error-specific data */
4968 uint32_t bcon_time; /* beacon timer */
4969 uint32_t tsf_low; /* network timestamp function timer */
4970 uint32_t tsf_hi; /* network timestamp function timer */
4971 uint32_t gp1; /* GP1 timer register */
4972 uint32_t gp2; /* GP2 timer register */
4973 uint32_t fw_rev_type; /* firmware revision type */
4974 uint32_t major; /* uCode version major */
4975 uint32_t minor; /* uCode version minor */
4976 uint32_t hw_ver; /* HW Silicon version */
4977 uint32_t brd_ver; /* HW board version */
4978 uint32_t log_pc; /* log program counter */
4979 uint32_t frame_ptr; /* frame pointer */
4980 uint32_t stack_ptr; /* stack pointer */
4981 uint32_t hcmd; /* last host command header */
4982 uint32_t isr0; /* isr status register LMPM_NIC_ISR0:
4984 uint32_t isr1; /* isr status register LMPM_NIC_ISR1:
4986 uint32_t isr2; /* isr status register LMPM_NIC_ISR2:
4988 uint32_t isr3; /* isr status register LMPM_NIC_ISR3:
4990 uint32_t isr4; /* isr status register LMPM_NIC_ISR4:
4992 uint32_t last_cmd_id; /* last HCMD id handled by the firmware */
4993 uint32_t wait_event; /* wait event() caller address */
4994 uint32_t l2p_control; /* L2pControlField */
4995 uint32_t l2p_duration; /* L2pDurationField */
4996 uint32_t l2p_mhvalid; /* L2pMhValidBits */
4997 uint32_t l2p_addr_match; /* L2pAddrMatchStat */
4998 uint32_t lmpm_pmg_sel; /* indicate which clocks are turned on
5000 uint32_t u_timestamp; /* indicate when the date and time of the
5002 uint32_t flow_handler; /* FH read/write pointers, RX credit */
5003 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
 * UMAC error struct - relevant starting from family 8000 chip.
 * Note: This structure is read from the device with IO accesses,
 * and the reading already does the endian conversion. As it is
 * read with u32-sized accesses, any members with a different size
 * need to be ordered correctly though!
struct iwm_umac_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t umac_major;	/* UMAC uCode version major */
	uint32_t umac_minor;	/* UMAC uCode version minor */
	uint32_t frame_pointer;	/* core register 27*/
	uint32_t stack_pointer;	/* core register 28 */
	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
	uint32_t nic_isr_pref;	/* ISR status register */
/* Offsets/strides (in bytes) used when decoding the firmware event log. */
#define ERROR_START_OFFSET (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE (7 * sizeof(uint32_t))
/*
 * Symbolic names for known firmware assert ids (error_id values).
 * The catch-all ADVANCED_SYSASSERT entry must remain last; see
 * iwm_desc_lookup().
 */
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
/*
 * Map a firmware error id to its symbolic name.  Scans all but the
 * final advanced_lookup[] entry; on no match, the loop leaves 'i' at
 * the last index, so the catch-all ADVANCED_SYSASSERT name is returned.
 */
iwm_desc_lookup(uint32_t num)
	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
		if (advanced_lookup[i].num == num)
			return advanced_lookup[i].name;
	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
	return advanced_lookup[i].name;
/*
 * Dump the UMAC error event table (family 8000+ firmware).  The table
 * address comes from the ALIVE response (uc_umac_error_event_table).
 */
iwm_nic_umac_error(struct iwm_softc *sc)
	struct iwm_umac_error_event_table table;
	base = sc->sc_uc.uc_umac_error_event_table;
	/* Firmware memory below 0x800000 is treated as a bogus log pointer. */
	if (base < 0x800000) {
		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
	/* NOTE(review): length arg is sizeof/sizeof(uint32_t) — presumably
	 * iwm_read_mem() takes a dword count, not bytes; confirm. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	/* One line per table field; values are raw device dwords. */
	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
	    iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
	    table.frame_pointer);
	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
	    table.stack_pointer);
	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
	    table.nic_isr_pref);
 * Support for dumping the error log seemed like a good idea ...
 * but it's mostly hex junk and the only sensible thing is the
 * hw/ucode revision (which we know anyway). Since it's here,
 * I'll just leave it in, just in case e.g. the Intel guys want to
 * help us decipher some "ADVANCED_SYSASSERT" later.
iwm_nic_error(struct iwm_softc *sc)
	/* Dump the LMAC error event table, then chain to the UMAC one. */
	struct iwm_error_event_table table;
	device_printf(sc->sc_dev, "dumping device error log\n");
	base = sc->sc_uc.uc_error_event_table;
	/* Same sanity check as iwm_nic_umac_error(): low addresses are bogus. */
	if (base < 0x800000) {
		device_printf(sc->sc_dev,
		    "Invalid error log pointer 0x%08x\n", base);
	/* NOTE(review): length presumably in dwords (see iwm_read_mem) — confirm. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		device_printf(sc->sc_dev, "errlog not found, skipping\n");
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
	    iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
	    table.trm_hw_status0);
	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
	    table.trm_hw_status1);
	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
	/* UMAC table only exists if the ALIVE response provided an address. */
	if (sc->sc_uc.uc_umac_error_event_table)
		iwm_nic_umac_error(sc);
/* Advance the RX ring read index, wrapping at IWM_RX_RING_COUNT. */
#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
 * Basic structure from if_iwn
iwm_notif_intr(struct iwm_softc *sc)
	/*
	 * Drain the RX ring up to the hardware's closed_rb_num write
	 * pointer, dispatching each packet on its (possibly wide) command
	 * id.  Runs from interrupt context; ordering of the DMA syncs and
	 * the final write pointer update matters.
	 */
	struct ieee80211com *ic = &sc->sc_ic;
	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
	    BUS_DMASYNC_POSTREAD);
	/* Hardware write index, modulo ring size (12 valid bits). */
	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
	while (sc->rxq.cur != hw) {
		struct iwm_rx_ring *ring = &sc->rxq;
		struct iwm_rx_data *data = &ring->data[ring->cur];
		struct iwm_rx_packet *pkt;
		struct iwm_cmd_response *cresp;
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTREAD);
		pkt = mtod(data->m, struct iwm_rx_packet *);
		/* Bit 7 of qid marks firmware-originated notifications. */
		qid = pkt->hdr.qid & ~0x80;
		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "rx packet qid=%d idx=%d type=%x %d %d\n",
		    pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, ring->cur, hw);
		 * randomly get these from the firmware, no idea why.
		 * they at least seem harmless, so just ignore them for now
		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
		    || pkt->len_n_flags == htole32(0x55550000))) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
		case IWM_REPLY_RX_MPDU_CMD:
			iwm_mvm_rx_rx_mpdu(sc, pkt, data);
			iwm_mvm_rx_tx_cmd(sc, pkt, data);
		case IWM_MISSED_BEACONS_NOTIFICATION: {
			struct iwm_missed_beacons_notif *resp;
			/* XXX look at mac_id to determine interface ID */
			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
			resp = (void *)pkt->data;
			missed = le32toh(resp->consec_missed_beacons);
			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
			    "%s: MISSED_BEACON: mac_id=%d, "
			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
			    le32toh(resp->mac_id),
			    le32toh(resp->consec_missed_beacons_since_last_rx),
			    le32toh(resp->consec_missed_beacons),
			    le32toh(resp->num_expected_beacons),
			    le32toh(resp->num_recvd_beacons));
			/* XXX no net80211 locking? */
			if (vap->iv_state == IEEE80211_S_RUN &&
			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
				if (missed > vap->iv_bmissthreshold) {
					/* XXX bad locking; turn into task */
					ieee80211_beacon_miss(ic);
		case IWM_MFUART_LOAD_NOTIFICATION:
		case IWM_MVM_ALIVE: {
			/* Payload length selects the ALIVE response version. */
			struct iwm_mvm_alive_resp_v1 *resp1;
			struct iwm_mvm_alive_resp_v2 *resp2;
			struct iwm_mvm_alive_resp_v3 *resp3;
			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
				resp1 = (void *)pkt->data;
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp1->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp1->log_event_table_ptr);
				sc->sched_base = le32toh(resp1->scd_base_ptr);
				if (resp1->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
					sc->sc_uc.uc_ok = 0;
			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
				resp2 = (void *)pkt->data;
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp2->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp2->log_event_table_ptr);
				sc->sched_base = le32toh(resp2->scd_base_ptr);
				sc->sc_uc.uc_umac_error_event_table
				    = le32toh(resp2->error_info_addr);
				if (resp2->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
					sc->sc_uc.uc_ok = 0;
			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
				resp3 = (void *)pkt->data;
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp3->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp3->log_event_table_ptr);
				sc->sched_base = le32toh(resp3->scd_base_ptr);
				sc->sc_uc.uc_umac_error_event_table
				    = le32toh(resp3->error_info_addr);
				if (resp3->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
					sc->sc_uc.uc_ok = 0;
			/* Wake anyone sleeping on the ALIVE handshake. */
			sc->sc_uc.uc_intr = 1;
		case IWM_CALIB_RES_NOTIF_PHY_DB: {
			struct iwm_calib_res_notif_phy_db *phy_db_notif;
			phy_db_notif = (void *)pkt->data;
			iwm_phy_db_set_section(sc->sc_phy_db, phy_db_notif);
		case IWM_STATISTICS_NOTIFICATION: {
			struct iwm_notif_statistics *stats;
			stats = (void *)pkt->data;
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
		case IWM_NVM_ACCESS_CMD:
		case IWM_MCC_UPDATE_CMD:
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				/* NOTE(review): copies sizeof(sc_cmd_resp)
				 * regardless of actual packet length —
				 * presumably the RX buffer is at least that
				 * large; confirm against buffer sizing. */
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(sc->sc_cmd_resp));
		case IWM_MCC_CHUB_UPDATE_CMD: {
			struct iwm_mcc_chub_notif *notif;
			notif = (void *)pkt->data;
			/* Decode the two-letter country code from notif->mcc. */
			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
			sc->sc_fw_mcc[2] = '\0';
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "fw source %d sent CC '%s'\n",
			    notif->source_id, sc->sc_fw_mcc);
		case IWM_DTS_MEASUREMENT_NOTIFICATION:
		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_TIME_EVENT_CMD:
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_REPLY_THERMAL_MNG_BACKOFF:
			cresp = (void *)pkt->data;
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(*pkt)+sizeof(*cresp));
		case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
		case IWM_INIT_COMPLETE_NOTIF:
			sc->sc_init_complete = 1;
			wakeup(&sc->sc_init_complete);
		case IWM_SCAN_OFFLOAD_COMPLETE: {
			struct iwm_periodic_scan_complete *notif;
			notif = (void *)pkt->data;
		case IWM_SCAN_ITERATION_COMPLETE: {
			struct iwm_lmac_scan_complete_notif *notif;
			notif = (void *)pkt->data;
			ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
		case IWM_SCAN_COMPLETE_UMAC: {
			struct iwm_umac_scan_complete *notif;
			notif = (void *)pkt->data;
			IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
			    "UMAC scan complete, status=0x%x\n",
#if 0	/* XXX This would be a duplicate scan end call */
			taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
			struct iwm_umac_scan_iter_complete_notif *notif;
			notif = (void *)pkt->data;
			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
			    "complete, status=0x%x, %d channels scanned\n",
			    notif->status, notif->scanned_channels);
			ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			resp = (void *)pkt->data;
			device_printf(sc->sc_dev,
			    "firmware error 0x%x, cmd 0x%x\n",
			    le32toh(resp->error_type),
		case IWM_TIME_EVENT_NOTIFICATION: {
			struct iwm_time_event_notif *notif;
			notif = (void *)pkt->data;
			IWM_DPRINTF(sc, IWM_DEBUG_INTR,
			    "TE notif status = 0x%x action = 0x%x\n",
			    notif->status, notif->action);
		case IWM_MCAST_FILTER_CMD:
		case IWM_SCD_QUEUE_CFG: {
			struct iwm_scd_txq_cfg_rsp *rsp;
			rsp = (void *)pkt->data;
			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
			    "queue cfg token=0x%x sta_id=%d "
			    "tid=%d scd_queue=%d\n",
			    rsp->token, rsp->sta_id, rsp->tid,
			device_printf(sc->sc_dev,
			    "frame %d/%d %x UNHANDLED (this should "
			    "not happen)\n", qid, idx,
		 * Why test bit 0x80? The Linux driver:
		 * There is one exception: uCode sets bit 15 when it
		 * originates the response/notification, i.e. when the
		 * response/notification is not a direct response to a
		 * command sent by the driver. For example, uCode issues
		 * IWM_REPLY_RX when it sends a received frame to the driver;
		 * it is not a direct response to any driver command.
		 * Ok, so since when is 7 == 15? Well, the Linux driver
		 * uses a slightly different format for pkt->hdr, and "qid"
		 * is actually the upper byte of a two-byte field.
		if (!(pkt->hdr.qid & (1 << 7))) {
			iwm_cmd_done(sc, pkt);
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	 * Tell the firmware what we have processed.
	 * Seems like the hardware gets upset unless we align
	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
	/*
	 * Main interrupt handler body.  Masks interrupts, reads the
	 * cause bits (via the ICT table when IWM_FLAG_USE_ICT is set,
	 * otherwise straight from IWM_CSR_INT), then services each
	 * cause: SW/HW error, FH TX (firmware load chunk), rfkill,
	 * periodic, and RX notifications.
	 */
	struct iwm_softc *sc = arg;
	/* Mask all interrupts while we service this one. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		tmp = htole32(ict[sc->ict_cur]);
		 * ok, there was something. keep plowing until we have all.
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		/* this is where the fun begins. don't ask */
		if (r1 == 0xffffffff)
		/* i am not expected to understand this */
			r1 = (0xff & r1) | ((0xff00 & r1) << 16);
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	if (r1 == 0 && r2 == 0) {
	/* Ack the causes we are about to handle. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
		struct ieee80211com *ic = &sc->sc_ic;
		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
		/* Dump driver status (TX and RX rings) while we're here. */
		device_printf(sc->sc_dev, "driver status:\n");
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			device_printf(sc->sc_dev,
			    " tx ring %2d: qid=%-2d cur=%-3d "
			    i, ring->qid, ring->cur, ring->queued);
		device_printf(sc->sc_dev,
		    " rx ring: cur=%d\n", sc->rxq.cur);
		device_printf(sc->sc_dev,
		    " 802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
		/* Don't stop the device; just do a VAP restart */
			printf("%s: null vap\n", __func__);
		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
		    "restarting\n", __func__, vap->iv_state);
		/* XXX TODO: turn this into a callout/taskqueue */
		ieee80211_restart_all(ic);
	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		device_printf(sc->sc_dev, "hardware error, stopping device\n");
	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;
		/* Wakes the firmware-load path waiting on this flag. */
		sc->sc_fw_chunk_done = 1;
	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		if (iwm_check_rfkill(sc)) {
			device_printf(sc->sc_dev,
			    "%s: rfkill switch, disabling interface\n",
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
	/* enable periodic interrupt, see above */
	if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
		IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
		    IWM_CSR_INT_PERIODIC_ENA);
	if (__predict_false(r1 & ~handled))
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "%s: unhandled interrupts: %x\n", __func__, r1);
	/* Re-enable the interrupts we masked at entry. */
	iwm_restore_interrupts(sc);
 * Autoconf glue-sniffing
#define PCI_VENDOR_INTEL		0x8086
// Supported PCI device IDs; must stay in sync with iwm_devices[] and
// the switch in iwm_dev_check().
#define PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
#define PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
#define PCI_PRODUCT_INTEL_WL_3165_1	0x3165
#define PCI_PRODUCT_INTEL_WL_3165_2	0x3166
#define PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
#define PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
#define PCI_PRODUCT_INTEL_WL_7265_1	0x095a
#define PCI_PRODUCT_INTEL_WL_7265_2	0x095b
#define PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
#define PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
/* Device-id to probe-description table, scanned by iwm_probe(). */
static const struct iwm_devices {
	{ PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
	{ PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
	{ PCI_PRODUCT_INTEL_WL_3165_1, "Intel Dual Band Wireless AC 3165" },
	{ PCI_PRODUCT_INTEL_WL_3165_2, "Intel Dual Band Wireless AC 3165" },
	{ PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
	{ PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
	{ PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
	{ PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
	{ PCI_PRODUCT_INTEL_WL_8260_1, "Intel Dual Band Wireless AC 8260" },
	{ PCI_PRODUCT_INTEL_WL_8260_2, "Intel Dual Band Wireless AC 8260" },
/*
 * PCI probe: match vendor/device against iwm_devices[] and set the
 * device description on a hit.
 */
iwm_probe(device_t dev)
	for (i = 0; i < nitems(iwm_devices); i++) {
		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
		    pci_get_device(dev) == iwm_devices[i].device) {
			device_set_desc(dev, iwm_devices[i].name);
			return (BUS_PROBE_DEFAULT);
/*
 * Select the per-chip configuration (sc->cfg) and firmware DMA
 * segment size from the PCI device id.
 */
iwm_dev_check(device_t dev)
	struct iwm_softc *sc;
	sc = device_get_softc(dev);
	switch (pci_get_device(dev)) {
	case PCI_PRODUCT_INTEL_WL_3160_1:
	case PCI_PRODUCT_INTEL_WL_3160_2:
		sc->cfg = &iwm3160_cfg;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
	case PCI_PRODUCT_INTEL_WL_3165_1:
	case PCI_PRODUCT_INTEL_WL_3165_2:
		sc->cfg = &iwm3165_cfg;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
	case PCI_PRODUCT_INTEL_WL_7260_1:
	case PCI_PRODUCT_INTEL_WL_7260_2:
		sc->cfg = &iwm7260_cfg;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
	case PCI_PRODUCT_INTEL_WL_7265_1:
	case PCI_PRODUCT_INTEL_WL_7265_2:
		sc->cfg = &iwm7265_cfg;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
	case PCI_PRODUCT_INTEL_WL_8260_1:
	case PCI_PRODUCT_INTEL_WL_8260_2:
		/* 8000 family uses a larger firmware DMA segment size. */
		sc->cfg = &iwm8260_cfg;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
		device_printf(dev, "unknown adapter type\n");
/*
 * PCI-level attach: map registers, enable bus-mastering, allocate
 * the (MSI if available) interrupt and hook up iwm_intr().
 */
iwm_pci_attach(device_t dev)
	struct iwm_softc *sc;
	int count, error, rid;
	sc = device_get_softc(dev);
	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_read_config(dev, 0x40, sizeof(reg));
	pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
	/* Enable bus-mastering and hardware bug workaround. */
	pci_enable_busmaster(dev);
	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
	if (reg & PCIM_STATUS_INTxSTATE) {
		reg &= ~PCIM_STATUS_INTxSTATE;
	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	if (sc->sc_mem == NULL) {
		device_printf(sc->sc_dev, "can't map mem space\n");
	sc->sc_st = rman_get_bustag(sc->sc_mem);
	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
	/* Install interrupt handler. */
	if (pci_alloc_msi(dev, &count) == 0)
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
	    (rid != 0 ? 0 : RF_SHAREABLE));
	if (sc->sc_irq == NULL) {
		device_printf(dev, "can't map interrupt\n");
	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, iwm_intr, sc, &sc->sc_ih);
	/* NOTE(review): 'error' from bus_setup_intr() is assigned but the
	 * failure check uses sc_ih == NULL; also the message below lacks a
	 * trailing newline — confirm and consider checking 'error'. */
	if (sc->sc_ih == NULL) {
		device_printf(dev, "can't establish interrupt");
	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
/*
 * Undo iwm_pci_attach(): tear down the interrupt, release the IRQ
 * resource and MSI allocation, then the register window.  Safe to
 * call with partially-attached state (NULL checks).
 */
iwm_pci_detach(device_t dev)
	struct iwm_softc *sc = device_get_softc(dev);
	if (sc->sc_irq != NULL) {
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_irq), sc->sc_irq);
		pci_release_msi(dev);
	if (sc->sc_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_mem), sc->sc_mem);
/*
 * Device attach: set up software state (queues, callouts, phy db),
 * attach PCI resources, identify the chip revision, allocate all
 * DMA rings, and defer net80211 attach to iwm_preinit() via a
 * config intrhook (firmware load needs interrupts running).
 * On any failure, jumps to the tail which calls iwm_detach_local().
 */
iwm_attach(device_t dev)
	struct iwm_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	sc->sc_attached = 1;
	mbufq_init(&sc->sc_snd, ifqmaxlen);
	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
	sc->sc_phy_db = iwm_phy_db_init(sc);
	if (!sc->sc_phy_db) {
		device_printf(dev, "Cannot init phy_db\n");
	error = iwm_pci_attach(dev);
	sc->sc_wantresp = -1;
	/* Check device type */
	error = iwm_dev_check(dev);
	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
		    (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
	if (iwm_prepare_card_hw(sc) != 0) {
		device_printf(dev, "could not initialize hardware\n");
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		 * In order to recognize C step the driver should read the
		 * chip version id located at the AUX bus MISC address.
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
		    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			device_printf(sc->sc_dev,
			    "Failed to wake up the nic\n");
		if (iwm_nic_lock(sc)) {
			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
			hw_step |= IWM_ENABLE_WFPM;
			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
				    (IWM_SILICON_C_STEP << 2);
			device_printf(sc->sc_dev, "Failed to lock the nic\n");
	/* special-case 7265D, it has the same PCI IDs. */
	if (sc->cfg == &iwm7265_cfg &&
	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
		sc->cfg = &iwm7265d_cfg;
	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		device_printf(dev, "could not allocate memory for firmware\n");
	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		device_printf(dev, "could not allocate keep warm page\n");
	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		device_printf(dev, "could not allocate ICT table\n");
	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		device_printf(dev, "could not allocate TX scheduler rings\n");
	/* Allocate TX rings */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			    "could not allocate TX ring %d\n",
	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		device_printf(dev, "could not allocate RX ring\n");
	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
	/* Set device capabilities. */
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
	/* Advertise full-offload scanning */
	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
		sc->sc_phyctxt[i].color = 0;
		sc->sc_phyctxt[i].ref = 0;
		sc->sc_phyctxt[i].channel = NULL;
	/* Default noise floor */
	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
	/* Defer firmware load / net80211 attach until interrupts work. */
	sc->sc_preinit_hook.ich_func = iwm_preinit;
	sc->sc_preinit_hook.ich_arg = sc;
	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
		device_printf(dev, "config_intrhook_establish failed\n");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);
	/* Free allocated memory if something failed during attachment. */
	iwm_detach_local(sc, 0);
/*
 * Reject multicast/group addresses (bit 0 of the first octet set)
 * and the all-zero address.
 */
iwm_is_valid_ether_addr(uint8_t *addr)
	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
/*
 * net80211 WME/EDCA update callback.  Currently only logs the call;
 * no parameters are pushed to the firmware here.
 */
iwm_update_edca(struct ieee80211com *ic)
	struct iwm_softc *sc = ic->ic_softc;
	device_printf(sc->sc_dev, "%s: called\n", __func__);
6067 iwm_preinit(void *arg)
6069 struct iwm_softc *sc = arg;
6070 device_t dev = sc->sc_dev;
6071 struct ieee80211com *ic = &sc->sc_ic;
6074 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6075 "->%s\n", __func__);
6078 if ((error = iwm_start_hw(sc)) != 0) {
6079 device_printf(dev, "could not initialize hardware\n");
6084 error = iwm_run_init_mvm_ucode(sc, 1);
6085 iwm_stop_device(sc);
6091 "hw rev 0x%x, fw ver %s, address %s\n",
6092 sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6093 sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6095 /* not all hardware can do 5GHz band */
6096 if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6097 memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6098 sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6101 iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6105 * At this point we've committed - if we fail to do setup,
6106 * we now also have to tear down the net80211 state.
6108 ieee80211_ifattach(ic);
6109 ic->ic_vap_create = iwm_vap_create;
6110 ic->ic_vap_delete = iwm_vap_delete;
6111 ic->ic_raw_xmit = iwm_raw_xmit;
6112 ic->ic_node_alloc = iwm_node_alloc;
6113 ic->ic_scan_start = iwm_scan_start;
6114 ic->ic_scan_end = iwm_scan_end;
6115 ic->ic_update_mcast = iwm_update_mcast;
6116 ic->ic_getradiocaps = iwm_init_channel_map;
6117 ic->ic_set_channel = iwm_set_channel;
6118 ic->ic_scan_curchan = iwm_scan_curchan;
6119 ic->ic_scan_mindwell = iwm_scan_mindwell;
6120 ic->ic_wme.wme_update = iwm_update_edca;
6121 ic->ic_parent = iwm_parent;
6122 ic->ic_transmit = iwm_transmit;
6123 iwm_radiotap_attach(sc);
6125 ieee80211_announce(ic);
6127 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6128 "<-%s\n", __func__);
6129 config_intrhook_disestablish(&sc->sc_preinit_hook);
6133 config_intrhook_disestablish(&sc->sc_preinit_hook);
6134 iwm_detach_local(sc, 0);
 * Attach the interface to 802.11 radiotap.
iwm_radiotap_attach(struct iwm_softc *sc)
	/* Register TX/RX radiotap headers with net80211. */
	struct ieee80211com *ic = &sc->sc_ic;
	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s begin\n", __func__);
	ieee80211_radiotap_attach(ic,
	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
	    IWM_TX_RADIOTAP_PRESENT,
	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
	    IWM_RX_RADIOTAP_PRESENT);
	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s end\n", __func__);
/*
 * net80211 VAP creation.  Only a single VAP is supported; wraps the
 * generic vap in an iwm_vap so the driver can interpose iv_newstate.
 */
static struct ieee80211vap *
iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac[IEEE80211_ADDR_LEN])
	struct iwm_vap *ivp;
	struct ieee80211vap *vap;
	if (!TAILQ_EMPTY(&ic->ic_vaps))	/* only one at a time */
	ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
	vap->iv_bmissthreshold = 10;	/* override default */
	/* Override with driver methods. */
	ivp->iv_newstate = vap->iv_newstate;
	vap->iv_newstate = iwm_newstate;
	ieee80211_ratectl_init(vap);
	/* Complete setup. */
	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
	ic->ic_opmode = opmode;
/*
 * net80211 VAP destruction: reverse of iwm_vap_create().
 */
iwm_vap_delete(struct ieee80211vap *vap)
	struct iwm_vap *ivp = IWM_VAP(vap);
	ieee80211_ratectl_deinit(vap);
	ieee80211_vap_detach(vap);
	free(ivp, M_80211_VAP);
6195 iwm_scan_start(struct ieee80211com *ic)
6197 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6198 struct iwm_softc *sc = ic->ic_softc;
6202 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6203 error = iwm_mvm_umac_scan(sc);
6205 error = iwm_mvm_lmac_scan(sc);
6207 device_printf(sc->sc_dev, "could not initiate 2 GHz scan\n");
6209 ieee80211_cancel_scan(vap);
6211 iwm_led_blink_start(sc);
6217 iwm_scan_end(struct ieee80211com *ic)
6219 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6220 struct iwm_softc *sc = ic->ic_softc;
6223 iwm_led_blink_stop(sc);
6224 if (vap->iv_state == IEEE80211_S_RUN)
6225 iwm_mvm_led_enable(sc);
/* net80211 multicast-update callback; intentionally a no-op here. */
static void
iwm_update_mcast(struct ieee80211com *ic)
{
}
/* net80211 set-channel callback; no-op — firmware manages channels. */
static void
iwm_set_channel(struct ieee80211com *ic)
{
}
/* Per-channel scan dwell callback; no-op — the scan is firmware-driven. */
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
/* Minimum-dwell scan callback; no-op — the scan is firmware-driven. */
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
6251 iwm_init_task(void *arg1)
6253 struct iwm_softc *sc = arg1;
6256 while (sc->sc_flags & IWM_FLAG_BUSY)
6257 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6258 sc->sc_flags |= IWM_FLAG_BUSY;
6260 if (sc->sc_ic.ic_nrunning > 0)
6262 sc->sc_flags &= ~IWM_FLAG_BUSY;
6263 wakeup(&sc->sc_flags);
6268 iwm_resume(device_t dev)
6270 struct iwm_softc *sc = device_get_softc(dev);
6274 /* Clear device-specific "PCI retry timeout" register (41h). */
6275 reg = pci_read_config(dev, 0x40, sizeof(reg));
6276 pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
6277 iwm_init_task(device_get_softc(dev));
6280 if (sc->sc_flags & IWM_FLAG_SCANNING) {
6281 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6287 ieee80211_resume_all(&sc->sc_ic);
6293 iwm_suspend(device_t dev)
6296 struct iwm_softc *sc = device_get_softc(dev);
6298 do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6300 ieee80211_suspend_all(&sc->sc_ic);
6305 sc->sc_flags |= IWM_FLAG_SCANNING;
6313 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6315 struct iwm_fw_info *fw = &sc->sc_fw;
6316 device_t dev = sc->sc_dev;
6319 if (!sc->sc_attached)
6321 sc->sc_attached = 0;
6324 ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6326 callout_drain(&sc->sc_led_blink_to);
6327 callout_drain(&sc->sc_watchdog_to);
6328 iwm_stop_device(sc);
6330 ieee80211_ifdetach(&sc->sc_ic);
6333 iwm_phy_db_free(sc->sc_phy_db);
6334 sc->sc_phy_db = NULL;
6336 iwm_free_nvm_data(sc->nvm_data);
6338 /* Free descriptor rings */
6339 iwm_free_rx_ring(sc, &sc->rxq);
6340 for (i = 0; i < nitems(sc->txq); i++)
6341 iwm_free_tx_ring(sc, &sc->txq[i]);
6344 if (fw->fw_fp != NULL)
6345 iwm_fw_info_free(fw);
6347 /* Free scheduler */
6348 iwm_dma_contig_free(&sc->sched_dma);
6349 iwm_dma_contig_free(&sc->ict_dma);
6350 iwm_dma_contig_free(&sc->kw_dma);
6351 iwm_dma_contig_free(&sc->fw_dma);
6353 /* Finished with the hardware - detach things */
6354 iwm_pci_detach(dev);
6356 mbufq_drain(&sc->sc_snd);
6357 IWM_LOCK_DESTROY(sc);
6363 iwm_detach(device_t dev)
6365 struct iwm_softc *sc = device_get_softc(dev);
6367 return (iwm_detach_local(sc, 1));
6370 static device_method_t iwm_pci_methods[] = {
6371 /* Device interface */
6372 DEVMETHOD(device_probe, iwm_probe),
6373 DEVMETHOD(device_attach, iwm_attach),
6374 DEVMETHOD(device_detach, iwm_detach),
6375 DEVMETHOD(device_suspend, iwm_suspend),
6376 DEVMETHOD(device_resume, iwm_resume),
6381 static driver_t iwm_pci_driver = {
6384 sizeof (struct iwm_softc)
6387 static devclass_t iwm_devclass;
6389 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6390 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6391 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6392 MODULE_DEPEND(iwm, wlan, 1, 1, 1);