1 /* $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $ */
4 * Copyright (c) 2014 genua mbh <info@genua.de>
5 * Copyright (c) 2014 Fixup Software Ltd.
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22 * which were used as the reference documentation for this implementation.
24 * Driver version we are currently based off of is
25 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
27 ***********************************************************************
29 * This file is provided under a dual BSD/GPLv2 license. When using or
30 * redistributing this file, you may do so under either license.
34 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
36 * This program is free software; you can redistribute it and/or modify
37 * it under the terms of version 2 of the GNU General Public License as
38 * published by the Free Software Foundation.
40 * This program is distributed in the hope that it will be useful, but
41 * WITHOUT ANY WARRANTY; without even the implied warranty of
42 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
43 * General Public License for more details.
45 * You should have received a copy of the GNU General Public License
46 * along with this program; if not, write to the Free Software
47 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
50 * The full GNU General Public License is included in this distribution
51 * in the file called COPYING.
53 * Contact Information:
54 * Intel Linux Wireless <ilw@linux.intel.com>
55 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
60 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61 * All rights reserved.
63 * Redistribution and use in source and binary forms, with or without
64 * modification, are permitted provided that the following conditions
67 * * Redistributions of source code must retain the above copyright
68 * notice, this list of conditions and the following disclaimer.
69 * * Redistributions in binary form must reproduce the above copyright
70 * notice, this list of conditions and the following disclaimer in
71 * the documentation and/or other materials provided with the
73 * * Neither the name Intel Corporation nor the names of its
74 * contributors may be used to endorse or promote products derived
75 * from this software without specific prior written permission.
77 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
91 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
93 * Permission to use, copy, modify, and distribute this software for any
94 * purpose with or without fee is hereby granted, provided that the above
95 * copyright notice and this permission notice appear in all copies.
97 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
105 #include <sys/cdefs.h>
106 __FBSDID("$FreeBSD$");
108 #include "opt_wlan.h"
110 #include <sys/param.h>
112 #include <sys/conf.h>
113 #include <sys/endian.h>
114 #include <sys/firmware.h>
115 #include <sys/kernel.h>
116 #include <sys/malloc.h>
117 #include <sys/mbuf.h>
118 #include <sys/mutex.h>
119 #include <sys/module.h>
120 #include <sys/proc.h>
121 #include <sys/rman.h>
122 #include <sys/socket.h>
123 #include <sys/sockio.h>
124 #include <sys/sysctl.h>
125 #include <sys/linker.h>
127 #include <machine/bus.h>
128 #include <machine/endian.h>
129 #include <machine/resource.h>
131 #include <dev/pci/pcivar.h>
132 #include <dev/pci/pcireg.h>
137 #include <net/if_var.h>
138 #include <net/if_arp.h>
139 #include <net/if_dl.h>
140 #include <net/if_media.h>
141 #include <net/if_types.h>
143 #include <netinet/in.h>
144 #include <netinet/in_systm.h>
145 #include <netinet/if_ether.h>
146 #include <netinet/ip.h>
148 #include <net80211/ieee80211_var.h>
149 #include <net80211/ieee80211_regdomain.h>
150 #include <net80211/ieee80211_ratectl.h>
151 #include <net80211/ieee80211_radiotap.h>
153 #include <dev/iwm/if_iwmreg.h>
154 #include <dev/iwm/if_iwmvar.h>
155 #include <dev/iwm/if_iwm_debug.h>
156 #include <dev/iwm/if_iwm_notif_wait.h>
157 #include <dev/iwm/if_iwm_util.h>
158 #include <dev/iwm/if_iwm_binding.h>
159 #include <dev/iwm/if_iwm_phy_db.h>
160 #include <dev/iwm/if_iwm_mac_ctxt.h>
161 #include <dev/iwm/if_iwm_phy_ctxt.h>
162 #include <dev/iwm/if_iwm_time_event.h>
163 #include <dev/iwm/if_iwm_power.h>
164 #include <dev/iwm/if_iwm_scan.h>
166 #include <dev/iwm/if_iwm_pcie_trans.h>
167 #include <dev/iwm/if_iwm_led.h>
/* Index of the "hardware" NVM section, which differs per device family. */
169 #define IWM_NVM_HW_SECTION_NUM_FAMILY_7000 0
170 #define IWM_NVM_HW_SECTION_NUM_FAMILY_8000 10
172 /* lower blocks contain EEPROM image and calibration data */
173 #define IWM_OTP_LOW_IMAGE_SIZE_FAMILY_7000 (16 * 512 * sizeof(uint16_t)) /* 16 KB */
174 #define IWM_OTP_LOW_IMAGE_SIZE_FAMILY_8000 (32 * 512 * sizeof(uint16_t)) /* 32 KB */
/* firmware(9) image names, looked up via firmware_get() in iwm_read_firmware(). */
176 #define IWM7260_FW "iwm7260fw"
177 #define IWM3160_FW "iwm3160fw"
178 #define IWM7265_FW "iwm7265fw"
179 #define IWM7265D_FW "iwm7265Dfw"
180 #define IWM8000_FW "iwm8000Cfw"
/*
 * Fields shared by all 7000-family device configurations; spliced into
 * each initializer below via the macro.
 */
182 #define IWM_DEVICE_7000_COMMON \
183 .device_family = IWM_DEVICE_FAMILY_7000, \
184 .eeprom_size = IWM_OTP_LOW_IMAGE_SIZE_FAMILY_7000, \
185 .nvm_hw_section_num = IWM_NVM_HW_SECTION_NUM_FAMILY_7000
/* Intel Dual Band Wireless AC 7260 */
187 const struct iwm_cfg iwm7260_cfg = {
188 .fw_name = IWM7260_FW,
189 IWM_DEVICE_7000_COMMON,
190 .host_interrupt_operation_mode = 1,
/* Intel Dual Band Wireless AC 3160 */
193 const struct iwm_cfg iwm3160_cfg = {
194 .fw_name = IWM3160_FW,
195 IWM_DEVICE_7000_COMMON,
196 .host_interrupt_operation_mode = 1,
/* Intel Dual Band Wireless AC 3165 — deliberately uses the 7265 image. */
199 const struct iwm_cfg iwm3165_cfg = {
200 /* XXX IWM7265D_FW doesn't seem to work properly yet */
201 .fw_name = IWM7265_FW,
202 IWM_DEVICE_7000_COMMON,
203 .host_interrupt_operation_mode = 0,
/* Intel Dual Band Wireless AC 7265 */
206 const struct iwm_cfg iwm7265_cfg = {
207 .fw_name = IWM7265_FW,
208 IWM_DEVICE_7000_COMMON,
209 .host_interrupt_operation_mode = 0,
/* 7265D variant — falls back to the plain 7265 image, see XXX below. */
212 const struct iwm_cfg iwm7265d_cfg = {
213 /* XXX IWM7265D_FW doesn't seem to work properly yet */
214 .fw_name = IWM7265_FW,
215 IWM_DEVICE_7000_COMMON,
216 .host_interrupt_operation_mode = 0,
/* Fields shared by all 8000-family device configurations. */
219 #define IWM_DEVICE_8000_COMMON \
220 .device_family = IWM_DEVICE_FAMILY_8000, \
221 .eeprom_size = IWM_OTP_LOW_IMAGE_SIZE_FAMILY_8000, \
222 .nvm_hw_section_num = IWM_NVM_HW_SECTION_NUM_FAMILY_8000
/* Intel Dual Band Wireless AC 8260 */
224 const struct iwm_cfg iwm8260_cfg = {
225 .fw_name = IWM8000_FW,
226 IWM_DEVICE_8000_COMMON,
227 .host_interrupt_operation_mode = 0,
/*
 * Channel numbers the NVM may enable, in NVM bitmap order:
 * 2 GHz channels first, then 5 GHz.  The static asserts keep the
 * table sizes within the driver's channel-flag array bounds.
 */
230 const uint8_t iwm_nvm_channels[] = {
232 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
234 36, 40, 44, 48, 52, 56, 60, 64,
235 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
236 149, 153, 157, 161, 165
238 _Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
239 "IWM_NUM_CHANNELS is too small");
/* 8000-family NVM uses a larger channel list (extra 5 GHz channels). */
241 const uint8_t iwm_nvm_channels_8000[] = {
243 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
245 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
246 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
247 149, 153, 157, 161, 165, 169, 173, 177, 181
249 _Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
250 "IWM_NUM_CHANNELS_8000 is too small");
/* First 14 entries of the channel tables above are the 2 GHz channels. */
252 #define IWM_NUM_2GHZ_CHANNELS 14
253 #define IWM_N_HW_ADDR_MASK 0xF
256 * XXX For now, there's simply a fixed set of rate table entries
257 * that are populated.
/*
 * Rate table: first field is the rate in 500 kb/s units (2 => 1 Mb/s),
 * second is the matching PLCP code (the pairing follows the
 * IWM_RATE_*_PLCP define names).  CCK rates first, then OFDM.
 */
259 const struct iwm_rate {
263 { 2, IWM_RATE_1M_PLCP },
264 { 4, IWM_RATE_2M_PLCP },
265 { 11, IWM_RATE_5M_PLCP },
266 { 22, IWM_RATE_11M_PLCP },
267 { 12, IWM_RATE_6M_PLCP },
268 { 18, IWM_RATE_9M_PLCP },
269 { 24, IWM_RATE_12M_PLCP },
270 { 36, IWM_RATE_18M_PLCP },
271 { 48, IWM_RATE_24M_PLCP },
272 { 96, IWM_RATE_48M_PLCP },
273 { 72, IWM_RATE_36M_PLCP },
274 { 108, IWM_RATE_54M_PLCP },
/* Indices into iwm_rates[]: entries below IWM_RIDX_OFDM are CCK. */
276 #define IWM_RIDX_CCK 0
277 #define IWM_RIDX_OFDM 4
278 #define IWM_RIDX_MAX (nitems(iwm_rates)-1)
279 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
280 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
/*
 * One NVM section read from the device; consumed by
 * iwm_parse_nvm_sections().  NOTE(review): member declarations are not
 * visible in this excerpt — presumably a data pointer and a length.
 */
282 struct iwm_nvm_section {
287 static int iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
288 static int iwm_firmware_store_section(struct iwm_softc *,
290 const uint8_t *, size_t);
291 static int iwm_set_default_calib(struct iwm_softc *, const void *);
292 static void iwm_fw_info_free(struct iwm_fw_info *);
293 static int iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
294 static void iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
295 static int iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
296 bus_size_t, bus_size_t);
297 static void iwm_dma_contig_free(struct iwm_dma_info *);
298 static int iwm_alloc_fwmem(struct iwm_softc *);
299 static int iwm_alloc_sched(struct iwm_softc *);
300 static int iwm_alloc_kw(struct iwm_softc *);
301 static int iwm_alloc_ict(struct iwm_softc *);
302 static int iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
303 static void iwm_disable_rx_dma(struct iwm_softc *);
304 static void iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
305 static void iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
306 static int iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
308 static void iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
309 static void iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
310 static void iwm_enable_interrupts(struct iwm_softc *);
311 static void iwm_restore_interrupts(struct iwm_softc *);
312 static void iwm_disable_interrupts(struct iwm_softc *);
313 static void iwm_ict_reset(struct iwm_softc *);
314 static int iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
315 static void iwm_stop_device(struct iwm_softc *);
316 static void iwm_mvm_nic_config(struct iwm_softc *);
317 static int iwm_nic_rx_init(struct iwm_softc *);
318 static int iwm_nic_tx_init(struct iwm_softc *);
319 static int iwm_nic_init(struct iwm_softc *);
320 static int iwm_enable_txq(struct iwm_softc *, int, int, int);
321 static int iwm_post_alive(struct iwm_softc *);
322 static int iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
323 uint16_t, uint8_t *, uint16_t *);
324 static int iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
325 uint16_t *, uint32_t);
326 static uint32_t iwm_eeprom_channel_flags(uint16_t);
327 static void iwm_add_channel_band(struct iwm_softc *,
328 struct ieee80211_channel[], int, int *, int, size_t,
330 static void iwm_init_channel_map(struct ieee80211com *, int, int *,
331 struct ieee80211_channel[]);
332 static struct iwm_nvm_data *
333 iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
334 const uint16_t *, const uint16_t *,
335 const uint16_t *, const uint16_t *,
337 static void iwm_free_nvm_data(struct iwm_nvm_data *);
338 static void iwm_set_hw_address_family_8000(struct iwm_softc *,
339 struct iwm_nvm_data *,
342 static int iwm_get_sku(const struct iwm_softc *, const uint16_t *,
344 static int iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
345 static int iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
347 static int iwm_get_n_hw_addrs(const struct iwm_softc *,
349 static void iwm_set_radio_cfg(const struct iwm_softc *,
350 struct iwm_nvm_data *, uint32_t);
351 static struct iwm_nvm_data *
352 iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
353 static int iwm_nvm_init(struct iwm_softc *);
354 static int iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
355 const uint8_t *, uint32_t);
356 static int iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
357 const uint8_t *, uint32_t);
358 static int iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
359 static int iwm_load_cpu_sections_8000(struct iwm_softc *,
360 struct iwm_fw_sects *, int , int *);
361 static int iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
362 static int iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
363 static int iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
364 static int iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
365 static int iwm_send_phy_cfg_cmd(struct iwm_softc *);
366 static int iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
367 enum iwm_ucode_type);
368 static int iwm_run_init_mvm_ucode(struct iwm_softc *, int);
369 static int iwm_rx_addbuf(struct iwm_softc *, int, int);
370 static int iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
371 static int iwm_mvm_get_signal_strength(struct iwm_softc *,
372 struct iwm_rx_phy_info *);
373 static void iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
374 struct iwm_rx_packet *,
375 struct iwm_rx_data *);
376 static int iwm_get_noise(struct iwm_softc *sc,
377 const struct iwm_mvm_statistics_rx_non_phy *);
378 static void iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
379 struct iwm_rx_data *);
380 static int iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
381 struct iwm_rx_packet *,
383 static void iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
384 struct iwm_rx_data *);
385 static void iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
387 static void iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
390 static const struct iwm_rate *
391 iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
392 struct mbuf *, struct iwm_tx_cmd *);
393 static int iwm_tx(struct iwm_softc *, struct mbuf *,
394 struct ieee80211_node *, int);
395 static int iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
396 const struct ieee80211_bpf_params *);
397 static int iwm_mvm_flush_tx_path(struct iwm_softc *sc,
398 uint32_t tfd_msk, uint32_t flags);
399 static int iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
400 struct iwm_mvm_add_sta_cmd_v7 *,
402 static int iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
404 static int iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
405 static int iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
406 static int iwm_mvm_add_int_sta_common(struct iwm_softc *,
407 struct iwm_int_sta *,
408 const uint8_t *, uint16_t, uint16_t);
409 static int iwm_mvm_add_aux_sta(struct iwm_softc *);
410 static int iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
411 static int iwm_auth(struct ieee80211vap *, struct iwm_softc *);
412 static int iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
413 static int iwm_release(struct iwm_softc *, struct iwm_node *);
414 static struct ieee80211_node *
415 iwm_node_alloc(struct ieee80211vap *,
416 const uint8_t[IEEE80211_ADDR_LEN]);
417 static void iwm_setrates(struct iwm_softc *, struct iwm_node *);
418 static int iwm_media_change(struct ifnet *);
419 static int iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
420 static void iwm_endscan_cb(void *, int);
421 static void iwm_mvm_fill_sf_command(struct iwm_softc *,
422 struct iwm_sf_cfg_cmd *,
423 struct ieee80211_node *);
424 static int iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
425 static int iwm_send_bt_init_conf(struct iwm_softc *);
426 static int iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
427 static void iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
428 static int iwm_init_hw(struct iwm_softc *);
429 static void iwm_init(struct iwm_softc *);
430 static void iwm_start(struct iwm_softc *);
431 static void iwm_stop(struct iwm_softc *);
432 static void iwm_watchdog(void *);
433 static void iwm_parent(struct ieee80211com *);
436 iwm_desc_lookup(uint32_t);
437 static void iwm_nic_error(struct iwm_softc *);
438 static void iwm_nic_umac_error(struct iwm_softc *);
440 static void iwm_notif_intr(struct iwm_softc *);
441 static void iwm_intr(void *);
442 static int iwm_attach(device_t);
443 static int iwm_is_valid_ether_addr(uint8_t *);
444 static void iwm_preinit(void *);
445 static int iwm_detach_local(struct iwm_softc *sc, int);
446 static void iwm_init_task(void *);
447 static void iwm_radiotap_attach(struct iwm_softc *);
448 static struct ieee80211vap *
449 iwm_vap_create(struct ieee80211com *,
450 const char [IFNAMSIZ], int,
451 enum ieee80211_opmode, int,
452 const uint8_t [IEEE80211_ADDR_LEN],
453 const uint8_t [IEEE80211_ADDR_LEN]);
454 static void iwm_vap_delete(struct ieee80211vap *);
455 static void iwm_scan_start(struct ieee80211com *);
456 static void iwm_scan_end(struct ieee80211com *);
457 static void iwm_update_mcast(struct ieee80211com *);
458 static void iwm_set_channel(struct ieee80211com *);
459 static void iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
460 static void iwm_scan_mindwell(struct ieee80211_scan_state *);
461 static int iwm_detach(device_t);
/*
 * Handle the firmware's CSCHEME TLV: validate that the declared
 * coexistence/crypto scheme list fits inside the TLV payload.  The
 * contents are intentionally discarded — see comment below.
 */
468 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
470 const struct iwm_fw_cscheme_list *l = (const void *)data;
472 if (dlen < sizeof(*l) ||
473 dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
476 /* we don't actually store anything for now, always use s/w crypto */
/*
 * Record one firmware section of the given ucode type in
 * sc->sc_fw.fw_sects[type].  The payload layout is: a 32-bit device
 * load offset followed by the section data itself.  Bounds-checks the
 * ucode type, the minimum payload size, and the per-type section count.
 */
482 iwm_firmware_store_section(struct iwm_softc *sc,
483 enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
485 struct iwm_fw_sects *fws;
486 struct iwm_fw_onesect *fwone;
488 if (type >= IWM_UCODE_TYPE_MAX)
490 if (dlen < sizeof(uint32_t))
493 fws = &sc->sc_fw.fw_sects[type];
494 if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
497 fwone = &fws->fw_sect[fws->fw_count];
499 /* first 32bit are device load offset */
500 memcpy(&fwone->fws_devoff, data, sizeof(uint32_t))
503 fwone->fws_data = data + sizeof(uint32_t);
504 fwone->fws_len = dlen - sizeof(uint32_t);
/* Scan-channel count assumed until the firmware's N_SCAN_CHANNELS TLV says otherwise. */
511 #define IWM_DEFAULT_SCAN_CHANNELS 40
513 /* iwlwifi: iwl-drv.c */
/*
 * Payload of the DEF_CALIB TLV: a little-endian ucode_type (read via
 * le32toh() in iwm_set_default_calib()) followed by the calib control.
 */
514 struct iwm_tlv_calib_data {
516 struct iwm_tlv_calib_ctrl calib;
/*
 * Handle the firmware's DEF_CALIB TLV: stash the flow/event calibration
 * triggers for the ucode type named in the payload, rejecting
 * out-of-range ucode types.
 */
520 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
522 const struct iwm_tlv_calib_data *def_calib = data;
523 uint32_t ucode_type = le32toh(def_calib->ucode_type);
525 if (ucode_type >= IWM_UCODE_TYPE_MAX) {
526 device_printf(sc->sc_dev,
527 "Wrong ucode_type %u for default "
528 "calibration.\n", ucode_type);
532 sc->sc_default_calib[ucode_type].flow_trigger =
533 def_calib->calib.flow_trigger;
534 sc->sc_default_calib[ucode_type].event_trigger =
535 def_calib->calib.event_trigger;
/*
 * Release the firmware(9) image reference and forget the parsed
 * section table.  fw_status is deliberately left alone so callers can
 * manage the load-state machine themselves.
 */
541 iwm_fw_info_free(struct iwm_fw_info *fw)
543 firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
545 /* don't touch fw->fw_status */
546 memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
/*
 * Load the firmware image via firmware(9) and walk its TLV stream,
 * populating sc->sc_fw (sections, capabilities, version string,
 * paging size, scan-channel count, ...).  Serializes concurrent
 * callers through fw->fw_status / msleep on &sc->sc_fw.
 */
550 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
552 struct iwm_fw_info *fw = &sc->sc_fw;
553 const struct iwm_tlv_ucode_header *uhdr;
554 struct iwm_ucode_tlv tlv;
555 enum iwm_ucode_tlv_type tlv_type;
556 const struct firmware *fwp;
558 uint32_t usniffer_img;
559 uint32_t paging_mem_size;
/* Already parsed and not re-requesting INIT ucode: nothing to do. */
563 if (fw->fw_status == IWM_FW_STATUS_DONE &&
564 ucode_type != IWM_UCODE_INIT)
/* Wait out any parse already in progress, then claim the slot. */
567 while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
568 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
569 fw->fw_status = IWM_FW_STATUS_INPROGRESS;
571 if (fw->fw_fp != NULL)
572 iwm_fw_info_free(fw);
575 * Load firmware into driver memory.
579 fwp = firmware_get(sc->cfg->fw_name);
582 device_printf(sc->sc_dev,
583 "could not read firmware %s (error %d)\n",
584 sc->cfg->fw_name, error);
589 /* (Re-)Initialize default values. */
590 sc->sc_capaflags = 0;
591 sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
592 memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
593 memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
596 * Parse firmware contents
/* Header sanity: leading zero word plus the TLV ucode magic. */
599 uhdr = (const void *)fw->fw_fp->data;
600 if (*(const uint32_t *)fw->fw_fp->data != 0
601 || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
602 device_printf(sc->sc_dev, "invalid firmware %s\n",
608 snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
609 IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
610 IWM_UCODE_MINOR(le32toh(uhdr->ver)),
611 IWM_UCODE_API(le32toh(uhdr->ver)));
613 len = fw->fw_fp->datasize - sizeof(*uhdr);
/* Main TLV walk: each iteration consumes one (type, length, data) record. */
615 while (len >= sizeof(tlv)) {
617 const void *tlv_data;
619 memcpy(&tlv, data, sizeof(tlv));
620 tlv_len = le32toh(tlv.length);
621 tlv_type = le32toh(tlv.type);
628 device_printf(sc->sc_dev,
629 "firmware too short: %zu bytes\n",
635 switch ((int)tlv_type) {
636 case IWM_UCODE_TLV_PROBE_MAX_LEN:
637 if (tlv_len < sizeof(uint32_t)) {
638 device_printf(sc->sc_dev,
639 "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
645 sc->sc_capa_max_probe_len
646 = le32toh(*(const uint32_t *)tlv_data);
647 /* limit it to something sensible */
648 if (sc->sc_capa_max_probe_len >
649 IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
650 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
651 "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
652 "ridiculous\n", __func__);
657 case IWM_UCODE_TLV_PAN:
659 device_printf(sc->sc_dev,
660 "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
666 sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
668 case IWM_UCODE_TLV_FLAGS:
669 if (tlv_len < sizeof(uint32_t)) {
670 device_printf(sc->sc_dev,
671 "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
678 * Apparently there can be many flags, but Linux driver
679 * parses only the first one, and so do we.
681 * XXX: why does this override IWM_UCODE_TLV_PAN?
682 * Intentional or a bug? Observations from
683 * current firmware file:
684 * 1) TLV_PAN is parsed first
685 * 2) TLV_FLAGS contains TLV_FLAGS_PAN
686 * ==> this resets TLV_PAN to itself... hnnnk
688 sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
690 case IWM_UCODE_TLV_CSCHEME:
691 if ((error = iwm_store_cscheme(sc,
692 tlv_data, tlv_len)) != 0) {
693 device_printf(sc->sc_dev,
694 "%s: iwm_store_cscheme(): returned %d\n",
700 case IWM_UCODE_TLV_NUM_OF_CPU: {
702 if (tlv_len != sizeof(uint32_t)) {
703 device_printf(sc->sc_dev,
704 "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) < sizeof(uint32_t)\n",
710 num_cpu = le32toh(*(const uint32_t *)tlv_data);
711 if (num_cpu < 1 || num_cpu > 2) {
712 device_printf(sc->sc_dev,
713 "%s: Driver supports only 1 or 2 CPUs\n",
/* Section TLVs: stash the payload per ucode image type. */
720 case IWM_UCODE_TLV_SEC_RT:
721 if ((error = iwm_firmware_store_section(sc,
722 IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
723 device_printf(sc->sc_dev,
724 "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
730 case IWM_UCODE_TLV_SEC_INIT:
731 if ((error = iwm_firmware_store_section(sc,
732 IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
733 device_printf(sc->sc_dev,
734 "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
740 case IWM_UCODE_TLV_SEC_WOWLAN:
741 if ((error = iwm_firmware_store_section(sc,
742 IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
743 device_printf(sc->sc_dev,
744 "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
750 case IWM_UCODE_TLV_DEF_CALIB:
751 if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
752 device_printf(sc->sc_dev,
753 "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
756 (int) sizeof(struct iwm_tlv_calib_data));
760 if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
761 device_printf(sc->sc_dev,
762 "%s: iwm_set_default_calib() failed: %d\n",
768 case IWM_UCODE_TLV_PHY_SKU:
769 if (tlv_len != sizeof(uint32_t)) {
771 device_printf(sc->sc_dev,
772 "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
/* PHY SKU word encodes the valid TX/RX antenna chain masks. */
777 sc->sc_fw.phy_config =
778 le32toh(*(const uint32_t *)tlv_data);
779 sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
780 IWM_FW_PHY_CFG_TX_CHAIN) >>
781 IWM_FW_PHY_CFG_TX_CHAIN_POS;
782 sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
783 IWM_FW_PHY_CFG_RX_CHAIN) >>
784 IWM_FW_PHY_CFG_RX_CHAIN_POS;
787 case IWM_UCODE_TLV_API_CHANGES_SET: {
788 const struct iwm_ucode_api *api;
789 if (tlv_len != sizeof(*api)) {
793 api = (const struct iwm_ucode_api *)tlv_data;
794 /* Flags may exceed 32 bits in future firmware. */
795 if (le32toh(api->api_index) > 0) {
796 device_printf(sc->sc_dev,
797 "unsupported API index %d\n",
798 le32toh(api->api_index));
801 sc->sc_ucode_api = le32toh(api->api_flags);
805 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
806 const struct iwm_ucode_capa *capa;
808 if (tlv_len != sizeof(*capa)) {
812 capa = (const struct iwm_ucode_capa *)tlv_data;
813 idx = le32toh(capa->api_index);
814 if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
815 device_printf(sc->sc_dev,
816 "unsupported API index %d\n", idx);
/* Expand each set bit of the 32-bit capa word into the bitmap. */
819 for (i = 0; i < 32; i++) {
820 if ((le32toh(capa->api_capa) & (1U << i)) == 0)
822 setbit(sc->sc_enabled_capa, i + (32 * idx));
827 case 48: /* undocumented TLV */
828 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
829 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
830 /* ignore, not used by current driver */
833 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
834 if ((error = iwm_firmware_store_section(sc,
835 IWM_UCODE_REGULAR_USNIFFER, tlv_data,
840 case IWM_UCODE_TLV_PAGING:
841 if (tlv_len != sizeof(uint32_t)) {
845 paging_mem_size = le32toh(*(const uint32_t *)tlv_data);
847 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
848 "%s: Paging: paging enabled (size = %u bytes)\n",
849 __func__, paging_mem_size);
850 if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
851 device_printf(sc->sc_dev,
852 "%s: Paging: driver supports up to %u bytes for paging image\n",
853 __func__, IWM_MAX_PAGING_IMAGE_SIZE);
/* Paging image must be a whole number of firmware pages. */
857 if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
858 device_printf(sc->sc_dev,
859 "%s: Paging: image isn't multiple %u\n",
860 __func__, IWM_FW_PAGING_SIZE);
865 sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
867 usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
868 sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
872 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
873 if (tlv_len != sizeof(uint32_t)) {
877 sc->sc_capa_n_scan_channels =
878 le32toh(*(const uint32_t *)tlv_data);
881 case IWM_UCODE_TLV_FW_VERSION:
882 if (tlv_len != sizeof(uint32_t) * 3) {
886 snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
888 le32toh(((const uint32_t *)tlv_data)[0]),
889 le32toh(((const uint32_t *)tlv_data)[1]),
890 le32toh(((const uint32_t *)tlv_data)[2]));
894 device_printf(sc->sc_dev,
895 "%s: unknown firmware section %d, abort\n",
/* TLVs are padded to 4-byte alignment in the stream. */
901 len -= roundup(tlv_len, 4);
902 data += roundup(tlv_len, 4);
905 KASSERT(error == 0, ("unhandled error"));
909 device_printf(sc->sc_dev, "firmware parse error %d, "
910 "section type %d\n", error, tlv_type);
913 if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
914 device_printf(sc->sc_dev,
915 "device uses unsupported power ops\n");
/* On failure, drop the image and reset state; on success, mark done. */
921 fw->fw_status = IWM_FW_STATUS_NONE;
922 if (fw->fw_fp != NULL)
923 iwm_fw_info_free(fw);
925 fw->fw_status = IWM_FW_STATUS_DONE;
932 * DMA resource routines
/*
 * bus_dmamap_load() callback: store the single segment's bus address
 * into the bus_addr_t the caller passed as arg.
 */
936 iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
940 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
941 *(bus_addr_t *)arg = segs[0].ds_addr;
/*
 * Allocate a physically-contiguous, zeroed, coherent DMA buffer of the
 * given size/alignment under a 32-bit address limit; fills in dma->tag,
 * dma->map, dma->vaddr and dma->paddr.  On any failure the partially
 * acquired resources are released via iwm_dma_contig_free().
 */
945 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
946 bus_size_t size, bus_size_t alignment)
955 error = bus_dma_tag_create(tag, alignment,
956 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
957 1, size, 0, NULL, NULL, &dma->tag);
961 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
962 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
/* Load the map; iwm_dma_map_addr() captures the bus address in dma->paddr. */
966 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
967 iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
969 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
974 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
979 iwm_dma_contig_free(dma);
/*
 * Tear down a buffer allocated by iwm_dma_contig_alloc(): sync, unload,
 * free the memory, then destroy the tag.  Safe on partially-initialized
 * state (NULL vaddr/tag are skipped).
 */
985 iwm_dma_contig_free(struct iwm_dma_info *dma)
987 if (dma->vaddr != NULL) {
988 bus_dmamap_sync(dma->tag, dma->map,
989 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
990 bus_dmamap_unload(dma->tag, dma->map);
991 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
994 if (dma->tag != NULL) {
995 bus_dma_tag_destroy(dma->tag);
1000 /* fwmem is used to load firmware onto the card */
/* Allocate the staging buffer used to push firmware chunks to the device. */
1002 iwm_alloc_fwmem(struct iwm_softc *sc)
1004 /* Must be aligned on a 16-byte boundary. */
1005 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
1006 sc->sc_fwdmasegsz, 16);
1009 /* tx scheduler rings. not used? */
/* Allocate the TX scheduler byte-count tables, one per TX queue. */
1011 iwm_alloc_sched(struct iwm_softc *sc)
1013 /* TX scheduler rings must be aligned on a 1KB boundary. */
1014 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
1015 nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
1018 /* keep-warm page is used internally by the card. see iwl-fh.h for more info */
/* Allocate the 4 KB page-aligned "keep warm" page. */
1020 iwm_alloc_kw(struct iwm_softc *sc)
1022 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
1025 /* interrupt cause table */
/* Allocate the ICT, aligned so the hardware can address it by shifted paddr. */
1027 iwm_alloc_ict(struct iwm_softc *sc)
1029 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
1030 IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
/*
 * Set up the RX ring: descriptor array, status area, buffer DMA tag,
 * a spare map for iwm_rx_addbuf(), and one mapped buffer per slot.
 * On any failure, everything acquired so far is released via the
 * common fail path (iwm_free_rx_ring()).
 */
1034 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1041 /* Allocate RX descriptors (256-byte aligned). */
1042 size = IWM_RX_RING_COUNT * sizeof(uint32_t);
1043 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1045 device_printf(sc->sc_dev,
1046 "could not allocate RX ring DMA memory\n");
1049 ring->desc = ring->desc_dma.vaddr;
1051 /* Allocate RX status area (16-byte aligned). */
1052 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1053 sizeof(*ring->stat), 16);
1055 device_printf(sc->sc_dev,
1056 "could not allocate RX status DMA memory\n");
1059 ring->stat = ring->stat_dma.vaddr;
1061 /* Create RX buffer DMA tag. */
1062 error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1063 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1064 IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
1066 device_printf(sc->sc_dev,
1067 "%s: could not create RX buf DMA tag, error %d\n",
1072 /* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
1073 error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
1075 device_printf(sc->sc_dev,
1076 "%s: could not create RX buf DMA map, error %d\n",
1081 * Allocate and map RX buffers.
1083 for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1084 struct iwm_rx_data *data = &ring->data[i];
1085 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1087 device_printf(sc->sc_dev,
1088 "%s: could not create RX buf DMA map, error %d\n",
1094 if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
1100 fail: iwm_free_rx_ring(sc, ring);
/* Stop PCIe RX DMA, if the NIC can be locked; result is ignored. */
1105 iwm_disable_rx_dma(struct iwm_softc *sc)
1107 /* XXX conditional nic locks are stupid */
1108 /* XXX print out if we can't lock the NIC? */
1109 if (iwm_nic_lock(sc)) {
1110 /* XXX handle if RX stop doesn't finish? */
1111 (void) iwm_pcie_rx_stop(sc);
/* Reset RX ring bookkeeping and clear the shared status area. */
1117 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1119 /* Reset the ring state */
1123 * The hw rx ring index in shared memory must also be cleared,
1124 * otherwise the discrepancy can cause reprocessing chaos.
1126 memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
/*
 * Release everything iwm_alloc_rx_ring() created: descriptor and
 * status DMA memory, each slot's loaded mbuf/map, the spare map, and
 * finally the RX buffer DMA tag.  Safe to call on a partially
 * constructed ring (every teardown step is NULL-guarded).
 */
1130 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1134 iwm_dma_contig_free(&ring->desc_dma);
1135 iwm_dma_contig_free(&ring->stat_dma);
1137 for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1138 struct iwm_rx_data *data = &ring->data[i];
/* Sync and unload before the map is destroyed below. */
1140 if (data->m != NULL) {
1141 bus_dmamap_sync(ring->data_dmat, data->map,
1142 BUS_DMASYNC_POSTREAD);
1143 bus_dmamap_unload(ring->data_dmat, data->map);
1147 if (data->map != NULL) {
1148 bus_dmamap_destroy(ring->data_dmat, data->map);
1152 if (ring->spare_map != NULL) {
1153 bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1154 ring->spare_map = NULL;
1156 if (ring->data_dmat != NULL) {
1157 bus_dma_tag_destroy(ring->data_dmat);
1158 ring->data_dmat = NULL;
/*
 * Allocate one TX ring: the TFD descriptor array (256-byte aligned),
 * and — only for rings up to the command queue — the per-slot device
 * command buffers plus a data DMA tag and per-slot maps.  The command
 * queue gets a larger map size (IWM_RBUF_SIZE) since firmware commands
 * can exceed a normal frame.  Failures fall through to 'fail:' which
 * calls iwm_free_tx_ring().
 * NOTE(review): sampled view — error checks/gotos between visible
 * statements are not shown.
 */
1163 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1175 /* Allocate TX descriptors (256-byte aligned). */
1176 size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1177 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1179 device_printf(sc->sc_dev,
1180 "could not allocate TX ring DMA memory\n");
1183 ring->desc = ring->desc_dma.vaddr;
1186 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
1187 * to allocate commands space for other rings.
1189 if (qid > IWM_MVM_CMD_QUEUE)
1192 size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1193 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1195 device_printf(sc->sc_dev,
1196 "could not allocate TX cmd DMA memory\n");
1199 ring->cmd = ring->cmd_dma.vaddr;
1201 /* FW commands may require more mapped space than packets. */
1202 if (qid == IWM_MVM_CMD_QUEUE) {
1203 maxsize = IWM_RBUF_SIZE;
/* Data queues: leave room for TB0/TB1 bookkeeping segments. */
1207 nsegments = IWM_MAX_SCATTER - 2;
1210 error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1211 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
1212 nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
1214 device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
/*
 * Precompute each slot's command/scratch physical addresses by
 * walking the contiguous cmd_dma region one iwm_device_cmd at a
 * time; the KASSERT below verifies the walk consumed exactly the
 * allocated size.
 */
1218 paddr = ring->cmd_dma.paddr;
1219 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1220 struct iwm_tx_data *data = &ring->data[i];
1222 data->cmd_paddr = paddr;
1223 data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1224 + offsetof(struct iwm_tx_cmd, scratch);
1225 paddr += sizeof(struct iwm_device_cmd);
1227 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1229 device_printf(sc->sc_dev,
1230 "could not create TX buf DMA map\n");
1234 KASSERT(paddr == ring->cmd_dma.paddr + size,
1235 ("invalid physical address"));
1238 fail: iwm_free_tx_ring(sc, ring);
/*
 * Drop every in-flight mbuf on the ring, zero the TX descriptors, and
 * clear this queue's bit in qfullmsk so the stack may transmit again.
 * DMA resources (tag/maps) are kept — this is a reset, not a free.
 */
1243 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1247 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1248 struct iwm_tx_data *data = &ring->data[i];
1250 if (data->m != NULL) {
1251 bus_dmamap_sync(ring->data_dmat, data->map,
1252 BUS_DMASYNC_POSTWRITE);
1253 bus_dmamap_unload(ring->data_dmat, data->map);
1258 /* Clear TX descriptors. */
1259 memset(ring->desc, 0, ring->desc_dma.size);
1260 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1261 BUS_DMASYNC_PREWRITE);
1262 sc->qfullmsk &= ~(1 << ring->qid);
/*
 * Counterpart of iwm_alloc_tx_ring(): free descriptor and command DMA
 * memory, unload/destroy each slot's map, then destroy the data DMA
 * tag.  NULL-guarded so it is safe on partially constructed rings.
 */
1268 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1272 iwm_dma_contig_free(&ring->desc_dma);
1273 iwm_dma_contig_free(&ring->cmd_dma);
1275 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1276 struct iwm_tx_data *data = &ring->data[i];
1278 if (data->m != NULL) {
1279 bus_dmamap_sync(ring->data_dmat, data->map,
1280 BUS_DMASYNC_POSTWRITE);
1281 bus_dmamap_unload(ring->data_dmat, data->map);
1285 if (data->map != NULL) {
1286 bus_dmamap_destroy(ring->data_dmat, data->map);
1290 if (ring->data_dmat != NULL) {
1291 bus_dma_tag_destroy(ring->data_dmat);
1292 ring->data_dmat = NULL;
1297 * High-level hardware frobbing routines
/*
 * Enable the default interrupt set: cache the mask in the softc (so
 * iwm_restore_interrupts() can re-apply it) and program it into the
 * CSR interrupt mask register.
 */
1301 iwm_enable_interrupts(struct iwm_softc *sc)
1303 sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1304 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
/* Re-program the last interrupt mask cached by iwm_enable_interrupts(). */
1308 iwm_restore_interrupts(struct iwm_softc *sc)
1310 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
/*
 * Mask all interrupts, then acknowledge any that are already pending
 * in both the main and FH (flow handler) status registers so none
 * fire spuriously when interrupts are re-enabled later.
 */
1314 iwm_disable_interrupts(struct iwm_softc *sc)
1316 /* disable interrupts */
1317 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1319 /* acknowledge all interrupts */
1320 IWM_WRITE(sc, IWM_CSR_INT, ~0);
1321 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
/*
 * (Re)initialize ICT interrupt handling: with interrupts masked, zero
 * the ICT table, point the hardware at its physical address (shifted
 * per IWM_ICT_PADDR_SHIFT, i.e. 4KB aligned), flag the driver as
 * using ICT mode, then ack and re-enable interrupts.
 */
1325 iwm_ict_reset(struct iwm_softc *sc)
1327 iwm_disable_interrupts(sc);
1329 /* Reset ICT table. */
1330 memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1333 /* Set physical address of ICT table (4KB aligned). */
1334 IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1335 IWM_CSR_DRAM_INT_TBL_ENABLE
1336 | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1337 | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1338 | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1340 /* Switch to ICT interrupt mode in driver. */
1341 sc->sc_flags |= IWM_FLAG_USE_ICT;
1343 /* Re-enable interrupts. */
1344 IWM_WRITE(sc, IWM_CSR_INT, ~0);
1345 iwm_enable_interrupts(sc);
1348 /* iwlwifi pcie/trans.c */
1351 * Since this .. hard-resets things, it's time to actually
1352 * mark the first vap (if any) as having no mac context.
1353 * It's annoying, but since the driver is potentially being
1354 * stop/start'ed whilst active (thanks openbsd port!) we
1355 * have to correctly track this.
/*
 * Full device stop: mask interrupts, mark the first vap's MAC context
 * as not uploaded (so a later start rebuilds it), leave ICT mode, halt
 * the TX scheduler and all TX/RX DMA channels, reset the rings, power
 * down busmaster DMA clocks, drop the MAC-access request, and finally
 * soft-reset the on-board processor.  RF-kill interrupt stays armed so
 * switch changes are still seen while stopped.
 * NOTE(review): sampled view — some intervening statements (e.g. the
 * vap NULL check around IWM_VAP(), loop bodies) are not visible here.
 */
1358 iwm_stop_device(struct iwm_softc *sc)
1360 struct ieee80211com *ic = &sc->sc_ic;
1361 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1365 /* tell the device to stop sending interrupts */
1366 iwm_disable_interrupts(sc);
1369 * FreeBSD-local: mark the first vap as not-uploaded,
1370 * so the next transition through auth/assoc
1371 * will correctly populate the MAC context.
1374 struct iwm_vap *iv = IWM_VAP(vap);
1375 iv->is_uploaded = 0;
1378 /* device going down, Stop using ICT table */
1379 sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1381 /* stop tx and rx. tx and rx bits, as usual, are from if_iwn */
1383 iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1385 if (iwm_nic_lock(sc)) {
1386 /* Stop each Tx DMA channel */
1387 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1389 IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1390 mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1393 /* Wait for DMA channels to be idle */
1394 if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1396 device_printf(sc->sc_dev,
1397 "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1398 IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1402 iwm_disable_rx_dma(sc);
1405 iwm_reset_rx_ring(sc, &sc->rxq);
1407 /* Reset all TX rings. */
1408 for (qid = 0; qid < nitems(sc->txq); qid++)
1409 iwm_reset_tx_ring(sc, &sc->txq[qid]);
1412 * Power-down device's busmaster DMA clocks
1414 iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1417 /* Make sure (redundant) we've released our request to stay awake */
1418 IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1419 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1421 /* Stop the device, and put it in low power state */
1424 /* Upon stop, the APM issues an interrupt if HW RF kill is set.
1425 * Clean again the interrupt here
1427 iwm_disable_interrupts(sc);
1428 /* stop and reset the on-board processor */
1429 IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1432 * Even if we stop the HW, we still want the RF kill
1435 iwm_enable_rfkill_int(sc);
1436 iwm_check_rfkill(sc);
1439 /* iwlwifi: mvm/ops.c */
/*
 * Program IWM_CSR_HW_IF_CONFIG_REG from the firmware PHY configuration:
 * extract radio type/step/dash from the phy_config word, merge in the
 * MAC step/dash decoded from sc_hw_rev, and write the combined value.
 * On 7000-family parts also apply the Early-PCIe-power-off workaround
 * described in the comment below.
 */
1441 iwm_mvm_nic_config(struct iwm_softc *sc)
1443 uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1444 uint32_t reg_val = 0;
1445 uint32_t phy_config = iwm_mvm_get_phy_config(sc);
1447 radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1448 IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1449 radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1450 IWM_FW_PHY_CFG_RADIO_STEP_POS;
1451 radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1452 IWM_FW_PHY_CFG_RADIO_DASH_POS;
/* MAC revision bits from the hardware revision register. */
1455 reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1456 IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1457 reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1458 IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1460 /* radio configuration */
1461 reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1462 reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1463 reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1465 IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1467 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1468 "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1469 radio_cfg_step, radio_cfg_dash);
1472 * W/A : NIC is stuck in a reset state after Early PCIe power off
1473 * (PCIe power is lost before PERST# is asserted), causing ME FW
1474 * to lose ownership and not being able to obtain it back.
1476 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1477 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1478 IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1479 ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
/*
 * Bring up the RX DMA engine (modeled on if_iwn): under the NIC lock,
 * clear the shared status area, stop/reset RX DMA pointers, program
 * the ring and status physical addresses, enable the channel with 4KB
 * RBs and interrupt coalescing, then seed the write pointer at 8 as
 * iwlwifi prescribes.
 */
1484 iwm_nic_rx_init(struct iwm_softc *sc)
1486 if (!iwm_nic_lock(sc))
1490 * Initialize RX ring. This is from the iwn driver.
1492 memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1495 iwm_disable_rx_dma(sc);
1496 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1497 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1498 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1499 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1501 /* Set physical address of RX ring (256-byte aligned). */
1503 IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1505 /* Set physical address of RX status (16-byte aligned). */
1507 IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
/* Enable the RX channel; flag meanings per iwlwifi FH docs. */
1510 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1511 IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
1512 IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | /* HW bug */
1513 IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
1514 IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
1515 (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1516 IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
1517 IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1519 IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1521 /* W/A for interrupt coalescing bug in 7260 and 3160 */
1522 if (sc->cfg->host_interrupt_operation_mode)
1523 IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1526 * Thus sayeth el jefe (iwlwifi) via a comment:
1528 * This value should initially be 0 (before preparing any
1529 * RBs), should be 8 after preparing the first 8 RBs (for example)
1531 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
/*
 * Bring up the TX side: under the NIC lock, park the TX scheduler,
 * program the "keep warm" page address, point the hardware at each TX
 * ring's descriptor base (paddr >> 8, i.e. 256-byte aligned), and
 * enable the scheduler's auto-active mode.
 */
1539 iwm_nic_tx_init(struct iwm_softc *sc)
1543 if (!iwm_nic_lock(sc))
1546 /* Deactivate TX scheduler. */
1547 iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1549 /* Set physical address of "keep warm" page (16-byte aligned). */
1550 IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1552 /* Initialize TX rings. */
1553 for (qid = 0; qid < nitems(sc->txq); qid++) {
1554 struct iwm_tx_ring *txq = &sc->txq[qid];
1556 /* Set physical address of TX ring (256-byte aligned). */
1557 IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1558 txq->desc_dma.paddr >> 8);
1559 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1560 "%s: loading ring %d descriptors (%p) at %lx\n",
1563 (unsigned long) (txq->desc_dma.paddr >> 8));
1566 iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
/*
 * Composite NIC bring-up: family-specific prep (7000), NIC config,
 * then RX and TX initialization; finally enable shadow registers.
 * Returns the first non-zero error from the RX/TX init steps.
 */
1574 iwm_nic_init(struct iwm_softc *sc)
1579 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1582 iwm_mvm_nic_config(sc);
1584 if ((error = iwm_nic_rx_init(sc)) != 0)
1588 * Ditto for TX, from iwn
1590 if ((error = iwm_nic_tx_init(sc)) != 0)
1593 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1594 "%s: shadow registers enabled\n", __func__);
1595 IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
/*
 * Map net80211 WME access categories to hardware TX FIFO numbers.
 * (Initializer entries are outside this sampled view.)
 */
1600 const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
/*
 * Activate a hardware TX queue and bind it to a FIFO.  The command
 * queue is configured directly via scheduler PRPH registers (it must
 * work before firmware command plumbing exists); all other queues are
 * configured by sending an IWM_SCD_QUEUE_CFG command to the firmware.
 * Requires the NIC lock for the register paths.
 */
1608 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1610 if (!iwm_nic_lock(sc)) {
1611 device_printf(sc->sc_dev,
1612 "%s: cannot enable txq %d\n",
/* Reset the queue's hardware write pointer. */
1618 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1620 if (qid == IWM_MVM_CMD_QUEUE) {
1621 /* unactivate before configuration */
1622 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1623 (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1624 | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1626 iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1628 iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1630 iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1631 /* Set scheduler window size and frame limit. */
1633 sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1635 ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1636 IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1637 ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1638 IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1640 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1641 (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1642 (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1643 (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1644 IWM_SCD_QUEUE_STTS_REG_MSK);
1646 struct iwm_scd_txq_cfg_cmd cmd;
1651 memset(&cmd, 0, sizeof(cmd));
1652 cmd.scd_queue = qid;
1654 cmd.sta_id = sta_id;
1657 cmd.window = IWM_FRAME_LIMIT;
1659 error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1662 device_printf(sc->sc_dev,
1663 "cannot enable txq %d\n", qid);
1667 if (!iwm_nic_lock(sc))
/*
 * NOTE(review): ORing in 'qid' rather than '(1 << qid)' looks
 * suspicious for a per-queue enable bitmap — confirm against the
 * Linux iwlwifi SCD_EN_CTRL usage before changing.
 */
1671 iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1672 iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1676 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1677 __func__, qid, fifo);
/*
 * Firmware-alive fixup: verify the scheduler SRAM base address matches
 * what the ALIVE notification reported, clear scheduler context state
 * in SRAM, program the scheduler DRAM base (1KB aligned), enable the
 * command queue on FIFO 7, turn on all TX DMA channels, and enable
 * L1-Active where the family allows it.
 */
1683 iwm_post_alive(struct iwm_softc *sc)
1689 if (!iwm_nic_lock(sc))
1692 base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1693 if (sc->sched_base != base) {
1694 device_printf(sc->sc_dev,
1695 "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1696 __func__, sc->sched_base, base);
1701 /* Clear TX scheduler state in SRAM. */
1702 nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1703 IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
1705 error = iwm_write_mem(sc,
1706 sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1711 /* Set physical address of TX scheduler rings (1KB aligned). */
1712 iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1714 iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1718 /* enable command channel */
1719 error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
1723 if (!iwm_nic_lock(sc))
/* Activate the TX scheduler for all FIFOs. */
1726 iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1728 /* Enable DMA channels. */
1729 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1730 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1731 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1732 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
/* Auto-retry on scheduler-empty condition (chicken bit). */
1735 IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1736 IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1738 /* Enable L1-Active */
1739 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
1740 iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1741 IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1750 * NVM read access and content parsing. We do not support
1751 * external NVM or writing NVM.
/* Chunk granularity for NVM_ACCESS reads (2KB per command). */
1755 /* Default NVM size to read */
1756 #define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
1758 #define IWM_NVM_WRITE_OPCODE 1
1759 #define IWM_NVM_READ_OPCODE 0
/* Firmware status codes returned in the NVM_ACCESS response. */
1761 /* load nvm chunk response */
1763 IWM_READ_NVM_CHUNK_SUCCEED = 0,
1764 IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
/*
 * Read one chunk of an NVM section via a synchronous NVM_ACCESS
 * firmware command.  On success copies up to 'length' bytes into
 * data + offset and reports the actual count through *len.  A
 * NOT_VALID_ADDRESS status at a non-zero offset is treated as the
 * normal end-of-section condition, not an error.  The response
 * buffer is released via iwm_free_resp() on all paths shown.
 */
1768 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1769 uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1771 struct iwm_nvm_access_cmd nvm_access_cmd = {
1772 .offset = htole16(offset),
1773 .length = htole16(length),
1774 .type = htole16(section),
1775 .op_code = IWM_NVM_READ_OPCODE,
1777 struct iwm_nvm_access_resp *nvm_resp;
1778 struct iwm_rx_packet *pkt;
1779 struct iwm_host_cmd cmd = {
1780 .id = IWM_NVM_ACCESS_CMD,
1781 .flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1782 .data = { &nvm_access_cmd, },
1784 int ret, bytes_read, offset_read;
1787 cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1789 ret = iwm_send_cmd(sc, &cmd);
1791 device_printf(sc->sc_dev,
1792 "Could not send NVM_ACCESS command (error=%d)\n", ret);
1798 /* Extract NVM response */
1799 nvm_resp = (void *)pkt->data;
1800 ret = le16toh(nvm_resp->status);
1801 bytes_read = le16toh(nvm_resp->length);
1802 offset_read = le16toh(nvm_resp->offset);
1803 resp_data = nvm_resp->data;
1805 if ((offset != 0) &&
1806 (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1808 * meaning of NOT_VALID_ADDRESS:
1809 * driver try to read chunk from address that is
1810 * multiple of 2K and got an error since addr is empty.
1811 * meaning of (offset != 0): driver already
1812 * read valid data from another chunk so this case
1815 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1816 "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
1821 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1822 "NVM access command failed with status %d\n", ret);
/* Sanity-check the response against what was requested. */
1828 if (offset_read != offset) {
1829 device_printf(sc->sc_dev,
1830 "NVM ACCESS response with invalid offset %d\n",
1836 if (bytes_read > length) {
1837 device_printf(sc->sc_dev,
1838 "NVM ACCESS response with too much data "
1839 "(%d bytes requested, %d bytes received)\n",
1840 length, bytes_read);
1845 /* Write data to NVM */
1846 memcpy(data + offset, resp_data, bytes_read);
1850 iwm_free_resp(sc, &cmd);
1855 * Reads an NVM section completely.
1856 * NICs prior to 7000 family don't have a real NVM, but just read
1857 * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1858 * by uCode, we need to manually check in this case that we don't
1859 * overflow and try to read more than the EEPROM size.
1860 * For 7000 family NICs, we supply the maximal size we can read, and
1861 * the uCode fills the response with as much data as we can,
1862 * without overflowing, so no check is needed.
/*
 * NOTE(review): sampled view — seglen's initialization before the
 * while loop (and the offset/len updates inside it) are not visible
 * here; presumably seglen = length precedes the loop.  Verify against
 * the full source.
 */
1865 iwm_nvm_read_section(struct iwm_softc *sc,
1866 uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1868 uint16_t seglen, length, offset = 0;
1871 /* Set nvm section read length */
1872 length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1876 /* Read the NVM until exhausted (reading less than requested) */
1877 while (seglen == length) {
1878 /* Check no memory assumptions fail and cause an overflow */
1879 if ((size_read + offset + length) >
1880 sc->cfg->eeprom_size) {
1881 device_printf(sc->sc_dev,
1882 "EEPROM size is too small for NVM\n");
1886 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1888 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1889 "Cannot read NVM from section %d offset %d, length %d\n",
1890 section, offset, length);
1896 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1897 "NVM section %d read completed\n", section);
1903 * BEGIN IWM_NVM_PARSE
1906 /* iwlwifi/iwl-nvm-parse.c */
1908 /* NVM offsets (in words) definitions */
1909 enum iwm_nvm_offsets {
1910 /* NVM HW-Section offset (in words) definitions */
1913 /* NVM SW-Section offset (in words) definitions */
1914 IWM_NVM_SW_SECTION = 0x1C0,
1915 IWM_NVM_VERSION = 0,
/* Channel flags live at absolute word 0x1E0, stored SW-section-relative. */
1919 IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1921 /* NVM calibration section offset (in words) definitions */
1922 IWM_NVM_CALIB_SECTION = 0x2B8,
1923 IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
/* 8000-family layout differs; offsets are per-section here. */
1926 enum iwm_8000_nvm_offsets {
1927 /* NVM HW-Section offset (in words) definitions */
1928 IWM_HW_ADDR0_WFPM_8000 = 0x12,
1929 IWM_HW_ADDR1_WFPM_8000 = 0x16,
1930 IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1931 IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1932 IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1934 /* NVM SW-Section offset (in words) definitions */
1935 IWM_NVM_SW_SECTION_8000 = 0x1C0,
1936 IWM_NVM_VERSION_8000 = 0,
1937 IWM_RADIO_CFG_8000 = 0,
1939 IWM_N_HW_ADDRS_8000 = 3,
1941 /* NVM REGULATORY -Section offset (in words) definitions */
1942 IWM_NVM_CHANNELS_8000 = 0,
1943 IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1944 IWM_NVM_LAR_OFFSET_8000 = 0x507,
1945 IWM_NVM_LAR_ENABLED_8000 = 0x7,
1947 /* NVM calibration section offset (in words) definitions */
1948 IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1949 IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1952 /* SKU Capabilities (actual values from NVM definition) */
1954 IWM_NVM_SKU_CAP_BAND_24GHZ = (1 << 0),
1955 IWM_NVM_SKU_CAP_BAND_52GHZ = (1 << 1),
1956 IWM_NVM_SKU_CAP_11N_ENABLE = (1 << 2),
1957 IWM_NVM_SKU_CAP_11AC_ENABLE = (1 << 3),
1960 /* radio config bits (actual values from NVM definition) */
1961 #define IWM_NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */
1962 #define IWM_NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
1963 #define IWM_NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
1964 #define IWM_NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
1965 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
1966 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
/* 8000-family packs the same fields differently (wider type field). */
1968 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x) (x & 0xF)
1969 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x) ((x >> 4) & 0xF)
1970 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x) ((x >> 8) & 0xF)
1971 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x) ((x >> 12) & 0xFFF)
1972 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x) ((x >> 24) & 0xF)
1973 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x) ((x >> 28) & 0xF)
1975 #define DEFAULT_MAX_TX_POWER 16
1978 * enum iwm_nvm_channel_flags - channel flags in NVM
1979 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1980 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1981 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1982 * @IWM_NVM_CHANNEL_RADAR: radar detection required
1983 * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1984 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1985 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1986 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1987 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1988 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
/* Note: bit 2 is unassigned here; values mirror the NVM layout. */
1990 enum iwm_nvm_channel_flags {
1991 IWM_NVM_CHANNEL_VALID = (1 << 0),
1992 IWM_NVM_CHANNEL_IBSS = (1 << 1),
1993 IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1994 IWM_NVM_CHANNEL_RADAR = (1 << 4),
1995 IWM_NVM_CHANNEL_DFS = (1 << 7),
1996 IWM_NVM_CHANNEL_WIDE = (1 << 8),
1997 IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1998 IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1999 IWM_NVM_CHANNEL_160MHZ = (1 << 11),
2003 * Translate EEPROM flags to net80211.
/*
 * Maps NVM channel flag bits to net80211 IEEE80211_CHAN_* flags:
 * no ACTIVE -> passive scan only; no IBSS -> no adhoc; RADAR -> DFS
 * (which also forbids adhoc).  Returns the accumulated nflags.
 */
2006 iwm_eeprom_channel_flags(uint16_t ch_flags)
2011 if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
2012 nflags |= IEEE80211_CHAN_PASSIVE;
2013 if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
2014 nflags |= IEEE80211_CHAN_NOADHOC;
2015 if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
2016 nflags |= IEEE80211_CHAN_DFS;
/* DFS channels also disallow adhoc operation. */
2018 nflags |= IEEE80211_CHAN_NOADHOC;
/*
 * Register with net80211 every valid NVM channel in [ch_idx, ch_num),
 * using the family-appropriate channel number table and the flags
 * translated by iwm_eeprom_channel_flags().  Channels lacking the
 * VALID bit are skipped with a debug message.
 */
2025 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
2026 int maxchans, int *nchans, int ch_idx, size_t ch_num,
2027 const uint8_t bands[])
2029 const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
2035 for (; ch_idx < ch_num; ch_idx++) {
2036 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2037 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2038 ieee = iwm_nvm_channels[ch_idx];
2040 ieee = iwm_nvm_channels_8000[ch_idx];
2042 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2043 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2044 "Ch. %d Flags %x [%sGHz] - No traffic\n",
2046 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2051 nflags = iwm_eeprom_channel_flags(ch_flags);
2052 error = ieee80211_add_channel(chans, maxchans, nchans,
2053 ieee, 0, 0, nflags, bands);
2057 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2058 "Ch. %d Flags %x [%sGHz] - Added\n",
2060 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
/*
 * net80211 channel-map callback: build the channel list from NVM data.
 * 2 GHz channels 1-13 get 11b+11g, channel 14 is 11b-only, and the
 * 5 GHz block is added as 11a only when the SKU enables the 52 GHz
 * band.
 */
2066 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2067 struct ieee80211_channel chans[])
2069 struct iwm_softc *sc = ic->ic_softc;
2070 struct iwm_nvm_data *data = sc->nvm_data;
2071 uint8_t bands[IEEE80211_MODE_BYTES];
2074 memset(bands, 0, sizeof(bands));
2075 /* 1-13: 11b/g channels. */
2076 setbit(bands, IEEE80211_MODE_11B);
2077 setbit(bands, IEEE80211_MODE_11G);
2078 iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2079 IWM_NUM_2GHZ_CHANNELS - 1, bands);
2081 /* 14: 11b channel only. */
2082 clrbit(bands, IEEE80211_MODE_11G);
2083 iwm_add_channel_band(sc, chans, maxchans, nchans,
2084 IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2086 if (data->sku_cap_band_52GHz_enable) {
/* Table length differs per family; pick the matching one. */
2087 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2088 ch_num = nitems(iwm_nvm_channels);
2090 ch_num = nitems(iwm_nvm_channels_8000);
2091 memset(bands, 0, sizeof(bands));
2092 setbit(bands, IEEE80211_MODE_11A);
2093 iwm_add_channel_band(sc, chans, maxchans, nchans,
2094 IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
/*
 * 8000-family MAC address selection: prefer the MAC-override NVM
 * section, but fall back to the OTP address read from the WFMP PRPH
 * registers when the override is the reserved/broadcast/multicast or
 * otherwise invalid address.  The WFMP words are byte-swapped into
 * hw_addr as shown.  If no source yields an address, hw_addr is
 * zeroed and an error is reported.
 */
2099 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2100 const uint16_t *mac_override, const uint16_t *nvm_hw)
2102 const uint8_t *hw_addr;
/* Sentinel address meaning "no override programmed". */
2105 static const uint8_t reserved_mac[] = {
2106 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2109 hw_addr = (const uint8_t *)(mac_override +
2110 IWM_MAC_ADDRESS_OVERRIDE_8000);
2113 * Store the MAC address from MAO section.
2114 * No byte swapping is required in MAO section
2116 IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2119 * Force the use of the OTP MAC address in case of reserved MAC
2120 * address in the NVM, or if address is given but invalid.
2122 if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2123 !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2124 iwm_is_valid_ether_addr(data->hw_addr) &&
2125 !IEEE80211_IS_MULTICAST(data->hw_addr))
2128 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2129 "%s: mac address from nvm override section invalid\n",
2134 /* read the mac address from WFMP registers */
2135 uint32_t mac_addr0 =
2136 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2137 uint32_t mac_addr1 =
2138 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2140 hw_addr = (const uint8_t *)&mac_addr0;
2141 data->hw_addr[0] = hw_addr[3];
2142 data->hw_addr[1] = hw_addr[2];
2143 data->hw_addr[2] = hw_addr[1];
2144 data->hw_addr[3] = hw_addr[0];
2146 hw_addr = (const uint8_t *)&mac_addr1;
2147 data->hw_addr[4] = hw_addr[1];
2148 data->hw_addr[5] = hw_addr[0];
2153 device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2154 memset(data->hw_addr, 0, sizeof(data->hw_addr));
/*
 * Return the SKU capabilities word: a 16-bit value in the SW section
 * for pre-8000 parts, a 32-bit value in the PHY_SKU section for 8000.
 */
2158 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2159 const uint16_t *phy_sku)
2161 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2162 return le16_to_cpup(nvm_sw + IWM_SKU);
2164 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
/*
 * Return the NVM version: 16-bit for pre-8000 families, 32-bit at the
 * family-8000 offset otherwise.
 */
2168 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2170 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2171 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2173 return le32_to_cpup((const uint32_t *)(nvm_sw +
2174 IWM_NVM_VERSION_8000));
/*
 * Return the raw radio configuration word: from the SW section for
 * pre-8000 parts, from the PHY_SKU section for family 8000.
 */
2178 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2179 const uint16_t *phy_sku)
2181 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2182 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG)
2184 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
/*
 * Return the number of reserved hardware MAC addresses; on family
 * 8000 the 32-bit field is masked with IWM_N_HW_ADDR_MASK.
 */
2188 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2192 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2193 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2195 n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2197 return n_hw_addr & IWM_N_HW_ADDR_MASK;
/*
 * Decode the radio configuration word into the nvm_data fields using
 * the family-appropriate bit layout (the *_8000 macros differ from
 * the legacy ones; 8000 also carries valid TX/RX antenna masks).
 */
2201 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2204 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2205 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2206 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2207 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2208 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2212 /* set the radio configuration for family 8000 */
2213 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2214 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2215 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2216 data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2217 data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2218 data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
/*
 * Fill data->hw_addr: pre-8000 parts store it little-endian 16-bit
 * in the HW section (hence the pairwise byte swap), family 8000 goes
 * through iwm_set_hw_address_family_8000().  Returns an error if the
 * resulting address is invalid.
 */
2222 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2223 const uint16_t *nvm_hw, const uint16_t *mac_override)
2225 #ifdef notyet /* for FAMILY 9000 */
2226 if (cfg->mac_addr_from_csr) {
2227 iwm_set_hw_address_from_csr(sc, data);
2230 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2231 const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2233 /* The byte order is little endian 16 bit, meaning 214365 */
2234 data->hw_addr[0] = hw_addr[1];
2235 data->hw_addr[1] = hw_addr[0];
2236 data->hw_addr[2] = hw_addr[3];
2237 data->hw_addr[3] = hw_addr[2];
2238 data->hw_addr[4] = hw_addr[5];
2239 data->hw_addr[5] = hw_addr[4];
2241 iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2244 if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2245 device_printf(sc->sc_dev, "no valid mac address was found\n");
/*
 * Build an iwm_nvm_data from the raw NVM section pointers: allocate
 * it with a family-sized trailing channel-flags array, decode the
 * version, radio config, SKU bits and MAC address, and copy the
 * channel flags from the SW (7000) or REGULATORY (8000) section.
 * Returns NULL on allocation failure or if no valid MAC was found
 * (the allocation is freed in the latter case).
 */
2252 static struct iwm_nvm_data *
2253 iwm_parse_nvm_data(struct iwm_softc *sc,
2254 const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2255 const uint16_t *nvm_calib, const uint16_t *mac_override,
2256 const uint16_t *phy_sku, const uint16_t *regulatory)
2258 struct iwm_nvm_data *data;
2259 uint32_t sku, radio_cfg;
2261 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2262 data = malloc(sizeof(*data) +
2263 IWM_NUM_CHANNELS * sizeof(uint16_t),
2264 M_DEVBUF, M_NOWAIT | M_ZERO);
2266 data = malloc(sizeof(*data) +
2267 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2268 M_DEVBUF, M_NOWAIT | M_ZERO);
2273 data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2275 radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2276 iwm_set_radio_cfg(sc, data, radio_cfg);
2278 sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2279 data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2280 data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
/* 11n deliberately forced off regardless of the SKU bit. */
2281 data->sku_cap_11n_enable = 0;
2283 data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2285 /* If no valid mac address was found - bail out */
2286 if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2287 free(data, M_DEVBUF);
2291 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2292 memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2293 IWM_NUM_CHANNELS * sizeof(uint16_t));
/*
 * NOTE(review): '®ulatory' below looks like mojibake for
 * '&regulatory' (0xAE vs the two bytes '&r') — restore from the
 * upstream source before compiling.
 */
2295 memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2296 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
/* Release an iwm_nvm_data allocated by iwm_parse_nvm_data(). */
2303 iwm_free_nvm_data(struct iwm_nvm_data *data)
2306 free(data, M_DEVBUF);
/*
 * Validate that the family-mandatory NVM sections are present (SW+HW
 * for 7000; SW+REGULATORY, one of HW/MAC_OVERRIDE, and PHY_SKU for
 * 8000), then hand the typed section pointers to iwm_parse_nvm_data().
 * Returns NULL (with a diagnostic) when a required section is missing;
 * panics on an unknown device family.
 */
2309 static struct iwm_nvm_data *
2310 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2312 const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2314 /* Checking for required sections */
2315 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2316 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2317 !sections[sc->cfg->nvm_hw_section_num].data) {
2318 device_printf(sc->sc_dev,
2319 "Can't parse empty OTP/NVM sections\n");
2322 } else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2323 /* SW and REGULATORY sections are mandatory */
2324 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2325 !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2326 device_printf(sc->sc_dev,
2327 "Can't parse empty OTP/NVM sections\n");
2330 /* MAC_OVERRIDE or at least HW section must exist */
2331 if (!sections[sc->cfg->nvm_hw_section_num].data &&
2332 !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2333 device_printf(sc->sc_dev,
2334 "Can't parse mac_address, empty sections\n");
2338 /* PHY_SKU section is mandatory in B0 */
2339 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2340 device_printf(sc->sc_dev,
2341 "Can't parse phy_sku in B0, empty sections\n");
2345 panic("unknown device family %d\n", sc->cfg->device_family);
2348 hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2349 sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2350 calib = (const uint16_t *)
2351 sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2352 regulatory = (const uint16_t *)
2353 sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2354 mac_override = (const uint16_t *)
2355 sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2356 phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2358 return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2359 phy_sku, regulatory);
/*
 * Read all NVM sections from the NIC and parse them into sc->nvm_data.
 *
 * Each section is read into a single scratch buffer of eeprom_size bytes
 * and then duplicated into its own M_DEVBUF allocation so that
 * iwm_parse_nvm_sections() can consume a stable per-section copy.
 * All per-section copies are freed before returning; sc->nvm_data keeps
 * the parsed result.  Return value conventions (errno-style int,
 * presumably) are in lines not shown here -- TODO confirm.
 */
2363 iwm_nvm_init(struct iwm_softc *sc)
2365 struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2366 int i, ret, section;
2367 uint32_t size_read = 0;
2368 uint8_t *nvm_buffer, *temp;
2371 memset(nvm_sections, 0, sizeof(nvm_sections));
/* Reject a config whose HW section index would overrun nvm_sections[]. */
2373 if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2376 /* load NVM values from nic */
2377 /* Read From FW NVM */
2378 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2380 nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2383 for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2384 /* we override the constness for initial read */
2385 ret = iwm_nvm_read_section(sc, section, nvm_buffer,
/* Keep a private copy; nvm_buffer is reused for the next section. */
2390 temp = malloc(len, M_DEVBUF, M_NOWAIT);
2395 memcpy(temp, nvm_buffer, len);
2397 nvm_sections[section].data = temp;
2398 nvm_sections[section].length = len;
2401 device_printf(sc->sc_dev, "OTP is blank\n");
2402 free(nvm_buffer, M_DEVBUF);
2404 sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2407 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2408 "nvm version = %x\n", sc->nvm_data->nvm_version);
/* The parsed nvm_data no longer references the section copies. */
2410 for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2411 if (nvm_sections[i].data != NULL)
2412 free(nvm_sections[i].data, M_DEVBUF);
2419 * Firmware loading gunk. This is kind of a weird hybrid between the
2420 * iwn driver and the Linux iwlwifi driver.
/*
 * Load one firmware section into device memory at dst_addr.
 *
 * The section is split into chunks of at most IWM_FH_MEM_TB_MAX_LENGTH
 * bytes (the FH transfer-buffer limit) and each chunk is DMA'd to the
 * device by iwm_firmware_load_chunk().
 */
2424 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
2425 const uint8_t *section, uint32_t byte_cnt)
2428 uint32_t chunk_sz, offset;
2430 chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
2432 for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
2434 const uint8_t *data;
2436 addr = dst_addr + offset;
/* Last chunk may be shorter than chunk_sz. */
2437 len = MIN(chunk_sz, byte_cnt - offset);
2438 data = section + offset;
2440 error = iwm_firmware_load_chunk(sc, addr, data, len);
/*
 * DMA one firmware chunk to device memory via the FH service channel.
 *
 * The chunk is staged in the pre-allocated DMA-safe bounce buffer
 * (sc->fw_dma), the service-channel TX DMA engine is programmed with the
 * buffer's physical address and destination SRAM address, and we then
 * sleep until sc_fw_chunk_done is set (by the interrupt path, not shown
 * here -- presumably the FH TX interrupt handler), with a 1-second
 * timeout.  Addresses in the extended range require temporarily setting
 * the LMPM "extended address space" chicken bit.
 */
2449 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2450 const uint8_t *chunk, uint32_t byte_cnt)
2452 struct iwm_dma_info *dma = &sc->fw_dma;
2455 /* Copy firmware chunk into pre-allocated DMA-safe memory. */
2456 memcpy(dma->vaddr, chunk, byte_cnt);
2457 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2459 if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2460 dst_addr <= IWM_FW_MEM_EXTENDED_END) {
2461 iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2462 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
/* Cleared here; set again by the chunk-completion interrupt. */
2465 sc->sc_fw_chunk_done = 0;
2467 if (!iwm_nic_lock(sc))
/* Pause the channel while (re)programming it. */
2470 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2471 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2472 IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2474 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2475 dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2476 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2477 (iwm_get_dma_hi_addr(dma->paddr)
2478 << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt)
2479 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2480 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2481 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2482 IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2483 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2484 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2485 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2486 IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2490 /* wait 1s for this segment to load */
2491 while (!sc->sc_fw_chunk_done)
2492 if ((error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz)) != 0)
2495 if (!sc->sc_fw_chunk_done) {
2496 device_printf(sc->sc_dev,
2497 "fw chunk addr 0x%x len %d failed to load\n",
2498 dst_addr, byte_cnt);
/* Undo the extended-address chicken bit set above. */
2501 if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2502 dst_addr <= IWM_FW_MEM_EXTENDED_END && iwm_nic_lock(sc)) {
2503 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2504 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
/*
 * Load the firmware sections belonging to one CPU (8000 family).
 *
 * Walks fws->fw_sect[] starting at *first_ucode_section, loading each
 * section until a separator marker is reached; CPU1_CPU2_SEPARATOR
 * delimits CPU1 from CPU2 sections, PAGING_SEPARATOR delimits CPU2
 * non-paged from CPU2 paging sections.  After each section the ucode is
 * notified through IWM_FH_UCODE_LOAD_STATUS, with a running bitmask
 * (sec_num doubles-plus-one each iteration).  On exit,
 * *first_ucode_section is advanced so the caller can load the next CPU.
 * The shift_param used for the status bits is set in lines not shown
 * here -- presumably 0 for CPU1 and 16 for CPU2; TODO confirm.
 */
2512 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
2513 int cpu, int *first_ucode_section)
2516 int i, error = 0, sec_num = 0x1;
2517 uint32_t val, last_read_idx = 0;
2524 *first_ucode_section = 0;
2527 (*first_ucode_section)++;
2530 for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2532 data = fws->fw_sect[i].fws_data;
2533 dlen = fws->fw_sect[i].fws_len;
2534 offset = fws->fw_sect[i].fws_devoff;
2537 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2539 * PAGING_SEPARATOR_SECTION delimiter - separate between
2540 * CPU2 non paged to CPU2 paging sec.
2542 if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2543 offset == IWM_PAGING_SEPARATOR_SECTION)
2546 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2547 "LOAD FIRMWARE chunk %d offset 0x%x len %d for cpu %d\n",
2548 i, offset, dlen, cpu);
/* Each section must fit in the firmware DMA bounce buffer. */
2550 if (dlen > sc->sc_fwdmasegsz) {
2551 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2552 "chunk %d too large (%d bytes)\n", i, dlen);
2555 error = iwm_firmware_load_sect(sc, offset, data, dlen);
2558 device_printf(sc->sc_dev,
2559 "could not load firmware chunk %d (error %d)\n",
2564 /* Notify the ucode of the loaded section number and status */
2565 if (iwm_nic_lock(sc)) {
2566 val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2567 val = val | (sec_num << shift_param);
2568 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2569 sec_num = (sec_num << 1) | 0x1;
2573 * The firmware won't load correctly without this delay.
2579 *first_ucode_section = last_read_idx;
/* Signal "all sections done" for this CPU to the ucode. */
2581 if (iwm_nic_lock(sc)) {
2583 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2585 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
/*
 * Load a complete (secured) firmware image on 8000-family hardware.
 *
 * Releases the CPU reset, then loads the CPU1 sections followed by the
 * CPU2 sections; first_ucode_section carries the resume index from the
 * CPU1 pass into the CPU2 pass.
 */
2593 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2595 struct iwm_fw_sects *fws;
2597 int first_ucode_section;
2599 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "loading ucode type %d\n",
2602 fws = &sc->sc_fw.fw_sects[ucode_type];
2604 /* configure the ucode to be ready to get the secured image */
2605 /* release CPU reset */
2606 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
2608 /* load to FW the binary Secured sections of CPU1 */
2609 error = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
2613 /* load to FW the binary sections of CPU2 */
2614 return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
/*
 * Load a complete firmware image on 7000-family hardware.
 *
 * Loads every section of the selected ucode image in order, then kicks
 * the ucode by clearing IWM_CSR_RESET.  Completion is signalled
 * asynchronously via sc_uc.uc_intr (waited on by the caller).
 */
2618 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2620 struct iwm_fw_sects *fws;
2626 sc->sc_uc.uc_intr = 0;
2628 fws = &sc->sc_fw.fw_sects[ucode_type];
2629 for (i = 0; i < fws->fw_count; i++) {
2630 data = fws->fw_sect[i].fws_data;
2631 dlen = fws->fw_sect[i].fws_len;
2632 offset = fws->fw_sect[i].fws_devoff;
2633 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2634 "LOAD FIRMWARE type %d offset %u len %d\n",
2635 ucode_type, offset, dlen);
/* Each section must fit in the firmware DMA bounce buffer. */
2636 if (dlen > sc->sc_fwdmasegsz) {
2637 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2638 "chunk %d too large (%d bytes)\n", i, dlen);
2641 error = iwm_firmware_load_sect(sc, offset, data, dlen);
2644 device_printf(sc->sc_dev,
2645 "could not load firmware chunk %u of %u "
2646 "(error=%d)\n", i, fws->fw_count, error);
/* Take the ucode out of reset; it starts booting the image now. */
2651 IWM_WRITE(sc, IWM_CSR_RESET, 0);
/*
 * Load the requested ucode image and wait for it to come alive.
 *
 * Dispatches to the family-specific loader, then sleeps in 100ms slices
 * (up to ~1s total) for the "ucode alive" interrupt to set
 * sc_uc.uc_intr.  On failure the 8000-family secure-boot CPU status
 * registers are dumped to aid diagnosis.  Finally sleeps ~1s more to
 * let the firmware finish initializing before it is touched.
 */
2657 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2661 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2662 error = iwm_load_firmware_8000(sc, ucode_type);
2664 error = iwm_load_firmware_7000(sc, ucode_type);
2668 /* wait for the firmware to load */
2669 for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
2670 error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
2672 if (error || !sc->sc_uc.uc_ok) {
2673 device_printf(sc->sc_dev, "could not load firmware\n");
2674 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2675 device_printf(sc->sc_dev, "cpu1 status: 0x%x\n",
2676 iwm_read_prph(sc, IWM_SB_CPU_1_STATUS));
2677 device_printf(sc->sc_dev, "cpu2 status: 0x%x\n",
2678 iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
2683 * Give the firmware some time to initialize.
2684 * Accessing it too early causes errors.
2686 msleep(&w, &sc->sc_mtx, 0, "iwmfwinit", hz);
2691 /* iwlwifi: pcie/trans.c */
/*
 * Bring the NIC up and boot the given ucode image.
 *
 * Acknowledges any pending interrupts, initializes the NIC, clears the
 * rfkill handshake bits (twice, belt-and-suspenders, mirroring the
 * Linux driver), enables host interrupts and loads the firmware.
 */
2693 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
/* Ack all pending interrupts before (re)initializing. */
2697 IWM_WRITE(sc, IWM_CSR_INT, ~0);
2699 if ((error = iwm_nic_init(sc)) != 0) {
2700 device_printf(sc->sc_dev, "unable to init nic\n");
2704 /* make sure rfkill handshake bits are cleared */
2705 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2706 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2707 IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2709 /* clear (again), then enable host interrupts */
2710 IWM_WRITE(sc, IWM_CSR_INT, ~0);
2711 iwm_enable_interrupts(sc);
2713 /* really make sure rfkill handshake bits are cleared */
2714 /* maybe we should write a few times more? just to make sure */
2715 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2716 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2718 /* Load the given image to the HW */
2719 return iwm_load_firmware(sc, ucode_type);
/*
 * Tell the firmware which TX antennas are valid, via a synchronous
 * IWM_TX_ANT_CONFIGURATION_CMD.
 */
2723 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2725 struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2726 .valid = htole32(valid_tx_ant),
2729 return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2730 IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2733 /* iwlwifi: mvm/fw.c */
/*
 * Send the PHY configuration command to the firmware.
 *
 * The PHY config word comes from iwm_mvm_get_phy_config(); the
 * calibration event/flow triggers are the defaults recorded for the
 * currently running ucode type (sc_uc_current).  Sent synchronously.
 */
2735 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2737 struct iwm_phy_cfg_cmd phy_cfg_cmd;
2738 enum iwm_ucode_type ucode_type = sc->sc_uc_current;
2740 /* Set parameters */
2741 phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
2742 phy_cfg_cmd.calib_control.event_trigger =
2743 sc->sc_default_calib[ucode_type].event_trigger;
2744 phy_cfg_cmd.calib_control.flow_trigger =
2745 sc->sc_default_calib[ucode_type].flow_trigger;
2747 IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2748 "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2749 return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2750 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
/*
 * Read the given ucode type from the firmware file, boot it, and run
 * post-alive setup.
 *
 * sc_uc_current is switched to the new type before starting the
 * firmware and restored to its previous value if the start fails.
 */
2754 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2755 enum iwm_ucode_type ucode_type)
2757 enum iwm_ucode_type old_type = sc->sc_uc_current;
2760 if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2761 device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
2766 sc->sc_uc_current = ucode_type;
2767 error = iwm_start_fw(sc, ucode_type);
2769 device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
/* Roll back so later loads see the previously-running type. */
2770 sc->sc_uc_current = old_type;
2774 error = iwm_post_alive(sc);
2776 device_printf(sc->sc_dev, "iwm_fw_alive: failed %d\n", error);
2786 * follows iwlwifi/fw.c
/*
 * Boot the INIT ucode and run its internal calibration sequence.
 *
 * Refuses to start (unless justnvm) while the hardware rfkill switch is
 * on.  Loads the INIT image, reads the NVM and copies the MAC address
 * into the ic; with justnvm set, NVM reading is presumably all that is
 * done (the early-return lines are not shown -- TODO confirm).
 * Otherwise it configures BT coex, the Smart FIFO, and the valid TX
 * antennas, sends the PHY configuration command to kick off the
 * calibrations, and then sleeps (2s slices) until the firmware posts
 * the init-complete notification (sc_init_complete, set by the
 * notification handler elsewhere).
 */
2789 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2793 /* do not operate with rfkill switch turned on */
2794 if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2795 device_printf(sc->sc_dev,
2796 "radio is disabled by hardware switch\n");
2800 sc->sc_init_complete = 0;
2801 if ((error = iwm_mvm_load_ucode_wait_alive(sc,
2802 IWM_UCODE_INIT)) != 0) {
2803 device_printf(sc->sc_dev, "failed to load init firmware\n");
2808 if ((error = iwm_nvm_init(sc)) != 0) {
2809 device_printf(sc->sc_dev, "failed to read nvm\n");
2812 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
2817 if ((error = iwm_send_bt_init_conf(sc)) != 0) {
2818 device_printf(sc->sc_dev,
2819 "failed to send bt coex configuration: %d\n", error);
2823 /* Init Smart FIFO. */
2824 error = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
2829 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2830 "%s: phy_txant=0x%08x, nvm_valid_tx_ant=0x%02x, valid=0x%02x\n",
2832 ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
2833 >> IWM_FW_PHY_CFG_TX_CHAIN_POS),
2834 sc->nvm_data->valid_tx_ant,
2835 iwm_fw_valid_tx_ant(sc));
2838 /* Send TX valid antennas before triggering calibrations */
2839 error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
2841 device_printf(sc->sc_dev,
2842 "failed to send antennas before calibration: %d\n", error);
2847 * Send phy configurations command to init uCode
2848 * to start the 16.0 uCode init image internal calibrations.
2850 if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
2851 device_printf(sc->sc_dev,
2852 "%s: failed to run internal calibration: %d\n",
2858 * Nothing to do but wait for the init complete notification
2861 while (!sc->sc_init_complete) {
2862 error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
2863 0, "iwminit", 2*hz);
2865 device_printf(sc->sc_dev, "init complete failed: %d\n",
2866 sc->sc_init_complete);
2871 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "init %scomplete\n",
2872 sc->sc_init_complete ? "" : "not ");
2881 /* (re)stock rx ring, called at init-time and at runtime */
/*
 * (Re)stock one RX ring slot with a fresh jumbo cluster mbuf.
 *
 * The new mbuf is DMA-loaded into the ring's spare map first; only on
 * success is the slot's old mapping torn down and the maps swapped, so
 * a failed allocation/load never leaves the slot without a buffer.
 * The hardware RX descriptor stores the buffer's physical address
 * shifted right by 8, hence the 256-byte alignment KASSERT.
 */
2883 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
2885 struct iwm_rx_ring *ring = &sc->rxq;
2886 struct iwm_rx_data *data = &ring->data[idx];
2888 bus_dmamap_t dmamap = NULL;
2889 bus_dma_segment_t seg;
2892 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
2896 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2897 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
2898 &seg, &nsegs, BUS_DMA_NOWAIT);
2900 device_printf(sc->sc_dev,
2901 "%s: can't map mbuf, error %d\n", __func__, error);
2905 if (data->m != NULL)
2906 bus_dmamap_unload(ring->data_dmat, data->map);
2908 /* Swap ring->spare_map with data->map */
2910 data->map = ring->spare_map;
2911 ring->spare_map = dmamap;
2913 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
2916 /* Update RX descriptor. */
2917 KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
2918 ring->desc[idx] = htole32(seg.ds_addr >> 8);
2919 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2920 BUS_DMASYNC_PREWRITE);
2928 /* iwlwifi: mvm/rx.c */
2929 #define IWM_RSSI_OFFSET 50
/*
 * Compute the received signal strength (dBm) from the legacy RX PHY
 * info fields (pre RX_ENERGY_API firmware).
 *
 * Extracts per-chain AGC and in-band RSSI values, converts each chain
 * to dBm as rssi - IWM_RSSI_OFFSET - agc (higher AGC = more radio gain
 * = weaker signal), and returns the stronger of the two chains.
 */
2931 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2933 int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
2934 uint32_t agc_a, agc_b;
2937 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
2938 agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
2939 agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
2941 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
2942 rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
2943 rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
2946 * dBm = rssi dB - agc dB - constant.
2947 * Higher AGC (higher radio gain) means lower signal.
2949 rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
2950 rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
2951 max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
2953 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2954 "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
2955 rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
2957 return max_rssi_dbm;
2960 /* iwlwifi: mvm/rx.c */
2962 * iwm_mvm_get_signal_strength - use new rx PHY INFO API
2963 * values are reported by the fw as positive values - need to negate
2964 * to obtain their dBM. Account for missing antennas by replacing 0
2965 * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
/*
 * Compute signal strength (dBm) using the newer RX_ENERGY_API PHY info.
 * Per-antenna energies are reported as positive magnitudes, so each is
 * negated; a reported 0 means "antenna missing" and is mapped to
 * -256 dBm so it never wins the max.  Returns the best of A/B/C.
 */
2968 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2970 int energy_a, energy_b, energy_c, max_energy;
2973 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
2974 energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
2975 IWM_RX_INFO_ENERGY_ANT_A_POS;
2976 energy_a = energy_a ? -energy_a : -256;
2977 energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
2978 IWM_RX_INFO_ENERGY_ANT_B_POS;
2979 energy_b = energy_b ? -energy_b : -256;
2980 energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
2981 IWM_RX_INFO_ENERGY_ANT_C_POS;
2982 energy_c = energy_c ? -energy_c : -256;
2983 max_energy = MAX(energy_a, energy_b);
2984 max_energy = MAX(max_energy, energy_c);
2986 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2987 "energy In A %d B %d C %d , and max %d\n",
2988 energy_a, energy_b, energy_c, max_energy);
/*
 * Handle an RX PHY info notification: cache the PHY stats in
 * sc_last_phy_info so the following MPDU notification
 * (iwm_mvm_rx_rx_mpdu) can use them.
 */
2994 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
2995 struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2997 struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
2999 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3000 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3002 memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3006 * Retrieve the average noise (in dBm) among receivers.
3009 iwm_get_noise(struct iwm_softc *sc,
3010 const struct iwm_mvm_statistics_rx_non_phy *stats)
3012 int i, total, nbant, noise;
3014 total = nbant = noise = 0;
3015 for (i = 0; i < 3; i++) {
3016 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3017 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3028 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3029 __func__, nbant, total);
3031 /* There should be at least one antenna but check anyway. */
3032 return (nbant == 0) ? -127 : (total / nbant) - 107;
3034 /* For now, just hard-code it to -96 to be safe */
3040 * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3042 * Handles the actual data of the Rx packet from the fw
/*
 * IWM_REPLY_RX_MPDU_CMD handler: process one received frame.
 *
 * The frame payload follows the iwm_rx_mpdu_res_start header inside the
 * RX packet; PHY information comes from the preceding PHY notification
 * cached in sc_last_phy_info.  The handler sanity-checks the DSP/CRC
 * status, computes RSSI (energy API or legacy, depending on firmware
 * capabilities), refills the RX ring slot, fills an ieee80211_rx_stats
 * and radiotap header, and hands the mbuf to net80211 (per-node input
 * if a node matched, otherwise input-all).  Errors count as ic_ierrors.
 */
3045 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
3046 struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3048 struct ieee80211com *ic = &sc->sc_ic;
3049 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3050 struct ieee80211_frame *wh;
3051 struct ieee80211_node *ni;
3052 struct ieee80211_rx_stats rxs;
3054 struct iwm_rx_phy_info *phy_info;
3055 struct iwm_rx_mpdu_res_start *rx_res;
3057 uint32_t rx_pkt_status;
3060 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3062 phy_info = &sc->sc_last_phy_info;
3063 rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3064 wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3065 len = le16toh(rx_res->byte_count);
/* Status word sits immediately after the frame payload. */
3066 rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
/* Point the ring mbuf at the payload; ring slot is refilled below. */
3069 m->m_data = pkt->data + sizeof(*rx_res);
3070 m->m_pkthdr.len = m->m_len = len;
3072 if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3073 device_printf(sc->sc_dev,
3074 "dsp size out of range [0,20]: %d\n",
3075 phy_info->cfg_phy_cnt);
3079 if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3080 !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3081 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3082 "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3086 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
3087 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3089 rssi = iwm_mvm_calc_rssi(sc, phy_info);
3092 /* Note: RSSI is absolute (ie a -ve value) */
3093 if (rssi < IWM_MIN_DBM)
3095 else if (rssi > IWM_MAX_DBM)
3098 /* Map it to relative value */
3099 rssi = rssi - sc->sc_noise;
3101 /* replenish ring for the buffer we're going to feed to the sharks */
3102 if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3103 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3108 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3109 "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3111 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3113 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3114 "%s: phy_info: channel=%d, flags=0x%08x\n",
3116 le16toh(phy_info->channel),
3117 le16toh(phy_info->phy_flags));
3120 * Populate an RX state struct with the provided information.
3122 bzero(&rxs, sizeof(rxs));
3123 rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3124 rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3125 rxs.c_ieee = le16toh(phy_info->channel);
3126 if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3127 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3129 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3132 /* rssi is in 1/2db units */
3133 rxs.c_rssi = rssi * 2;
3134 rxs.c_nf = sc->sc_noise;
3135 if (ieee80211_add_rx_params(m, &rxs) == 0) {
3137 ieee80211_free_node(ni);
3141 if (ieee80211_radiotap_active_vap(vap)) {
3142 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3145 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3146 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3147 tap->wr_chan_freq = htole16(rxs.c_freq);
3148 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3149 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3150 tap->wr_dbm_antsignal = (int8_t)rssi;
3151 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3152 tap->wr_tsft = phy_info->system_timestamp;
/* Map the firmware rate code to a radiotap rate (500kb/s units). */
3153 switch (phy_info->rate) {
3155 case 10: tap->wr_rate = 2; break;
3156 case 20: tap->wr_rate = 4; break;
3157 case 55: tap->wr_rate = 11; break;
3158 case 110: tap->wr_rate = 22; break;
3160 case 0xd: tap->wr_rate = 12; break;
3161 case 0xf: tap->wr_rate = 18; break;
3162 case 0x5: tap->wr_rate = 24; break;
3163 case 0x7: tap->wr_rate = 36; break;
3164 case 0x9: tap->wr_rate = 48; break;
3165 case 0xb: tap->wr_rate = 72; break;
3166 case 0x1: tap->wr_rate = 96; break;
3167 case 0x3: tap->wr_rate = 108; break;
3168 /* Unknown rate: should not happen. */
3169 default: tap->wr_rate = 0;
3175 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3176 ieee80211_input_mimo(ni, m);
3177 ieee80211_free_node(ni);
3179 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3180 ieee80211_input_mimo_all(ic, m);
3187 counter_u64_add(ic->ic_ierrors, 1);
/*
 * Process a single-frame TX completion from the firmware.
 *
 * Translates the firmware TX status and retry counters into an
 * ieee80211_ratectl_tx_status and feeds it to the rate-control module.
 * Returns non-zero if the transmission failed (used by the caller as
 * the ieee80211_tx_complete() status).
 */
3191 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3192 struct iwm_node *in)
3194 struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3195 struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
3196 struct ieee80211_node *ni = &in->in_ni;
3197 int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
/* Aggregated responses are not handled here. */
3199 KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3201 /* Update rate control statistics. */
3202 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3204 (int) le16toh(tx_resp->status.status),
3205 (int) le16toh(tx_resp->status.sequence),
3206 tx_resp->frame_count,
3207 tx_resp->bt_kill_count,
3208 tx_resp->failure_rts,
3209 tx_resp->failure_frame,
3210 le32toh(tx_resp->initial_rate),
3211 (int) le16toh(tx_resp->wireless_media_time));
3213 txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
3214 IEEE80211_RATECTL_STATUS_LONG_RETRY;
3215 txs->short_retries = tx_resp->failure_rts;
3216 txs->long_retries = tx_resp->failure_frame;
3217 if (status != IWM_TX_STATUS_SUCCESS &&
3218 status != IWM_TX_STATUS_DIRECT_DONE) {
3220 case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
3221 txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
3223 case IWM_TX_STATUS_FAIL_LONG_LIMIT:
3224 txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
3226 case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
3227 txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
3230 txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
3234 txs->status = IEEE80211_RATECTL_TX_SUCCESS;
3236 ieee80211_ratectl_tx_complete(ni, txs);
3238 return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
/*
 * TX completion notification handler.
 *
 * Locates the TX ring slot from the command header's qid/idx, runs the
 * single-frame completion (rate control update), then unmaps and
 * completes the mbuf via ieee80211_tx_complete() and updates the ring's
 * queued count / queue-full mask.
 */
3242 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
3243 struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3245 struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3246 int idx = cmd_hdr->idx;
3247 int qid = cmd_hdr->qid;
3248 struct iwm_tx_ring *ring = &sc->txq[qid];
3249 struct iwm_tx_data *txd = &ring->data[idx];
3250 struct iwm_node *in = txd->in;
3251 struct mbuf *m = txd->m;
3254 KASSERT(txd->done == 0, ("txd not done"));
3255 KASSERT(txd->in != NULL, ("txd without node"));
3256 KASSERT(txd->m != NULL, ("txd without mbuf"));
3258 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
/* A completion means the firmware is alive; reset the watchdog. */
3260 sc->sc_tx_timer = 0;
3262 status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3264 /* Unmap and free mbuf. */
3265 bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3266 bus_dmamap_unload(ring->data_dmat, txd->map);
3268 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3269 "free txd %p, in %p\n", txd, txd->in);
/* status != 0 marks the frame as failed to net80211. */
3274 ieee80211_tx_complete(&in->in_ni, m, status);
3276 if (--ring->queued < IWM_TX_RING_LOMARK) {
3277 sc->qfullmsk &= ~(1 << ring->qid);
3278 if (sc->qfullmsk == 0) {
3280 * Well, we're in interrupt context, but then again
3281 * I guess net80211 does all sorts of stunts in
3282 * interrupt context, so maybe this is no biggie.
3294 * Process a "command done" firmware notification. This is where we wakeup
3295 * processes waiting for a synchronous command completion.
/*
 * Process a "command done" firmware notification: release any mbuf the
 * command was mapped in and wake up the thread sleeping on the command
 * queue descriptor in the synchronous-command path.  Notifications from
 * queues other than the command queue are ignored.
 */
3299 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3301 struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3302 struct iwm_tx_data *data;
3304 if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3305 return; /* Not a command ack. */
3308 /* XXX wide commands? */
3309 IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3310 "cmd notification type 0x%x qid %d idx %d\n",
3311 pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3313 data = &ring->data[pkt->hdr.idx];
3315 /* If the command was mapped in an mbuf, free it. */
3316 if (data->m != NULL) {
3317 bus_dmamap_sync(ring->data_dmat, data->map,
3318 BUS_DMASYNC_POSTWRITE);
3319 bus_dmamap_unload(ring->data_dmat, data->map);
/* Wake the synchronous sender sleeping on this descriptor. */
3323 wakeup(&ring->desc[pkt->hdr.idx]);
3328 * necessary only for block ack mode
/*
 * Update the TX scheduler's byte-count table for one TFD slot
 * (needed for block-ack mode).
 *
 * The entry packs the station id (top 4 bits) with the frame length
 * (+8 fudge inherited from the reference driver; with the DW_BC_TABLE
 * capability the length is expressed in dwords).  Entries with
 * idx < IWM_TFD_QUEUE_SIZE_BC_DUP are duplicated past the end of the
 * table, mirroring the Linux iwlwifi layout -- presumably so the
 * hardware can read past the ring wrap point; TODO confirm.
 */
3331 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3334 struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3337 scd_bc_tbl = sc->sched_dma.vaddr;
3339 len += 8; /* magic numbers came naturally from paris */
3340 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
3341 len = roundup(len, 4) / 4;
3343 w_val = htole16(sta_id << 12 | len);
3345 /* Update TX scheduler. */
3346 scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3347 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3348 BUS_DMASYNC_PREWRITE);
3350 /* I really wonder what this is ?!? */
3351 if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3352 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3353 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3354 BUS_DMASYNC_PREWRITE);
3360 * Take an 802.11 (non-n) rate, find the relevant rate
3361 * table entry. return the index into in_ridx[].
3363 * The caller then uses that index back into in_ridx
3364 * to figure out the rate index programmed /into/
3365 * the firmware for this given node.
/*
 * Map an 802.11 (non-HT) rate to an index into the node's programmed
 * rate table (in->in_ridx[]).  If no entry matches, logs a debug
 * message and falls back to a default index (the fallback value is in
 * lines not shown here; the XXX comments suggest index 0 / "first").
 */
3368 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3374 for (i = 0; i < nitems(in->in_ridx); i++) {
3375 r = iwm_rates[in->in_ridx[i]].rate;
3380 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3381 "%s: couldn't find an entry for rate=%d\n",
3385 /* XXX Return the first */
3386 /* XXX TODO: have it return the /lowest/ */
/*
 * Map an 802.11 rate to an index into the global iwm_rates[] table.
 * Logs a debug message when no entry matches; the fallback return is
 * in lines not shown here -- TODO confirm.
 */
3391 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3395 for (i = 0; i < nitems(iwm_rates); i++) {
3396 if (iwm_rates[i].rate == rate)
3400 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3401 "%s: couldn't find an entry for rate=%d\n",
3408 * Fill in the rate related information for a transmit command.
/*
 * Fill in the rate-related fields of a TX command and return the chosen
 * rate-table entry.
 *
 * Rate selection, in priority order: management frames and EAPOL use
 * the vap's mgmtrate; multicast uses mcastrate; a fixed ucast rate is
 * honored if configured; other data frames consult the rate-control
 * module and the node's programmed rate table (in_ridx), also setting
 * STA_RATE so the firmware uses the per-station table; anything else
 * falls back to mgmtrate.  The returned rinfo also drives the radiotap
 * TX header in the caller.
 */
3410 static const struct iwm_rate *
3411 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3412 struct mbuf *m, struct iwm_tx_cmd *tx)
3414 struct ieee80211_node *ni = &in->in_ni;
3415 struct ieee80211_frame *wh;
3416 const struct ieee80211_txparam *tp = ni->ni_txparms;
3417 const struct iwm_rate *rinfo;
3419 int ridx, rate_flags;
3421 wh = mtod(m, struct ieee80211_frame *);
3422 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3424 tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3425 tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3427 if (type == IEEE80211_FC0_TYPE_MGT) {
3428 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3429 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3430 "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3431 } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3432 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3433 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3434 "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3435 } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3436 ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3437 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3438 "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3439 } else if (m->m_flags & M_EAPOL) {
/* EAPOL frames go out at the robust management rate. */
3440 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3441 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3442 "%s: EAPOL\n", __func__);
3443 } else if (type == IEEE80211_FC0_TYPE_DATA) {
3446 /* for data frames, use RS table */
3447 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3448 /* XXX pass pktlen */
3449 (void) ieee80211_ratectl_rate(ni, NULL, 0);
3450 i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3451 ridx = in->in_ridx[i];
3453 /* This is the index into the programmed table */
3454 tx->initial_rate_index = i;
3455 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3457 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3458 "%s: start with i=%d, txrate %d\n",
3459 __func__, i, iwm_rates[ridx].rate);
3461 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3462 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DEFAULT (%d)\n",
3463 __func__, tp->mgmtrate);
3466 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3467 "%s: frame type=%d txrate %d\n",
3468 __func__, type, iwm_rates[ridx].rate);
3470 rinfo = &iwm_rates[ridx];
3472 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3475 !! (IWM_RIDX_IS_CCK(ridx))
3478 /* XXX TODO: hard-coded TX antenna? */
3479 rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3480 if (IWM_RIDX_IS_CCK(ridx))
3481 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3482 tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3489 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3491 struct ieee80211com *ic = &sc->sc_ic;
3492 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3493 struct iwm_node *in = IWM_NODE(ni);
3494 struct iwm_tx_ring *ring;
3495 struct iwm_tx_data *data;
3496 struct iwm_tfd *desc;
3497 struct iwm_device_cmd *cmd;
3498 struct iwm_tx_cmd *tx;
3499 struct ieee80211_frame *wh;
3500 struct ieee80211_key *k = NULL;
3502 const struct iwm_rate *rinfo;
3505 bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3508 int i, totlen, error, pad;
3510 wh = mtod(m, struct ieee80211_frame *);
3511 hdrlen = ieee80211_anyhdrsize(wh);
3512 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3514 ring = &sc->txq[ac];
3515 desc = &ring->desc[ring->cur];
3516 memset(desc, 0, sizeof(*desc));
3517 data = &ring->data[ring->cur];
3519 /* Fill out iwm_tx_cmd to send to the firmware */
3520 cmd = &ring->cmd[ring->cur];
3521 cmd->hdr.code = IWM_TX_CMD;
3523 cmd->hdr.qid = ring->qid;
3524 cmd->hdr.idx = ring->cur;
3526 tx = (void *)cmd->data;
3527 memset(tx, 0, sizeof(*tx));
3529 rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
3531 /* Encrypt the frame if need be. */
3532 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3533 /* Retrieve key for TX && do software encryption. */
3534 k = ieee80211_crypto_encap(ni, m);
3539 /* 802.11 header may have moved. */
3540 wh = mtod(m, struct ieee80211_frame *);
3543 if (ieee80211_radiotap_active_vap(vap)) {
3544 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3547 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3548 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3549 tap->wt_rate = rinfo->rate;
3551 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3552 ieee80211_radiotap_tx(vap, m);
3556 totlen = m->m_pkthdr.len;
3559 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3560 flags |= IWM_TX_CMD_FLG_ACK;
3563 if (type == IEEE80211_FC0_TYPE_DATA
3564 && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3565 && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3566 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3569 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3570 type != IEEE80211_FC0_TYPE_DATA)
3571 tx->sta_id = sc->sc_aux_sta.sta_id;
3573 tx->sta_id = IWM_STATION_ID;
3575 if (type == IEEE80211_FC0_TYPE_MGT) {
3576 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3578 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3579 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3580 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3581 } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3582 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3584 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3587 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3591 /* First segment length must be a multiple of 4. */
3592 flags |= IWM_TX_CMD_FLG_MH_PAD;
3593 pad = 4 - (hdrlen & 3);
3597 tx->driver_txop = 0;
3598 tx->next_frame_len = 0;
3600 tx->len = htole16(totlen);
3601 tx->tid_tspec = tid;
3602 tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3604 /* Set physical address of "scratch area". */
3605 tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3606 tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3608 /* Copy 802.11 header in TX command. */
3609 memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3611 flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3614 tx->tx_flags |= htole32(flags);
3616 /* Trim 802.11 header. */
3618 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3619 segs, &nsegs, BUS_DMA_NOWAIT);
3621 if (error != EFBIG) {
3622 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3627 /* Too many DMA segments, linearize mbuf. */
3628 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3630 device_printf(sc->sc_dev,
3631 "%s: could not defrag mbuf\n", __func__);
3637 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3638 segs, &nsegs, BUS_DMA_NOWAIT);
3640 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3650 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3651 "sending txd %p, in %p\n", data, data->in);
3652 KASSERT(data->in != NULL, ("node is NULL"));
3654 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3655 "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3656 ring->qid, ring->cur, totlen, nsegs,
3657 le32toh(tx->tx_flags),
3658 le32toh(tx->rate_n_flags),
3659 tx->initial_rate_index
3662 /* Fill TX descriptor. */
3663 desc->num_tbs = 2 + nsegs;
3665 desc->tbs[0].lo = htole32(data->cmd_paddr);
3666 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3668 desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3669 desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3670 ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3671 + hdrlen + pad - TB0_SIZE) << 4);
3673 /* Other DMA segments are for data payload. */
3674 for (i = 0; i < nsegs; i++) {
3676 desc->tbs[i+2].lo = htole32(seg->ds_addr);
3677 desc->tbs[i+2].hi_n_len = \
3678 htole16(iwm_get_dma_hi_addr(seg->ds_addr))
3679 | ((seg->ds_len) << 4);
3682 bus_dmamap_sync(ring->data_dmat, data->map,
3683 BUS_DMASYNC_PREWRITE);
3684 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3685 BUS_DMASYNC_PREWRITE);
3686 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3687 BUS_DMASYNC_PREWRITE);
3690 iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3694 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3695 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3697 /* Mark TX ring as full if we reach a certain threshold. */
3698 if (++ring->queued > IWM_TX_RING_HIMARK) {
3699 sc->qfullmsk |= 1 << ring->qid;
/*
 * net80211 raw-transmit entry point (injected/bpf frames).
 * Refuses to transmit unless the hardware is initialized.
 * NOTE(review): the params == NULL and params != NULL branches both
 * call iwm_tx(sc, m, ni, 0) identically, so the bpf params are
 * effectively ignored — confirm this is intentional.
 */
3706 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3707     const struct ieee80211_bpf_params *params)
3709 	struct ieee80211com *ic = ni->ni_ic;
3710 	struct iwm_softc *sc = ic->ic_softc;
3713 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3714 	    "->%s begin\n", __func__);
	/* Reject transmit attempts while the NIC is down. */
3716 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3718 		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3719 		    "<-%s not RUNNING\n", __func__);
3725 	if (params == NULL) {
3726 		error = iwm_tx(sc, m, ni, 0);
3728 		error = iwm_tx(sc, m, ni, 0);
	/* Arm the watchdog tx timer (ticks, decremented in iwm_watchdog). */
3730 	sc->sc_tx_timer = 5;
3741  * Note that there are transports that buffer frames before they reach
3742  * the firmware. This means that after flush_tx_path is called, the
3743  * queue might not be empty. The race-free way to handle this is to:
3744  * 1) set the station as draining
3745  * 2) flush the Tx path
3746  * 3) wait for the transport queues to be empty
/*
 * Ask the firmware to flush all pending frames on the TX queues
 * selected by the tfd_msk bitmask.  flags are the host-command flags
 * (e.g. IWM_CMD_SYNC) passed through to iwm_mvm_send_cmd_pdu().
 */
3749 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3752 	struct iwm_tx_path_flush_cmd flush_cmd = {
3753 		.queues_ctl = htole32(tfd_msk),
3754 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3757 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3758 	    sizeof(flush_cmd), &flush_cmd);
3760 		device_printf(sc->sc_dev,
3761 		    "Flushing tx queue failed: %d\n", ret);
/*
 * Thin wrapper: send an IWM_ADD_STA command synchronously and return
 * the firmware status via *status.
 */
3770 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3771     struct iwm_mvm_add_sta_cmd_v7 *cmd, int *status)
3773 	return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
3777 /* send station add/update command to firmware */
/*
 * Build an ADD_STA command for the BSS station (IWM_STATION_ID) and
 * send it to the firmware.  update != 0 selects "modify existing
 * station" rather than "add new station".
 */
3779 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3781 	struct iwm_mvm_add_sta_cmd_v7 add_sta_cmd;
3785 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3787 	add_sta_cmd.sta_id = IWM_STATION_ID;
3788 	add_sta_cmd.mac_id_n_color
3789 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3790 	        IWM_DEFAULT_COLOR));
	/* Enable a TFD queue for each WME access category. */
3793 		for (ac = 0; ac < WME_NUM_AC; ac++) {
3794 			add_sta_cmd.tfd_queue_msk |=
3795 			    htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
3797 	IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3799 	add_sta_cmd.add_modify = update ? 1 : 0;
3800 	add_sta_cmd.station_flags_msk
3801 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
	/* Start with TX disabled on all TIDs. */
3802 	add_sta_cmd.tid_disable_tx = htole16(0xffff);
3804 		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
3806 	status = IWM_ADD_STA_SUCCESS;
3807 	ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3812 	case IWM_ADD_STA_SUCCESS:
3816 		device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
/* Add the BSS station to the firmware (update == 0). */
3824 iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
3826 	return iwm_mvm_sta_send_to_fw(sc, in, 0);
/* Update the existing BSS station in the firmware (update == 1). */
3830 iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
3832 	return iwm_mvm_sta_send_to_fw(sc, in, 1);
/*
 * Add an internal (driver-private) station, e.g. the auxiliary station
 * used for scanning.  addr may be NULL for a station with no MAC
 * address; mac_id/color select the firmware MAC context.
 */
3836 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3837     const uint8_t *addr, uint16_t mac_id, uint16_t color)
3839 	struct iwm_mvm_add_sta_cmd_v7 cmd;
3843 	memset(&cmd, 0, sizeof(cmd));
3844 	cmd.sta_id = sta->sta_id;
3845 	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3847 	cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
	/* TX starts disabled on all TIDs. */
3848 	cmd.tid_disable_tx = htole16(0xffff);
3851 		IEEE80211_ADDR_COPY(cmd.addr, addr);
3853 	ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3858 	case IWM_ADD_STA_SUCCESS:
3859 		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3860 		    "%s: Internal station added.\n", __func__);
3863 		device_printf(sc->sc_dev,
3864 		    "%s: Add internal station failed, status=0x%x\n",
/*
 * Set up the auxiliary station used for scanning: enable its TX queue
 * and register it with the firmware.  On failure the aux station state
 * is cleared.
 */
3873 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3877 	sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
3878 	sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
3880 	ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
3884 	ret = iwm_mvm_add_int_sta_common(sc,
3885 	    &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
3888 		memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
/*
 * Recompute and send the firmware airtime quota allocation.  The
 * session's IWM_MVM_MAX_QUOTA fragments are divided equally among the
 * active bindings (in may be NULL when tearing down, leaving all
 * quotas invalid/zero).  Currently PHY context ID == binding ID.
 */
3901 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
3903 	struct iwm_time_quota_cmd cmd;
3904 	int i, idx, ret, num_active_macs, quota, quota_rem;
3905 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3906 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
3909 	memset(&cmd, 0, sizeof(cmd));
3911 	/* currently, PHY ID == binding ID */
3913 		id = in->in_phyctxt->id;
3914 		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3915 		colors[id] = in->in_phyctxt->color;
3922 	 * The FW's scheduling session consists of
3923 	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3924 	 * equally between all the bindings that require quota
3926 	num_active_macs = 0;
3927 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3928 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3929 		num_active_macs += n_ifs[i];
3934 	if (num_active_macs) {
3935 		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3936 		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3939 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3943 		cmd.quotas[idx].id_and_color =
3944 		    htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3946 		if (n_ifs[i] <= 0) {
3947 			cmd.quotas[idx].quota = htole32(0);
3948 			cmd.quotas[idx].max_duration = htole32(0);
3950 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3951 			cmd.quotas[idx].max_duration = htole32(0);
3956 	/* Give the remainder of the session to the first binding */
3957 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3959 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3962 		device_printf(sc->sc_dev,
3963 		    "%s: Failed to send quota: %d\n", __func__, ret);
3972 * ieee80211 routines
3976 * Change to AUTH state in 80211 state machine. Roughly matches what
3977 * Linux does in bss_info_changed().
/*
 * Bring the firmware into a state suitable for 802.11 AUTH: configure
 * the smart FIFO, set up MAC/PHY contexts and the binding, add (or
 * update) the BSS station, and protect the session with a time event
 * so the FW stays on-channel during association.  Returns 0 or errno.
 */
3980 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3982 	struct ieee80211_node *ni;
3983 	struct iwm_node *in;
3984 	struct iwm_vap *iv = IWM_VAP(vap);
3989 	 * XXX i have a feeling that the vap node is being
3990 	 * freed from underneath us. Grr.
	/* Hold a reference to the bss node for the duration of the call. */
3992 	ni = ieee80211_ref_node(vap->iv_bss);
3994 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3995 	    "%s: called; vap=%p, bss ni=%p\n",
4002 	error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
4006 	error = iwm_allow_mcast(vap, sc);
4008 		device_printf(sc->sc_dev,
4009 		    "%s: failed to set multicast\n", __func__);
4014 	 * This is where it deviates from what Linux does.
4016 	 * Linux iwlwifi doesn't reset the nic each time, nor does it
4017 	 * call ctxt_add() here. Instead, it adds it during vap creation,
4018 	 * and always does a mac_ctx_changed().
4020 	 * The openbsd port doesn't attempt to do that - it reset things
4021 	 * at odd states and does the add here.
4023 	 * So, until the state handling is fixed (ie, we never reset
4024 	 * the NIC except for a firmware failure, which should drag
4025 	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4026 	 * contexts that are required), let's do a dirty hack here.
	/* Contexts already uploaded: only "changed" updates are needed. */
4028 	if (iv->is_uploaded) {
4029 		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4030 			device_printf(sc->sc_dev,
4031 			    "%s: failed to update MAC\n", __func__);
4034 		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4035 		    in->in_ni.ni_chan, 1, 1)) != 0) {
4036 			device_printf(sc->sc_dev,
4037 			    "%s: failed update phy ctxt\n", __func__);
4040 		in->in_phyctxt = &sc->sc_phyctxt[0];
4042 		if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
4043 			device_printf(sc->sc_dev,
4044 			    "%s: binding update cmd\n", __func__);
4047 		if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4048 			device_printf(sc->sc_dev,
4049 			    "%s: failed to update sta\n", __func__);
	/* First time through: add MAC/PHY contexts, binding, station. */
4053 		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4054 			device_printf(sc->sc_dev,
4055 			    "%s: failed to add MAC\n", __func__);
4058 		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4059 		    in->in_ni.ni_chan, 1, 1)) != 0) {
4060 			device_printf(sc->sc_dev,
4061 			    "%s: failed add phy ctxt!\n", __func__);
4065 		in->in_phyctxt = &sc->sc_phyctxt[0];
4067 		if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
4068 			device_printf(sc->sc_dev,
4069 			    "%s: binding add cmd\n", __func__);
4072 		if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4073 			device_printf(sc->sc_dev,
4074 			    "%s: failed to add sta\n", __func__);
4080 	 * Prevent the FW from wandering off channel during association
4081 	 * by "protecting" the session with a time event.
4083 	/* XXX duration is in units of TU, not MS */
4084 	duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4085 	iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
	/* Drop the node reference taken at entry. */
4090 	ieee80211_free_node(ni);
/*
 * Push association-time state to the firmware: update the BSS station
 * entry and the MAC context.  Returns 0 or errno.
 */
4095 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
4097 	struct iwm_node *in = IWM_NODE(vap->iv_bss);
4100 	if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4101 		device_printf(sc->sc_dev,
4102 		    "%s: failed to update STA\n", __func__);
4107 	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4108 		device_printf(sc->sc_dev,
4109 		    "%s: failed to update MAC\n", __func__);
/*
 * Tear down association state when leaving RUN: drain the send queue,
 * flush the firmware TX path and stop the device.  The "proper"
 * incremental teardown (see the comment below) freezes the device, so
 * a full stop/reset is used instead.
 */
4117 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
4122 	 * Ok, so *technically* the proper set of calls for going
4123 	 * from RUN back to SCAN is:
4125 	 * iwm_mvm_power_mac_disable(sc, in);
4126 	 * iwm_mvm_mac_ctxt_changed(sc, in);
4127 	 * iwm_mvm_rm_sta(sc, in);
4128 	 * iwm_mvm_update_quotas(sc, NULL);
4129 	 * iwm_mvm_mac_ctxt_changed(sc, in);
4130 	 * iwm_mvm_binding_remove_vif(sc, in);
4131 	 * iwm_mvm_mac_ctxt_remove(sc, in);
4133 	 * However, that freezes the device not matter which permutations
4134 	 * and modifications are attempted. Obviously, this driver is missing
4135 	 * something since it works in the Linux driver, but figuring out what
4136 	 * is missing is a little more complicated. Now, since we're going
4137 	 * back to nothing anyway, we'll just do a complete device reset.
4138 	 * Up your's, device!
4141 	 * Just using 0xf for the queues mask is fine as long as we only
4142 	 * get here from RUN state.
4145 	mbufq_drain(&sc->sc_snd);
4146 	iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
4148 	 * We seem to get away with just synchronously sending the
4149 	 * IWM_TXPATH_FLUSH command.
4151 	// iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
4152 	iwm_stop_device(sc);
	/* Dead code path below (after the early return above) — kept for
	 * reference; it is the incremental teardown that freezes the NIC. */
4161 	iwm_mvm_power_mac_disable(sc, in);
4163 	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4164 		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
4168 	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
4169 		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
4172 	error = iwm_mvm_rm_sta(sc, in);
4174 	iwm_mvm_update_quotas(sc, NULL);
4175 	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4176 		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
4179 	iwm_mvm_binding_remove_vif(sc, in);
4181 	iwm_mvm_mac_ctxt_remove(sc, in);
/* net80211 node allocator: iwm_node embeds ieee80211_node at offset 0. */
4187 static struct ieee80211_node *
4188 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4190 	return malloc(sizeof (struct iwm_node), M_80211_NODE,
/*
 * Build the firmware link-quality (rate selection) command for a node.
 * Maps the node's negotiated legacy rates to hardware rate indices
 * (highest rate first) and fills in->in_lq; the caller sends it with
 * IWM_LQ_CMD.  Not 11n aware (legacy/CCK/OFDM rates only).
 */
4195 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
4197 	struct ieee80211_node *ni = &in->in_ni;
4198 	struct iwm_lq_cmd *lq = &in->in_lq;
4199 	int nrates = ni->ni_rates.rs_nrates;
4200 	int i, ridx, tab = 0;
	/* Sanity: the LQ table has a fixed size; bail on impossible counts. */
4203 	if (nrates > nitems(lq->rs_table)) {
4204 		device_printf(sc->sc_dev,
4205 		    "%s: node supports %d rates, driver handles "
4206 		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4210 		device_printf(sc->sc_dev,
4211 		    "%s: node supports 0 rates, odd!\n", __func__);
4216 	 * XXX .. and most of iwm_node is not initialised explicitly;
4217 	 * it's all just 0x0 passed to the firmware.
4220 	/* first figure out which rates we should support */
4221 	/* XXX TODO: this isn't 11n aware /at all/ */
4222 	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
4223 	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4224 	    "%s: nrates=%d\n", __func__, nrates);
4227 	 * Loop over nrates and populate in_ridx from the highest
4228 	 * rate to the lowest rate. Remember, in_ridx[] has
4229 	 * IEEE80211_RATE_MAXSIZE entries!
4231 	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
4232 		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;
4234 		/* Map 802.11 rate to HW rate index. */
4235 		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
4236 			if (iwm_rates[ridx].rate == rate)
4238 		if (ridx > IWM_RIDX_MAX) {
4239 			device_printf(sc->sc_dev,
4240 			    "%s: WARNING: device rate for %d not found!\n",
4243 			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4244 			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
4249 			in->in_ridx[i] = ridx;
4253 	/* then construct a lq_cmd based on those */
4254 	memset(lq, 0, sizeof(*lq));
4255 	lq->sta_id = IWM_STATION_ID;
4257 	/* For HT, always enable RTS/CTS to avoid excessive retries. */
4258 	if (ni->ni_flags & IEEE80211_NODE_HT)
4259 		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4262 	 * are these used? (we don't do SISO or MIMO)
4263 	 * need to set them to non-zero, though, or we get an error.
4265 	lq->single_stream_ant_msk = 1;
4266 	lq->dual_stream_ant_msk = 1;
4269 	 * Build the actual rate selection table.
4270 	 * The lowest bits are the rates. Additionally,
4271 	 * CCK needs bit 9 to be set. The rest of the bits
4272 	 * we add to the table select the tx antenna
4273 	 * Note that we add the rates in the highest rate first
4274 	 * (opposite of ni_rates).
4277 	 * XXX TODO: this should be looping over the min of nrates
4278 	 * and LQ_MAX_RETRY_NUM. Sigh.
4280 	for (i = 0; i < nrates; i++) {
4285 			txant = iwm_mvm_get_valid_tx_ant(sc);
4286 			nextant = 1<<(ffs(txant)-1);
4289 			nextant = iwm_mvm_get_valid_tx_ant(sc);
4292 		 * Map the rate id into a rate index into
4293 		 * our hardware table containing the
4294 		 * configuration to use for this rate.
4296 		ridx = in->in_ridx[i];
4297 		tab = iwm_rates[ridx].plcp;
4298 		tab |= nextant << IWM_RATE_MCS_ANT_POS;
4299 		if (IWM_RIDX_IS_CCK(ridx))
4300 			tab |= IWM_RATE_MCS_CCK_MSK;
4301 		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4302 		    "station rate i=%d, rate=%d, hw=%x\n",
4303 		    i, iwm_rates[ridx].rate, tab);
4304 		lq->rs_table[i] = htole32(tab);
4306 	/* then fill the rest with the lowest possible rate */
4307 	for (i = nrates; i < nitems(lq->rs_table); i++) {
4308 		KASSERT(tab != 0, ("invalid tab"));
4309 		lq->rs_table[i] = htole32(tab);
/*
 * ifmedia change callback: apply a media change via net80211 and,
 * if the interface is running, restart it to pick up the new settings.
 */
4314 iwm_media_change(struct ifnet *ifp)
4316 	struct ieee80211vap *vap = ifp->if_softc;
4317 	struct ieee80211com *ic = vap->iv_ic;
4318 	struct iwm_softc *sc = ic->ic_softc;
4321 	error = ieee80211_media_change(ifp);
	/* ENETRESET means the change requires a restart; anything else
	 * is either success (0) or a real error to propagate. */
4322 	if (error != ENETRESET)
4326 	if (ic->ic_nrunning > 0) {
/*
 * net80211 state-machine hook.  Performs the driver-side work for each
 * state transition (auth/assoc/run setup, teardown on INIT) and then
 * chains to the saved net80211 newstate handler.  Called with the
 * IEEE80211 lock held; drops/reacquires it around driver work.
 */
4336 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4338 	struct iwm_vap *ivp = IWM_VAP(vap);
4339 	struct ieee80211com *ic = vap->iv_ic;
4340 	struct iwm_softc *sc = ic->ic_softc;
4341 	struct iwm_node *in;
4344 	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4345 	    "switching state %s -> %s\n",
4346 	    ieee80211_state_name[vap->iv_state],
4347 	    ieee80211_state_name[nstate]);
4348 	IEEE80211_UNLOCK(ic);
	/* Stop the scan LED blink when leaving SCAN. */
4351 	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
4352 		iwm_led_blink_stop(sc);
4354 	/* disable beacon filtering if we're hopping out of RUN */
4355 	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
4356 		iwm_mvm_disable_beacon_filter(sc);
4358 		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4361 		if (nstate == IEEE80211_S_INIT) {
4364 			error = ivp->iv_newstate(vap, nstate, arg);
4365 			IEEE80211_UNLOCK(ic);
4367 			iwm_release(sc, NULL);
4374 		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
4375 		 * above then the card will be completely reinitialized,
4376 		 * so the driver must do everything necessary to bring the card
4377 		 * from INIT to SCAN.
4379 		 * Additionally, upon receiving deauth frame from AP,
4380 		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
4381 		 * state. This will also fail with this driver, so bring the FSM
4382 		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
4384 		 * XXX TODO: fix this for FreeBSD!
4386 		if (nstate == IEEE80211_S_SCAN ||
4387 		    nstate == IEEE80211_S_AUTH ||
4388 		    nstate == IEEE80211_S_ASSOC) {
4389 			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4390 			    "Force transition to INIT; MGT=%d\n", arg);
4393 			/* Always pass arg as -1 since we can't Tx right now. */
4395 			 * XXX arg is just ignored anyway when transitioning
4396 			 * to IEEE80211_S_INIT.
4398 			vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
4399 			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4400 			    "Going INIT->SCAN\n");
4401 			nstate = IEEE80211_S_SCAN;
4402 			IEEE80211_UNLOCK(ic);
	/* Per-target-state driver setup. */
4408 	case IEEE80211_S_INIT:
4411 	case IEEE80211_S_AUTH:
4412 		if ((error = iwm_auth(vap, sc)) != 0) {
4413 			device_printf(sc->sc_dev,
4414 			    "%s: could not move to auth state: %d\n",
4420 	case IEEE80211_S_ASSOC:
4421 		if ((error = iwm_assoc(vap, sc)) != 0) {
4422 			device_printf(sc->sc_dev,
4423 			    "%s: failed to associate: %d\n", __func__,
4429 	case IEEE80211_S_RUN:
4431 		struct iwm_host_cmd cmd = {
4433 			.len = { sizeof(in->in_lq), },
4434 			.flags = IWM_CMD_SYNC,
4437 		/* Update the association state, now we have it all */
4438 		/* (eg associd comes in at this point */
4439 		error = iwm_assoc(vap, sc);
4441 			device_printf(sc->sc_dev,
4442 			    "%s: failed to update association state: %d\n",
4448 		in = IWM_NODE(vap->iv_bss);
4449 		iwm_mvm_power_mac_update_mode(sc, in);
4450 		iwm_mvm_enable_beacon_filter(sc, in);
4451 		iwm_mvm_update_quotas(sc, in);
4452 		iwm_setrates(sc, in);
	/* Send the link-quality (rate table) command built by iwm_setrates. */
4454 		cmd.data[0] = &in->in_lq;
4455 		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
4456 			device_printf(sc->sc_dev,
4457 			    "%s: IWM_LQ_CMD failed\n", __func__);
4460 		iwm_mvm_led_enable(sc);
	/* Chain to the saved net80211 state handler. */
4470 	return (ivp->iv_newstate(vap, nstate, arg));
/*
 * Taskqueue callback run when a firmware scan completes; notifies
 * net80211 that the scan on the first vap is done.
 */
4474 iwm_endscan_cb(void *arg, int pending)
4476 	struct iwm_softc *sc = arg;
4477 	struct ieee80211com *ic = &sc->sc_ic;
4479 	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4483 	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4487 * Aging and idle timeouts for the different possible scenarios
4488 * in default configuration
/* Smart-FIFO aging/idle timeouts, default (non-single-BSS) config.
 * Indexed [scenario][timeout type]; values are little-endian for the FW. */
4490 static const uint32_t
4491 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4493 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
4494 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
4497 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
4498 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
4501 		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
4502 		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
4505 		htole32(IWM_SF_BA_AGING_TIMER_DEF),
4506 		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
4509 		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
4510 		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
4515 * Aging and idle timeouts for the different possible scenarios
4516 * in single BSS MAC configuration.
/* Smart-FIFO aging/idle timeouts for the single-BSS MAC configuration.
 * Same [scenario][timeout type] layout as iwm_sf_full_timeout_def. */
4518 static const uint32_t
4519 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4521 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
4522 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
4525 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
4526 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
4529 		htole32(IWM_SF_MCAST_AGING_TIMER),
4530 		htole32(IWM_SF_MCAST_IDLE_TIMER)
4533 		htole32(IWM_SF_BA_AGING_TIMER),
4534 		htole32(IWM_SF_BA_IDLE_TIMER)
4537 		htole32(IWM_SF_TX_RE_AGING_TIMER),
4538 		htole32(IWM_SF_TX_RE_IDLE_TIMER)
/*
 * Fill a Smart-FIFO configuration command.  If ni is non-NULL we are
 * associating: pick the watermark from the AP's antenna/HT capability
 * and use the associated timeout table; otherwise use the defaults.
 */
4543 iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
4544     struct ieee80211_node *ni)
4546 	int i, j, watermark;
4548 	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
4551 	 * If we are in association flow - check antenna configuration
4552 	 * capabilities of the AP station, and choose the watermark accordingly.
4555 		if (ni->ni_flags & IEEE80211_NODE_HT) {
	/* MCS sets for the 2nd/3rd spatial stream imply MIMO2/MIMO3. */
4557 			if (ni->ni_rxmcs[2] != 0)
4558 				watermark = IWM_SF_W_MARK_MIMO3;
4559 			else if (ni->ni_rxmcs[1] != 0)
4560 				watermark = IWM_SF_W_MARK_MIMO2;
4563 				watermark = IWM_SF_W_MARK_SISO;
4565 			watermark = IWM_SF_W_MARK_LEGACY;
4567 	/* default watermark value for unassociated mode. */
4569 		watermark = IWM_SF_W_MARK_MIMO2;
4571 	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
4573 	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
4574 		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
4575 			sf_cmd->long_delay_timeouts[i][j] =
4576 			    htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
4581 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
4582 		    sizeof(iwm_sf_full_timeout));
4584 		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
4585 		    sizeof(iwm_sf_full_timeout_def));
/*
 * Configure the firmware Smart FIFO for the requested state and send
 * the IWM_REPLY_SF_CFG_CMD asynchronously.  Unknown states are logged
 * and no command is sent.
 */
4590 iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4592 	struct ieee80211com *ic = &sc->sc_ic;
4593 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4594 	struct iwm_sf_cfg_cmd sf_cmd = {
4595 		.state = htole32(IWM_SF_FULL_ON),
	/* 8000-family firmware wants the dummy-notification bit set. */
4599 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4600 		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4602 	switch (new_state) {
4604 	case IWM_SF_INIT_OFF:
4605 		iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4607 	case IWM_SF_FULL_ON:
4608 		iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4611 		IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4612 		    "Invalid state: %d. not sending Smart Fifo cmd\n",
4617 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4618 	    sizeof(sf_cmd), &sf_cmd);
/* Send the initial Bluetooth coexistence configuration to the FW. */
4623 iwm_send_bt_init_conf(struct iwm_softc *sc)
4625 	struct iwm_bt_coex_cmd bt_cmd;
4627 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4628 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4630 	return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
/*
 * Send an MCC (mobile country code / regulatory domain) update to the
 * firmware for the two-letter country code alpha2, and log the domain
 * the firmware reports back.  Handles both v1 and v2 response layouts
 * depending on the LAR_SUPPORT_V2 capability.
 */
4635 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4637 	struct iwm_mcc_update_cmd mcc_cmd;
4638 	struct iwm_host_cmd hcmd = {
4639 		.id = IWM_MCC_UPDATE_CMD,
4640 		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4641 		.data = { &mcc_cmd },
4645 	struct iwm_rx_packet *pkt;
4646 	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4647 	struct iwm_mcc_update_resp *mcc_resp;
4651 	int resp_v2 = isset(sc->sc_enabled_capa,
4652 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4654 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	/* MCC is the two ASCII letters packed big-endian into 16 bits. */
4655 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4656 	if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4657 	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
4658 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4660 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
	/* Older firmware takes the shorter v1 command layout. */
4663 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4665 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4667 	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4668 	    "send MCC update to FW with '%c%c' src = %d\n",
4669 	    alpha2[0], alpha2[1], mcc_cmd.source_id);
4671 	ret = iwm_send_cmd(sc, &hcmd);
4676 	pkt = hcmd.resp_pkt;
4678 	/* Extract MCC response */
4680 		mcc_resp = (void *)pkt->data;
4681 		mcc = mcc_resp->mcc;
4682 		n_channels =  le32toh(mcc_resp->n_channels);
4684 		mcc_resp_v1 = (void *)pkt->data;
4685 		mcc = mcc_resp_v1->mcc;
4686 		n_channels =  le32toh(mcc_resp_v1->n_channels);
4689 	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4691 		mcc = 0x3030;	/* "00" - world */
4693 	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4694 	    "regulatory domain '%c%c' (%d channels available)\n",
4695 	    mcc >> 8, mcc & 0xff, n_channels);
	/* Release the response buffer held because of IWM_CMD_WANT_SKB. */
4697 	iwm_free_resp(sc, &hcmd);
/*
 * Set the thermal-throttling TX backoff value in the firmware.
 * Failures are logged but not propagated (best effort).
 */
4703 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4705 	struct iwm_host_cmd cmd = {
4706 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4707 		.len = { sizeof(uint32_t), },
4708 		.data = { &backoff, },
4711 	if (iwm_send_cmd(sc, &cmd) != 0) {
4712 		device_printf(sc->sc_dev,
4713 		    "failed to change thermal tx backoff\n");
/*
 * Full hardware bring-up: run the INIT ucode, restart with the regular
 * firmware, then configure BT coex, antennas, PHY, stations, PHY
 * contexts, power, regulatory, scan and the TX queues.  On any failure
 * the device is stopped again.  Returns 0 or errno.
 */
4718 iwm_init_hw(struct iwm_softc *sc)
4720 	struct ieee80211com *ic = &sc->sc_ic;
4723 	if ((error = iwm_start_hw(sc)) != 0) {
4724 		printf("iwm_start_hw: failed %d\n", error);
4728 	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4729 		printf("iwm_run_init_mvm_ucode: failed %d\n", error);
4734 	 * should stop and start HW since that INIT
4737 	iwm_stop_device(sc);
4738 	if ((error = iwm_start_hw(sc)) != 0) {
4739 		device_printf(sc->sc_dev, "could not initialize hardware\n");
4743 	/* omstart, this time with the regular firmware */
4744 	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4746 		device_printf(sc->sc_dev, "could not load firmware\n");
4750 	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4751 		device_printf(sc->sc_dev, "bt init conf failed\n");
4755 	error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
4757 		device_printf(sc->sc_dev, "antenna config failed\n");
4761 	/* Send phy db control command and then phy db calibration */
4762 	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4765 	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4766 		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4770 	/* Add auxiliary station for scanning */
4771 	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4772 		device_printf(sc->sc_dev, "add_aux_sta failed\n");
4776 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4778 		 * The channel used here isn't relevant as it's
4779 		 * going to be overwritten in the other flows.
4780 		 * For now use the first channel we have.
4782 		if ((error = iwm_mvm_phy_ctxt_add(sc,
4783 		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4787 	/* Initialize tx backoffs to the minimum. */
4788 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4789 		iwm_mvm_tt_tx_backoff(sc, 0);
4791 	error = iwm_mvm_power_update_device(sc);
	/* "ZZ" requests the current regulatory domain from the firmware. */
4795 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
4796 		if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4800 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4801 		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4805 	/* Enable Tx queues. */
4806 	for (ac = 0; ac < WME_NUM_AC; ac++) {
4807 		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4808 		    iwm_mvm_ac_to_tx_fifo[ac]);
4813 	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4814 		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
	/* Error path: bring the device back down. */
4821 	iwm_stop_device(sc);
4825 /* Allow multicast from our BSSID. */
/*
 * Program the firmware multicast filter to pass frames from the
 * current BSSID (and our own frames).  Allocates a zeroed command,
 * sends it synchronously and frees it.  Returns 0 or errno.
 */
4827 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4829 	struct ieee80211_node *ni = vap->iv_bss;
4830 	struct iwm_mcast_filter_cmd *cmd;
	/* Command length must be 4-byte aligned for the firmware. */
4834 	size = roundup(sizeof(*cmd), 4);
4835 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4838 	cmd->filter_own = 1;
4842 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4844 	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4845 	    IWM_CMD_SYNC, size, cmd);
4846 	free(cmd, M_DEVBUF);
/*
 * Driver init: run the full hardware bring-up and, on success, mark
 * the NIC as initialized and arm the watchdog callout.  No-op if the
 * hardware is already up.
 */
4856 iwm_init(struct iwm_softc *sc)
4860 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4863 	sc->sc_generation++;
4864 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
4866 	if ((error = iwm_init_hw(sc)) != 0) {
4867 		printf("iwm_init_hw failed %d\n", error);
4873 	 * Ok, firmware loaded and we are jogging
4875 	sc->sc_flags |= IWM_FLAG_HW_INITED;
4876 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
/*
 * net80211 transmit hook: queue the mbuf on the driver send queue
 * (drained by iwm_start).  Rejects frames while the NIC is down.
 */
4880 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4882 	struct iwm_softc *sc;
4888 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4892 	error = mbufq_enqueue(&sc->sc_snd, m);
4903  * Dequeue packets from sendq and call send.
4906 iwm_start(struct iwm_softc *sc)
4908 	struct ieee80211_node *ni;
4912 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
	/* Drain the send queue until a TX ring fills (qfullmsk != 0). */
4913 	while (sc->qfullmsk == 0 &&
4914 		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
		/* Node reference was stashed in rcvif by the enqueue path. */
4915 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4916 		if (iwm_tx(sc, m, ni, ac) != 0) {
4917 			if_inc_counter(ni->ni_vap->iv_ifp,
4918 			    IFCOUNTER_OERRORS, 1);
4919 			ieee80211_free_node(ni);
		/* Re-arm the watchdog tx timer for each queued frame. */
4922 		sc->sc_tx_timer = 15;
4924 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
/*
 * Bring the interface down: clear the INITED flag, bump the generation
 * (invalidates in-flight work), stop LED blinking, disarm the TX
 * watchdog and power down the device.
 */
4928 iwm_stop(struct iwm_softc *sc)
4931 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4932 	sc->sc_flags |= IWM_FLAG_STOPPED;
4933 	sc->sc_generation++;
4934 	iwm_led_blink_stop(sc);
4935 	sc->sc_tx_timer = 0;
4936 	iwm_stop_device(sc);
/*
 * Per-second watchdog callout.  If the TX timer armed by the transmit
 * paths expires, declare a device timeout and restart the 802.11
 * stack; otherwise re-arm the callout.
 */
4940 iwm_watchdog(void *arg)
4942 	struct iwm_softc *sc = arg;
4943 	struct ieee80211com *ic = &sc->sc_ic;
4945 	if (sc->sc_tx_timer > 0) {
4946 		if (--sc->sc_tx_timer == 0) {
4947 			device_printf(sc->sc_dev, "device timeout\n");
4951 			ieee80211_restart_all(ic);
4952 			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4956 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
/*
 * net80211 parent up/down hook: init the NIC when a vap starts running
 * and it is not yet initialized; stop it when the last vap goes down.
 */
4960 iwm_parent(struct ieee80211com *ic)
4962 	struct iwm_softc *sc = ic->ic_softc;
4966 	if (ic->ic_nrunning > 0) {
4967 		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4971 	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4975 		ieee80211_start_all(ic);
4979 * The interrupt side of things
4983 * error dumping routines are from iwlwifi/mvm/utils.c
4987 * Note: This structure is read from the device with IO accesses,
4988 * and the reading already does the endian conversion. As it is
4989 * read with uint32_t-sized accesses, any members with a different size
4990 * need to be ordered correctly though!
/* Firmware (LMAC) error log layout, read from SRAM on firmware crash.
 * Field order and sizes must match the firmware exactly
 * (LOG_ERROR_TABLE_API_S_VER_3, per the trailing tag). */
4992 struct iwm_error_event_table {
4993 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
4994 	uint32_t error_id;	/* type of error */
4995 	uint32_t trm_hw_status0;	/* TRM HW status */
4996 	uint32_t trm_hw_status1;	/* TRM HW status */
4997 	uint32_t blink2;	/* branch link */
4998 	uint32_t ilink1;	/* interrupt link */
4999 	uint32_t ilink2;	/* interrupt link */
5000 	uint32_t data1;		/* error-specific data */
5001 	uint32_t data2;		/* error-specific data */
5002 	uint32_t data3;		/* error-specific data */
5003 	uint32_t bcon_time;	/* beacon timer */
5004 	uint32_t tsf_low;	/* network timestamp function timer */
5005 	uint32_t tsf_hi;	/* network timestamp function timer */
5006 	uint32_t gp1;		/* GP1 timer register */
5007 	uint32_t gp2;		/* GP2 timer register */
5008 	uint32_t fw_rev_type;	/* firmware revision type */
5009 	uint32_t major;		/* uCode version major */
5010 	uint32_t minor;		/* uCode version minor */
5011 	uint32_t hw_ver;	/* HW Silicon version */
5012 	uint32_t brd_ver;	/* HW board version */
5013 	uint32_t log_pc;	/* log program counter */
5014 	uint32_t frame_ptr;	/* frame pointer */
5015 	uint32_t stack_ptr;	/* stack pointer */
5016 	uint32_t hcmd;		/* last host command header */
5017 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
5019 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
5021 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
5023 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
5025 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
5027 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
5028 	uint32_t wait_event;	/* wait event() caller address */
5029 	uint32_t l2p_control;	/* L2pControlField */
5030 	uint32_t l2p_duration;	/* L2pDurationField */
5031 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
5032 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
5033 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
5035 	uint32_t u_timestamp;	/* indicate when the date and time of the
5037 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
5038 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5041 * UMAC error struct - relevant starting from family 8000 chip.
5042 * Note: This structure is read from the device with IO accesses,
5043 * and the reading already does the endian conversion. As it is
5044 * read with u32-sized accesses, any members with a different size
5045 * need to be ordered correctly though!
/*
 * UMAC error event table — relevant starting from the 8000 chip family.
 * Same access rules as struct iwm_error_event_table: read with u32-sized
 * IO accesses, so member order/size must match the firmware layout.
 * The closing brace was lost in extraction and is restored.
 */
struct iwm_umac_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t umac_major;	/* UMAC uCode version major */
	uint32_t umac_minor;	/* UMAC uCode version minor */
	uint32_t frame_pointer;	/* core register 27 */
	uint32_t stack_pointer;	/* core register 28 */
	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
	uint32_t nic_isr_pref;	/* ISR status register */
} __packed;
5065 #define ERROR_START_OFFSET (1 * sizeof(uint32_t))
5066 #define ERROR_ELEM_SIZE (7 * sizeof(uint32_t))
/*
 * Map firmware SYSASSERT error ids to human-readable names for the
 * error-log dump.  The final entry ("ADVANCED_SYSASSERT", 0) is the
 * catch-all default and must stay last.
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};

/*
 * Return the symbolic name for firmware error id 'num'.
 * Falls back to "ADVANCED_SYSASSERT" when the id is unknown.
 */
static const char *
iwm_desc_lookup(uint32_t num)
{
	size_t i;

	/* Search all entries except the last (the catch-all). */
	for (i = 0; i < (sizeof(advanced_lookup) /
	    sizeof(advanced_lookup[0])) - 1; i++)
		if (advanced_lookup[i].num == num)
			return advanced_lookup[i].name;

	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
	return advanced_lookup[i].name;
}
5105 iwm_nic_umac_error(struct iwm_softc *sc)
5107 struct iwm_umac_error_event_table table;
5110 base = sc->sc_uc.uc_umac_error_event_table;
5112 if (base < 0x800000) {
5113 device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5118 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5119 device_printf(sc->sc_dev, "reading errlog failed\n");
5123 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5124 device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5125 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5126 sc->sc_flags, table.valid);
5129 device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5130 iwm_desc_lookup(table.error_id));
5131 device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5132 device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5133 device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5135 device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5137 device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5138 device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5139 device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5140 device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5141 device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5142 device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5143 table.frame_pointer);
5144 device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5145 table.stack_pointer);
5146 device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5147 device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5148 table.nic_isr_pref);
5152 * Support for dumping the error log seemed like a good idea ...
5153 * but it's mostly hex junk and the only sensible thing is the
5154 * hw/ucode revision (which we know anyway). Since it's here,
5155 * I'll just leave it in, just in case e.g. the Intel guys want to
5156 * help us decipher some "ADVANCED_SYSASSERT" later.
5159 iwm_nic_error(struct iwm_softc *sc)
5161 struct iwm_error_event_table table;
5164 device_printf(sc->sc_dev, "dumping device error log\n");
5165 base = sc->sc_uc.uc_error_event_table;
5166 if (base < 0x800000) {
5167 device_printf(sc->sc_dev,
5168 "Invalid error log pointer 0x%08x\n", base);
5172 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5173 device_printf(sc->sc_dev, "reading errlog failed\n");
5178 device_printf(sc->sc_dev, "errlog not found, skipping\n");
5182 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5183 device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5184 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5185 sc->sc_flags, table.valid);
5188 device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5189 iwm_desc_lookup(table.error_id));
5190 device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5191 table.trm_hw_status0);
5192 device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5193 table.trm_hw_status1);
5194 device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5195 device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5196 device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5197 device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5198 device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5199 device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5200 device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5201 device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5202 device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5203 device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5204 device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5205 device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5207 device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5208 device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5209 device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5210 device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5211 device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5212 device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5213 device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5214 device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5215 device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5216 device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5217 device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5218 device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5219 device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5220 device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5221 device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5222 device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5223 device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5224 device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5225 device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5227 if (sc->sc_uc.uc_umac_error_event_table)
5228 iwm_nic_umac_error(sc);
5232 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
5235 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5236 * Basic structure from if_iwn
5239 iwm_notif_intr(struct iwm_softc *sc)
5241 struct ieee80211com *ic = &sc->sc_ic;
5244 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5245 BUS_DMASYNC_POSTREAD);
5247 hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5252 while (sc->rxq.cur != hw) {
5253 struct iwm_rx_ring *ring = &sc->rxq;
5254 struct iwm_rx_data *data = &ring->data[ring->cur];
5255 struct iwm_rx_packet *pkt;
5256 struct iwm_cmd_response *cresp;
5259 bus_dmamap_sync(ring->data_dmat, data->map,
5260 BUS_DMASYNC_POSTREAD);
5261 pkt = mtod(data->m, struct iwm_rx_packet *);
5263 qid = pkt->hdr.qid & ~0x80;
5266 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5267 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5268 "rx packet qid=%d idx=%d type=%x %d %d\n",
5269 pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, ring->cur, hw);
5272 * randomly get these from the firmware, no idea why.
5273 * they at least seem harmless, so just ignore them for now
5275 if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
5276 || pkt->len_n_flags == htole32(0x55550000))) {
5281 iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5284 case IWM_REPLY_RX_PHY_CMD:
5285 iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
5288 case IWM_REPLY_RX_MPDU_CMD:
5289 iwm_mvm_rx_rx_mpdu(sc, pkt, data);
5293 iwm_mvm_rx_tx_cmd(sc, pkt, data);
5296 case IWM_MISSED_BEACONS_NOTIFICATION: {
5297 struct iwm_missed_beacons_notif *resp;
5300 /* XXX look at mac_id to determine interface ID */
5301 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5303 resp = (void *)pkt->data;
5304 missed = le32toh(resp->consec_missed_beacons);
5306 IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5307 "%s: MISSED_BEACON: mac_id=%d, "
5308 "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5311 le32toh(resp->mac_id),
5312 le32toh(resp->consec_missed_beacons_since_last_rx),
5313 le32toh(resp->consec_missed_beacons),
5314 le32toh(resp->num_expected_beacons),
5315 le32toh(resp->num_recvd_beacons));
5321 /* XXX no net80211 locking? */
5322 if (vap->iv_state == IEEE80211_S_RUN &&
5323 (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5324 if (missed > vap->iv_bmissthreshold) {
5325 /* XXX bad locking; turn into task */
5327 ieee80211_beacon_miss(ic);
5334 case IWM_MFUART_LOAD_NOTIFICATION:
5337 case IWM_MVM_ALIVE: {
5338 struct iwm_mvm_alive_resp_v1 *resp1;
5339 struct iwm_mvm_alive_resp_v2 *resp2;
5340 struct iwm_mvm_alive_resp_v3 *resp3;
5342 if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
5343 resp1 = (void *)pkt->data;
5344 sc->sc_uc.uc_error_event_table
5345 = le32toh(resp1->error_event_table_ptr);
5346 sc->sc_uc.uc_log_event_table
5347 = le32toh(resp1->log_event_table_ptr);
5348 sc->sched_base = le32toh(resp1->scd_base_ptr);
5349 if (resp1->status == IWM_ALIVE_STATUS_OK)
5350 sc->sc_uc.uc_ok = 1;
5352 sc->sc_uc.uc_ok = 0;
5355 if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
5356 resp2 = (void *)pkt->data;
5357 sc->sc_uc.uc_error_event_table
5358 = le32toh(resp2->error_event_table_ptr);
5359 sc->sc_uc.uc_log_event_table
5360 = le32toh(resp2->log_event_table_ptr);
5361 sc->sched_base = le32toh(resp2->scd_base_ptr);
5362 sc->sc_uc.uc_umac_error_event_table
5363 = le32toh(resp2->error_info_addr);
5364 if (resp2->status == IWM_ALIVE_STATUS_OK)
5365 sc->sc_uc.uc_ok = 1;
5367 sc->sc_uc.uc_ok = 0;
5370 if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
5371 resp3 = (void *)pkt->data;
5372 sc->sc_uc.uc_error_event_table
5373 = le32toh(resp3->error_event_table_ptr);
5374 sc->sc_uc.uc_log_event_table
5375 = le32toh(resp3->log_event_table_ptr);
5376 sc->sched_base = le32toh(resp3->scd_base_ptr);
5377 sc->sc_uc.uc_umac_error_event_table
5378 = le32toh(resp3->error_info_addr);
5379 if (resp3->status == IWM_ALIVE_STATUS_OK)
5380 sc->sc_uc.uc_ok = 1;
5382 sc->sc_uc.uc_ok = 0;
5385 sc->sc_uc.uc_intr = 1;
5389 case IWM_CALIB_RES_NOTIF_PHY_DB:
5390 iwm_phy_db_set_section(sc->sc_phy_db, pkt);
5393 case IWM_STATISTICS_NOTIFICATION: {
5394 struct iwm_notif_statistics *stats;
5395 stats = (void *)pkt->data;
5396 memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5397 sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
5400 case IWM_NVM_ACCESS_CMD:
5401 case IWM_MCC_UPDATE_CMD:
5402 if (sc->sc_wantresp == ((qid << 16) | idx)) {
5403 memcpy(sc->sc_cmd_resp,
5404 pkt, sizeof(sc->sc_cmd_resp));
5408 case IWM_MCC_CHUB_UPDATE_CMD: {
5409 struct iwm_mcc_chub_notif *notif;
5410 notif = (void *)pkt->data;
5412 sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5413 sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5414 sc->sc_fw_mcc[2] = '\0';
5415 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
5416 "fw source %d sent CC '%s'\n",
5417 notif->source_id, sc->sc_fw_mcc);
5420 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5423 case IWM_PHY_CONFIGURATION_CMD:
5424 case IWM_TX_ANT_CONFIGURATION_CMD:
5426 case IWM_MAC_CONTEXT_CMD:
5427 case IWM_REPLY_SF_CFG_CMD:
5428 case IWM_POWER_TABLE_CMD:
5429 case IWM_PHY_CONTEXT_CMD:
5430 case IWM_BINDING_CONTEXT_CMD:
5431 case IWM_TIME_EVENT_CMD:
5432 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5433 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5434 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5435 case IWM_REPLY_BEACON_FILTERING_CMD:
5436 case IWM_MAC_PM_POWER_TABLE:
5437 case IWM_TIME_QUOTA_CMD:
5438 case IWM_REMOVE_STA:
5439 case IWM_TXPATH_FLUSH:
5442 case IWM_REPLY_THERMAL_MNG_BACKOFF:
5443 cresp = (void *)pkt->data;
5444 if (sc->sc_wantresp == ((qid << 16) | idx)) {
5445 memcpy(sc->sc_cmd_resp,
5446 pkt, sizeof(*pkt)+sizeof(*cresp));
5451 case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
5454 case IWM_INIT_COMPLETE_NOTIF:
5455 sc->sc_init_complete = 1;
5456 wakeup(&sc->sc_init_complete);
5459 case IWM_SCAN_OFFLOAD_COMPLETE: {
5460 struct iwm_periodic_scan_complete *notif;
5461 notif = (void *)pkt->data;
5465 case IWM_SCAN_ITERATION_COMPLETE: {
5466 struct iwm_lmac_scan_complete_notif *notif;
5467 notif = (void *)pkt->data;
5468 ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5472 case IWM_SCAN_COMPLETE_UMAC: {
5473 struct iwm_umac_scan_complete *notif;
5474 notif = (void *)pkt->data;
5476 IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
5477 "UMAC scan complete, status=0x%x\n",
5479 #if 0 /* XXX This would be a duplicate scan end call */
5480 taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
5485 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5486 struct iwm_umac_scan_iter_complete_notif *notif;
5487 notif = (void *)pkt->data;
5489 IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5490 "complete, status=0x%x, %d channels scanned\n",
5491 notif->status, notif->scanned_channels);
5492 ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5496 case IWM_REPLY_ERROR: {
5497 struct iwm_error_resp *resp;
5498 resp = (void *)pkt->data;
5500 device_printf(sc->sc_dev,
5501 "firmware error 0x%x, cmd 0x%x\n",
5502 le32toh(resp->error_type),
5507 case IWM_TIME_EVENT_NOTIFICATION: {
5508 struct iwm_time_event_notif *notif;
5509 notif = (void *)pkt->data;
5511 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5512 "TE notif status = 0x%x action = 0x%x\n",
5513 notif->status, notif->action);
5517 case IWM_MCAST_FILTER_CMD:
5520 case IWM_SCD_QUEUE_CFG: {
5521 struct iwm_scd_txq_cfg_rsp *rsp;
5522 rsp = (void *)pkt->data;
5524 IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5525 "queue cfg token=0x%x sta_id=%d "
5526 "tid=%d scd_queue=%d\n",
5527 rsp->token, rsp->sta_id, rsp->tid,
5533 device_printf(sc->sc_dev,
5534 "frame %d/%d %x UNHANDLED (this should "
5535 "not happen)\n", qid, idx,
5541 * Why test bit 0x80? The Linux driver:
5543 * There is one exception: uCode sets bit 15 when it
5544 * originates the response/notification, i.e. when the
5545 * response/notification is not a direct response to a
5546 * command sent by the driver. For example, uCode issues
5547 * IWM_REPLY_RX when it sends a received frame to the driver;
5548 * it is not a direct response to any driver command.
5550 * Ok, so since when is 7 == 15? Well, the Linux driver
5551 * uses a slightly different format for pkt->hdr, and "qid"
5552 * is actually the upper byte of a two-byte field.
5554 if (!(pkt->hdr.qid & (1 << 7))) {
5555 iwm_cmd_done(sc, pkt);
5561 IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
5562 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
5565 * Tell the firmware what we have processed.
5566 * Seems like the hardware gets upset unless we align
5569 hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5570 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
5576 struct iwm_softc *sc = arg;
5582 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5584 if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5585 uint32_t *ict = sc->ict_dma.vaddr;
5588 tmp = htole32(ict[sc->ict_cur]);
5593 * ok, there was something. keep plowing until we have all.
5598 ict[sc->ict_cur] = 0;
5599 sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5600 tmp = htole32(ict[sc->ict_cur]);
5603 /* this is where the fun begins. don't ask */
5604 if (r1 == 0xffffffff)
5607 /* i am not expected to understand this */
5610 r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5612 r1 = IWM_READ(sc, IWM_CSR_INT);
5613 /* "hardware gone" (where, fishing?) */
5614 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5616 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5618 if (r1 == 0 && r2 == 0) {
5622 IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5625 handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
5627 if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5629 struct ieee80211com *ic = &sc->sc_ic;
5630 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5635 /* Dump driver status (TX and RX rings) while we're here. */
5636 device_printf(sc->sc_dev, "driver status:\n");
5637 for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5638 struct iwm_tx_ring *ring = &sc->txq[i];
5639 device_printf(sc->sc_dev,
5640 " tx ring %2d: qid=%-2d cur=%-3d "
5642 i, ring->qid, ring->cur, ring->queued);
5644 device_printf(sc->sc_dev,
5645 " rx ring: cur=%d\n", sc->rxq.cur);
5646 device_printf(sc->sc_dev,
5647 " 802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5649 /* Don't stop the device; just do a VAP restart */
5653 printf("%s: null vap\n", __func__);
5657 device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5658 "restarting\n", __func__, vap->iv_state);
5660 /* XXX TODO: turn this into a callout/taskqueue */
5661 ieee80211_restart_all(ic);
5665 if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5666 handled |= IWM_CSR_INT_BIT_HW_ERR;
5667 device_printf(sc->sc_dev, "hardware error, stopping device\n");
5673 /* firmware chunk loaded */
5674 if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5675 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5676 handled |= IWM_CSR_INT_BIT_FH_TX;
5677 sc->sc_fw_chunk_done = 1;
5681 if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5682 handled |= IWM_CSR_INT_BIT_RF_KILL;
5683 if (iwm_check_rfkill(sc)) {
5684 device_printf(sc->sc_dev,
5685 "%s: rfkill switch, disabling interface\n",
5692 * The Linux driver uses periodic interrupts to avoid races.
5693 * We cargo-cult like it's going out of fashion.
5695 if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5696 handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5697 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5698 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5700 IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5704 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5705 handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5706 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5710 /* enable periodic interrupt, see above */
5711 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5712 IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5713 IWM_CSR_INT_PERIODIC_ENA);
5716 if (__predict_false(r1 & ~handled))
5717 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5718 "%s: unhandled interrupts: %x\n", __func__, r1);
5722 iwm_restore_interrupts(sc);
5729 * Autoconf glue-sniffing
/*
 * PCI IDs of the supported Intel 3160/3165/7260/7265/8260 adapters,
 * and the probe table mapping each device ID to its marketing name.
 */
#define PCI_VENDOR_INTEL		0x8086
#define PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
#define PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
#define PCI_PRODUCT_INTEL_WL_3165_1	0x3165
#define PCI_PRODUCT_INTEL_WL_3165_2	0x3166
#define PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
#define PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
#define PCI_PRODUCT_INTEL_WL_7265_1	0x095a
#define PCI_PRODUCT_INTEL_WL_7265_2	0x095b
#define PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
#define PCI_PRODUCT_INTEL_WL_8260_2	0x24f4

static const struct iwm_devices {
	uint16_t	device;
	const char	*name;
} iwm_devices[] = {
	{ PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
	{ PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
	{ PCI_PRODUCT_INTEL_WL_3165_1, "Intel Dual Band Wireless AC 3165" },
	{ PCI_PRODUCT_INTEL_WL_3165_2, "Intel Dual Band Wireless AC 3165" },
	{ PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
	{ PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
	{ PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
	{ PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
	{ PCI_PRODUCT_INTEL_WL_8260_1, "Intel Dual Band Wireless AC 8260" },
	{ PCI_PRODUCT_INTEL_WL_8260_2, "Intel Dual Band Wireless AC 8260" },
};
5760 iwm_probe(device_t dev)
5764 for (i = 0; i < nitems(iwm_devices); i++) {
5765 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5766 pci_get_device(dev) == iwm_devices[i].device) {
5767 device_set_desc(dev, iwm_devices[i].name);
5768 return (BUS_PROBE_DEFAULT);
5776 iwm_dev_check(device_t dev)
5778 struct iwm_softc *sc;
5780 sc = device_get_softc(dev);
5782 switch (pci_get_device(dev)) {
5783 case PCI_PRODUCT_INTEL_WL_3160_1:
5784 case PCI_PRODUCT_INTEL_WL_3160_2:
5785 sc->cfg = &iwm3160_cfg;
5786 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5788 case PCI_PRODUCT_INTEL_WL_3165_1:
5789 case PCI_PRODUCT_INTEL_WL_3165_2:
5790 sc->cfg = &iwm3165_cfg;
5791 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5793 case PCI_PRODUCT_INTEL_WL_7260_1:
5794 case PCI_PRODUCT_INTEL_WL_7260_2:
5795 sc->cfg = &iwm7260_cfg;
5796 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5798 case PCI_PRODUCT_INTEL_WL_7265_1:
5799 case PCI_PRODUCT_INTEL_WL_7265_2:
5800 sc->cfg = &iwm7265_cfg;
5801 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5803 case PCI_PRODUCT_INTEL_WL_8260_1:
5804 case PCI_PRODUCT_INTEL_WL_8260_2:
5805 sc->cfg = &iwm8260_cfg;
5806 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
5809 device_printf(dev, "unknown adapter type\n");
5815 iwm_pci_attach(device_t dev)
5817 struct iwm_softc *sc;
5818 int count, error, rid;
5821 sc = device_get_softc(dev);
5823 /* Clear device-specific "PCI retry timeout" register (41h). */
5824 reg = pci_read_config(dev, 0x40, sizeof(reg));
5825 pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
5827 /* Enable bus-mastering and hardware bug workaround. */
5828 pci_enable_busmaster(dev);
5829 reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5831 if (reg & PCIM_STATUS_INTxSTATE) {
5832 reg &= ~PCIM_STATUS_INTxSTATE;
5834 pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5837 sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5839 if (sc->sc_mem == NULL) {
5840 device_printf(sc->sc_dev, "can't map mem space\n");
5843 sc->sc_st = rman_get_bustag(sc->sc_mem);
5844 sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5846 /* Install interrupt handler. */
5849 if (pci_alloc_msi(dev, &count) == 0)
5851 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5852 (rid != 0 ? 0 : RF_SHAREABLE));
5853 if (sc->sc_irq == NULL) {
5854 device_printf(dev, "can't map interrupt\n");
5857 error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5858 NULL, iwm_intr, sc, &sc->sc_ih);
5859 if (sc->sc_ih == NULL) {
5860 device_printf(dev, "can't establish interrupt");
5863 sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5869 iwm_pci_detach(device_t dev)
5871 struct iwm_softc *sc = device_get_softc(dev);
5873 if (sc->sc_irq != NULL) {
5874 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5875 bus_release_resource(dev, SYS_RES_IRQ,
5876 rman_get_rid(sc->sc_irq), sc->sc_irq);
5877 pci_release_msi(dev);
5879 if (sc->sc_mem != NULL)
5880 bus_release_resource(dev, SYS_RES_MEMORY,
5881 rman_get_rid(sc->sc_mem), sc->sc_mem);
5887 iwm_attach(device_t dev)
5889 struct iwm_softc *sc = device_get_softc(dev);
5890 struct ieee80211com *ic = &sc->sc_ic;
5895 sc->sc_attached = 1;
5897 mbufq_init(&sc->sc_snd, ifqmaxlen);
5898 callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5899 callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
5900 TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5902 sc->sc_notif_wait = iwm_notification_wait_init(sc);
5903 if (sc->sc_notif_wait == NULL) {
5904 device_printf(dev, "failed to init notification wait struct\n");
5909 sc->sc_phy_db = iwm_phy_db_init(sc);
5910 if (!sc->sc_phy_db) {
5911 device_printf(dev, "Cannot init phy_db\n");
5916 error = iwm_pci_attach(dev);
5920 sc->sc_wantresp = -1;
5922 /* Check device type */
5923 error = iwm_dev_check(dev);
5927 sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
5929 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
5930 * changed, and now the revision step also includes bit 0-1 (no more
5931 * "dash" value). To keep hw_rev backwards compatible - we'll store it
5932 * in the old format.
5934 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
5935 sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
5936 (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
5938 if (iwm_prepare_card_hw(sc) != 0) {
5939 device_printf(dev, "could not initialize hardware\n");
5943 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
5948 * In order to recognize C step the driver should read the
5949 * chip version id located at the AUX bus MISC address.
5951 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
5952 IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
5955 ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
5956 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5957 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5960 device_printf(sc->sc_dev,
5961 "Failed to wake up the nic\n");
5965 if (iwm_nic_lock(sc)) {
5966 hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
5967 hw_step |= IWM_ENABLE_WFPM;
5968 iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
5969 hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
5970 hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
5972 sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
5973 (IWM_SILICON_C_STEP << 2);
5976 device_printf(sc->sc_dev, "Failed to lock the nic\n");
5981 /* special-case 7265D, it has the same PCI IDs. */
5982 if (sc->cfg == &iwm7265_cfg &&
5983 (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
5984 sc->cfg = &iwm7265d_cfg;
5987 /* Allocate DMA memory for firmware transfers. */
5988 if ((error = iwm_alloc_fwmem(sc)) != 0) {
5989 device_printf(dev, "could not allocate memory for firmware\n");
5993 /* Allocate "Keep Warm" page. */
5994 if ((error = iwm_alloc_kw(sc)) != 0) {
5995 device_printf(dev, "could not allocate keep warm page\n");
5999 /* We use ICT interrupts */
6000 if ((error = iwm_alloc_ict(sc)) != 0) {
6001 device_printf(dev, "could not allocate ICT table\n");
6005 /* Allocate TX scheduler "rings". */
6006 if ((error = iwm_alloc_sched(sc)) != 0) {
6007 device_printf(dev, "could not allocate TX scheduler rings\n");
6011 /* Allocate TX rings */
6012 for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
6013 if ((error = iwm_alloc_tx_ring(sc,
6014 &sc->txq[txq_i], txq_i)) != 0) {
6016 "could not allocate TX ring %d\n",
6022 /* Allocate RX ring. */
6023 if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
6024 device_printf(dev, "could not allocate RX ring\n");
6028 /* Clear pending interrupts. */
6029 IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
6032 ic->ic_name = device_get_nameunit(sc->sc_dev);
6033 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
6034 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
6036 /* Set device capabilities. */
6039 IEEE80211_C_WPA | /* WPA/RSN */
6041 IEEE80211_C_SHSLOT | /* short slot time supported */
6042 IEEE80211_C_SHPREAMBLE /* short preamble supported */
6043 // IEEE80211_C_BGSCAN /* capable of bg scanning */
6045 /* Advertise full-offload scanning */
6046 ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
6047 for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
6048 sc->sc_phyctxt[i].id = i;
6049 sc->sc_phyctxt[i].color = 0;
6050 sc->sc_phyctxt[i].ref = 0;
6051 sc->sc_phyctxt[i].channel = NULL;
6054 /* Default noise floor */
6058 sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
6060 sc->sc_preinit_hook.ich_func = iwm_preinit;
6061 sc->sc_preinit_hook.ich_arg = sc;
6062 if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6063 device_printf(dev, "config_intrhook_establish failed\n");
6068 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6069 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6070 CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6073 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6074 "<-%s\n", __func__);
6078 /* Free allocated memory if something failed during attachment. */
6080 iwm_detach_local(sc, 0);
6086 iwm_is_valid_ether_addr(uint8_t *addr)
6088 char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6090 if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6097 iwm_update_edca(struct ieee80211com *ic)
6099 struct iwm_softc *sc = ic->ic_softc;
6101 device_printf(sc->sc_dev, "%s: called\n", __func__);
6106 iwm_preinit(void *arg)
6108 struct iwm_softc *sc = arg;
6109 device_t dev = sc->sc_dev;
6110 struct ieee80211com *ic = &sc->sc_ic;
6113 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6114 "->%s\n", __func__);
6117 if ((error = iwm_start_hw(sc)) != 0) {
6118 device_printf(dev, "could not initialize hardware\n");
6123 error = iwm_run_init_mvm_ucode(sc, 1);
6124 iwm_stop_device(sc);
6130 "hw rev 0x%x, fw ver %s, address %s\n",
6131 sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6132 sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6134 /* not all hardware can do 5GHz band */
6135 if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6136 memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6137 sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6140 iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6144 * At this point we've committed - if we fail to do setup,
6145 * we now also have to tear down the net80211 state.
6147 ieee80211_ifattach(ic);
6148 ic->ic_vap_create = iwm_vap_create;
6149 ic->ic_vap_delete = iwm_vap_delete;
6150 ic->ic_raw_xmit = iwm_raw_xmit;
6151 ic->ic_node_alloc = iwm_node_alloc;
6152 ic->ic_scan_start = iwm_scan_start;
6153 ic->ic_scan_end = iwm_scan_end;
6154 ic->ic_update_mcast = iwm_update_mcast;
6155 ic->ic_getradiocaps = iwm_init_channel_map;
6156 ic->ic_set_channel = iwm_set_channel;
6157 ic->ic_scan_curchan = iwm_scan_curchan;
6158 ic->ic_scan_mindwell = iwm_scan_mindwell;
6159 ic->ic_wme.wme_update = iwm_update_edca;
6160 ic->ic_parent = iwm_parent;
6161 ic->ic_transmit = iwm_transmit;
6162 iwm_radiotap_attach(sc);
6164 ieee80211_announce(ic);
6166 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6167 "<-%s\n", __func__);
6168 config_intrhook_disestablish(&sc->sc_preinit_hook);
6172 config_intrhook_disestablish(&sc->sc_preinit_hook);
6173 iwm_detach_local(sc, 0);
6177 * Attach the interface to 802.11 radiotap.
6180 iwm_radiotap_attach(struct iwm_softc *sc)
6182 struct ieee80211com *ic = &sc->sc_ic;
6184 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6185 "->%s begin\n", __func__);
6186 ieee80211_radiotap_attach(ic,
6187 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6188 IWM_TX_RADIOTAP_PRESENT,
6189 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6190 IWM_RX_RADIOTAP_PRESENT);
6191 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6192 "->%s end\n", __func__);
6195 static struct ieee80211vap *
6196 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6197 enum ieee80211_opmode opmode, int flags,
6198 const uint8_t bssid[IEEE80211_ADDR_LEN],
6199 const uint8_t mac[IEEE80211_ADDR_LEN])
6201 struct iwm_vap *ivp;
6202 struct ieee80211vap *vap;
6204 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
6206 ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6208 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6209 vap->iv_bmissthreshold = 10; /* override default */
6210 /* Override with driver methods. */
6211 ivp->iv_newstate = vap->iv_newstate;
6212 vap->iv_newstate = iwm_newstate;
6214 ieee80211_ratectl_init(vap);
6215 /* Complete setup. */
6216 ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6218 ic->ic_opmode = opmode;
6224 iwm_vap_delete(struct ieee80211vap *vap)
6226 struct iwm_vap *ivp = IWM_VAP(vap);
6228 ieee80211_ratectl_deinit(vap);
6229 ieee80211_vap_detach(vap);
6230 free(ivp, M_80211_VAP);
6234 iwm_scan_start(struct ieee80211com *ic)
6236 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6237 struct iwm_softc *sc = ic->ic_softc;
6241 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6242 error = iwm_mvm_umac_scan(sc);
6244 error = iwm_mvm_lmac_scan(sc);
6246 device_printf(sc->sc_dev, "could not initiate 2 GHz scan\n");
6248 ieee80211_cancel_scan(vap);
6250 iwm_led_blink_start(sc);
6256 iwm_scan_end(struct ieee80211com *ic)
6258 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6259 struct iwm_softc *sc = ic->ic_softc;
6262 iwm_led_blink_stop(sc);
6263 if (vap->iv_state == IEEE80211_S_RUN)
6264 iwm_mvm_led_enable(sc);
/*
 * net80211 callback stubs.  Function bodies were absent from this
 * extract; empty stubs match the visible signatures and the fact that
 * multicast filtering, channel programming and per-channel dwell are
 * driven by firmware in this driver — verify against upstream.
 */
static void
iwm_update_mcast(struct ieee80211com *ic)
{
}

static void
iwm_set_channel(struct ieee80211com *ic)
{
}

static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}

static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
6290 iwm_init_task(void *arg1)
6292 struct iwm_softc *sc = arg1;
6295 while (sc->sc_flags & IWM_FLAG_BUSY)
6296 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6297 sc->sc_flags |= IWM_FLAG_BUSY;
6299 if (sc->sc_ic.ic_nrunning > 0)
6301 sc->sc_flags &= ~IWM_FLAG_BUSY;
6302 wakeup(&sc->sc_flags);
6307 iwm_resume(device_t dev)
6309 struct iwm_softc *sc = device_get_softc(dev);
6313 /* Clear device-specific "PCI retry timeout" register (41h). */
6314 reg = pci_read_config(dev, 0x40, sizeof(reg));
6315 pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
6316 iwm_init_task(device_get_softc(dev));
6319 if (sc->sc_flags & IWM_FLAG_SCANNING) {
6320 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6326 ieee80211_resume_all(&sc->sc_ic);
6332 iwm_suspend(device_t dev)
6335 struct iwm_softc *sc = device_get_softc(dev);
6337 do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6339 ieee80211_suspend_all(&sc->sc_ic);
6344 sc->sc_flags |= IWM_FLAG_SCANNING;
6352 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6354 struct iwm_fw_info *fw = &sc->sc_fw;
6355 device_t dev = sc->sc_dev;
6358 if (!sc->sc_attached)
6360 sc->sc_attached = 0;
6363 ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6365 callout_drain(&sc->sc_led_blink_to);
6366 callout_drain(&sc->sc_watchdog_to);
6367 iwm_stop_device(sc);
6369 ieee80211_ifdetach(&sc->sc_ic);
6372 iwm_phy_db_free(sc->sc_phy_db);
6373 sc->sc_phy_db = NULL;
6375 iwm_free_nvm_data(sc->nvm_data);
6377 /* Free descriptor rings */
6378 iwm_free_rx_ring(sc, &sc->rxq);
6379 for (i = 0; i < nitems(sc->txq); i++)
6380 iwm_free_tx_ring(sc, &sc->txq[i]);
6383 if (fw->fw_fp != NULL)
6384 iwm_fw_info_free(fw);
6386 /* Free scheduler */
6387 iwm_dma_contig_free(&sc->sched_dma);
6388 iwm_dma_contig_free(&sc->ict_dma);
6389 iwm_dma_contig_free(&sc->kw_dma);
6390 iwm_dma_contig_free(&sc->fw_dma);
6392 /* Finished with the hardware - detach things */
6393 iwm_pci_detach(dev);
6395 if (sc->sc_notif_wait != NULL) {
6396 iwm_notification_wait_free(sc->sc_notif_wait);
6397 sc->sc_notif_wait = NULL;
6400 mbufq_drain(&sc->sc_snd);
6401 IWM_LOCK_DESTROY(sc);
6407 iwm_detach(device_t dev)
6409 struct iwm_softc *sc = device_get_softc(dev);
6411 return (iwm_detach_local(sc, 1));
6414 static device_method_t iwm_pci_methods[] = {
6415 /* Device interface */
6416 DEVMETHOD(device_probe, iwm_probe),
6417 DEVMETHOD(device_attach, iwm_attach),
6418 DEVMETHOD(device_detach, iwm_detach),
6419 DEVMETHOD(device_suspend, iwm_suspend),
6420 DEVMETHOD(device_resume, iwm_resume),
6425 static driver_t iwm_pci_driver = {
6428 sizeof (struct iwm_softc)
6431 static devclass_t iwm_devclass;
6433 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6434 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6435 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6436 MODULE_DEPEND(iwm, wlan, 1, 1, 1);