1 /* $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $ */
4 * Copyright (c) 2014 genua mbh <info@genua.de>
5 * Copyright (c) 2014 Fixup Software Ltd.
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22 * which were used as the reference documentation for this implementation.
24 * Driver version we are currently based off of is
25 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
27 ***********************************************************************
29 * This file is provided under a dual BSD/GPLv2 license. When using or
30 * redistributing this file, you may do so under either license.
34 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
36 * This program is free software; you can redistribute it and/or modify
37 * it under the terms of version 2 of the GNU General Public License as
38 * published by the Free Software Foundation.
40 * This program is distributed in the hope that it will be useful, but
41 * WITHOUT ANY WARRANTY; without even the implied warranty of
42 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
43 * General Public License for more details.
45 * You should have received a copy of the GNU General Public License
46 * along with this program; if not, write to the Free Software
47 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
50 * The full GNU General Public License is included in this distribution
51 * in the file called COPYING.
53 * Contact Information:
54 * Intel Linux Wireless <ilw@linux.intel.com>
55 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
60 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61 * All rights reserved.
63 * Redistribution and use in source and binary forms, with or without
64 * modification, are permitted provided that the following conditions
67 * * Redistributions of source code must retain the above copyright
68 * notice, this list of conditions and the following disclaimer.
69 * * Redistributions in binary form must reproduce the above copyright
70 * notice, this list of conditions and the following disclaimer in
71 * the documentation and/or other materials provided with the
73 * * Neither the name Intel Corporation nor the names of its
74 * contributors may be used to endorse or promote products derived
75 * from this software without specific prior written permission.
77 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
91 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
93 * Permission to use, copy, modify, and distribute this software for any
94 * purpose with or without fee is hereby granted, provided that the above
95 * copyright notice and this permission notice appear in all copies.
97 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
105 #include <sys/cdefs.h>
106 __FBSDID("$FreeBSD$");
108 #include "opt_wlan.h"
110 #include <sys/param.h>
112 #include <sys/conf.h>
113 #include <sys/endian.h>
114 #include <sys/firmware.h>
115 #include <sys/kernel.h>
116 #include <sys/malloc.h>
117 #include <sys/mbuf.h>
118 #include <sys/mutex.h>
119 #include <sys/module.h>
120 #include <sys/proc.h>
121 #include <sys/rman.h>
122 #include <sys/socket.h>
123 #include <sys/sockio.h>
124 #include <sys/sysctl.h>
125 #include <sys/linker.h>
127 #include <machine/bus.h>
128 #include <machine/endian.h>
129 #include <machine/resource.h>
131 #include <dev/pci/pcivar.h>
132 #include <dev/pci/pcireg.h>
137 #include <net/if_var.h>
138 #include <net/if_arp.h>
139 #include <net/if_dl.h>
140 #include <net/if_media.h>
141 #include <net/if_types.h>
143 #include <netinet/in.h>
144 #include <netinet/in_systm.h>
145 #include <netinet/if_ether.h>
146 #include <netinet/ip.h>
148 #include <net80211/ieee80211_var.h>
149 #include <net80211/ieee80211_regdomain.h>
150 #include <net80211/ieee80211_ratectl.h>
151 #include <net80211/ieee80211_radiotap.h>
153 #include <dev/iwm/if_iwmreg.h>
154 #include <dev/iwm/if_iwmvar.h>
155 #include <dev/iwm/if_iwm_debug.h>
156 #include <dev/iwm/if_iwm_notif_wait.h>
157 #include <dev/iwm/if_iwm_util.h>
158 #include <dev/iwm/if_iwm_binding.h>
159 #include <dev/iwm/if_iwm_phy_db.h>
160 #include <dev/iwm/if_iwm_mac_ctxt.h>
161 #include <dev/iwm/if_iwm_phy_ctxt.h>
162 #include <dev/iwm/if_iwm_time_event.h>
163 #include <dev/iwm/if_iwm_power.h>
164 #include <dev/iwm/if_iwm_scan.h>
166 #include <dev/iwm/if_iwm_pcie_trans.h>
167 #include <dev/iwm/if_iwm_led.h>
169 #define IWM_NVM_HW_SECTION_NUM_FAMILY_7000 0
170 #define IWM_NVM_HW_SECTION_NUM_FAMILY_8000 10
172 /* lower blocks contain EEPROM image and calibration data */
173 #define IWM_OTP_LOW_IMAGE_SIZE_FAMILY_7000 (16 * 512 * sizeof(uint16_t)) /* 16 KB */
174 #define IWM_OTP_LOW_IMAGE_SIZE_FAMILY_8000 (32 * 512 * sizeof(uint16_t)) /* 32 KB */
176 #define IWM7260_FW "iwm7260fw"
177 #define IWM3160_FW "iwm3160fw"
178 #define IWM7265_FW "iwm7265fw"
179 #define IWM7265D_FW "iwm7265Dfw"
180 #define IWM8000_FW "iwm8000Cfw"
182 #define IWM_DEVICE_7000_COMMON \
183 .device_family = IWM_DEVICE_FAMILY_7000, \
184 .eeprom_size = IWM_OTP_LOW_IMAGE_SIZE_FAMILY_7000, \
185 .nvm_hw_section_num = IWM_NVM_HW_SECTION_NUM_FAMILY_7000, \
188 const struct iwm_cfg iwm7260_cfg = {
189 .fw_name = IWM7260_FW,
190 IWM_DEVICE_7000_COMMON,
191 .host_interrupt_operation_mode = 1,
194 const struct iwm_cfg iwm3160_cfg = {
195 .fw_name = IWM3160_FW,
196 IWM_DEVICE_7000_COMMON,
197 .host_interrupt_operation_mode = 1,
200 const struct iwm_cfg iwm3165_cfg = {
201 /* XXX IWM7265D_FW doesn't seem to work properly yet */
202 .fw_name = IWM7265_FW,
203 IWM_DEVICE_7000_COMMON,
204 .host_interrupt_operation_mode = 0,
207 const struct iwm_cfg iwm7265_cfg = {
208 .fw_name = IWM7265_FW,
209 IWM_DEVICE_7000_COMMON,
210 .host_interrupt_operation_mode = 0,
213 const struct iwm_cfg iwm7265d_cfg = {
214 /* XXX IWM7265D_FW doesn't seem to work properly yet */
215 .fw_name = IWM7265_FW,
216 IWM_DEVICE_7000_COMMON,
217 .host_interrupt_operation_mode = 0,
220 #define IWM_DEVICE_8000_COMMON \
221 .device_family = IWM_DEVICE_FAMILY_8000, \
222 .eeprom_size = IWM_OTP_LOW_IMAGE_SIZE_FAMILY_8000, \
223 .nvm_hw_section_num = IWM_NVM_HW_SECTION_NUM_FAMILY_8000
225 const struct iwm_cfg iwm8260_cfg = {
226 .fw_name = IWM8000_FW,
227 IWM_DEVICE_8000_COMMON,
228 .host_interrupt_operation_mode = 0,
231 const uint8_t iwm_nvm_channels[] = {
233 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
235 36, 40, 44, 48, 52, 56, 60, 64,
236 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
237 149, 153, 157, 161, 165
239 _Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
240 "IWM_NUM_CHANNELS is too small");
242 const uint8_t iwm_nvm_channels_8000[] = {
244 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
246 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
247 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
248 149, 153, 157, 161, 165, 169, 173, 177, 181
250 _Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
251 "IWM_NUM_CHANNELS_8000 is too small");
253 #define IWM_NUM_2GHZ_CHANNELS 14
254 #define IWM_N_HW_ADDR_MASK 0xF
257 * XXX For now, there's simply a fixed set of rate table entries
258 * that are populated.
260 const struct iwm_rate {
264 { 2, IWM_RATE_1M_PLCP },
265 { 4, IWM_RATE_2M_PLCP },
266 { 11, IWM_RATE_5M_PLCP },
267 { 22, IWM_RATE_11M_PLCP },
268 { 12, IWM_RATE_6M_PLCP },
269 { 18, IWM_RATE_9M_PLCP },
270 { 24, IWM_RATE_12M_PLCP },
271 { 36, IWM_RATE_18M_PLCP },
272 { 48, IWM_RATE_24M_PLCP },
273 { 72, IWM_RATE_36M_PLCP },
274 { 96, IWM_RATE_48M_PLCP },
275 { 108, IWM_RATE_54M_PLCP },
277 #define IWM_RIDX_CCK 0
278 #define IWM_RIDX_OFDM 4
279 #define IWM_RIDX_MAX (nitems(iwm_rates)-1)
280 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
281 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
283 struct iwm_nvm_section {
288 #define IWM_MVM_UCODE_ALIVE_TIMEOUT hz
289 #define IWM_MVM_UCODE_CALIB_TIMEOUT (2*hz)
291 struct iwm_mvm_alive_data {
293 uint32_t scd_base_addr;
296 static int iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
297 static int iwm_firmware_store_section(struct iwm_softc *,
299 const uint8_t *, size_t);
300 static int iwm_set_default_calib(struct iwm_softc *, const void *);
301 static void iwm_fw_info_free(struct iwm_fw_info *);
302 static int iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
303 static void iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
304 static int iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
305 bus_size_t, bus_size_t);
306 static void iwm_dma_contig_free(struct iwm_dma_info *);
307 static int iwm_alloc_fwmem(struct iwm_softc *);
308 static int iwm_alloc_sched(struct iwm_softc *);
309 static int iwm_alloc_kw(struct iwm_softc *);
310 static int iwm_alloc_ict(struct iwm_softc *);
311 static int iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
312 static void iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
313 static void iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
314 static int iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
316 static void iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
317 static void iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
318 static void iwm_enable_interrupts(struct iwm_softc *);
319 static void iwm_restore_interrupts(struct iwm_softc *);
320 static void iwm_disable_interrupts(struct iwm_softc *);
321 static void iwm_ict_reset(struct iwm_softc *);
322 static int iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
323 static void iwm_stop_device(struct iwm_softc *);
324 static void iwm_mvm_nic_config(struct iwm_softc *);
325 static int iwm_nic_rx_init(struct iwm_softc *);
326 static int iwm_nic_tx_init(struct iwm_softc *);
327 static int iwm_nic_init(struct iwm_softc *);
328 static int iwm_enable_txq(struct iwm_softc *, int, int, int);
329 static int iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
330 static int iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
331 uint16_t, uint8_t *, uint16_t *);
332 static int iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
333 uint16_t *, uint32_t);
334 static uint32_t iwm_eeprom_channel_flags(uint16_t);
335 static void iwm_add_channel_band(struct iwm_softc *,
336 struct ieee80211_channel[], int, int *, int, size_t,
338 static void iwm_init_channel_map(struct ieee80211com *, int, int *,
339 struct ieee80211_channel[]);
340 static struct iwm_nvm_data *
341 iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
342 const uint16_t *, const uint16_t *,
343 const uint16_t *, const uint16_t *,
345 static void iwm_free_nvm_data(struct iwm_nvm_data *);
346 static void iwm_set_hw_address_family_8000(struct iwm_softc *,
347 struct iwm_nvm_data *,
350 static int iwm_get_sku(const struct iwm_softc *, const uint16_t *,
352 static int iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
353 static int iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
355 static int iwm_get_n_hw_addrs(const struct iwm_softc *,
357 static void iwm_set_radio_cfg(const struct iwm_softc *,
358 struct iwm_nvm_data *, uint32_t);
359 static struct iwm_nvm_data *
360 iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
361 static int iwm_nvm_init(struct iwm_softc *);
362 static int iwm_pcie_load_section(struct iwm_softc *, uint8_t,
363 const struct iwm_fw_desc *);
364 static int iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
365 bus_addr_t, uint32_t);
366 static int iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
367 const struct iwm_fw_sects *,
369 static int iwm_pcie_load_cpu_sections(struct iwm_softc *,
370 const struct iwm_fw_sects *,
372 static int iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
373 const struct iwm_fw_sects *);
374 static int iwm_pcie_load_given_ucode(struct iwm_softc *,
375 const struct iwm_fw_sects *);
376 static int iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *);
377 static int iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
378 static int iwm_send_phy_cfg_cmd(struct iwm_softc *);
379 static int iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
380 enum iwm_ucode_type);
381 static int iwm_run_init_mvm_ucode(struct iwm_softc *, int);
382 static int iwm_rx_addbuf(struct iwm_softc *, int, int);
383 static int iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
384 static int iwm_mvm_get_signal_strength(struct iwm_softc *,
385 struct iwm_rx_phy_info *);
386 static void iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
387 struct iwm_rx_packet *,
388 struct iwm_rx_data *);
389 static int iwm_get_noise(struct iwm_softc *sc,
390 const struct iwm_mvm_statistics_rx_non_phy *);
391 static void iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
392 struct iwm_rx_data *);
393 static int iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
394 struct iwm_rx_packet *,
396 static void iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
397 struct iwm_rx_data *);
398 static void iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
400 static void iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
403 static const struct iwm_rate *
404 iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
405 struct mbuf *, struct iwm_tx_cmd *);
406 static int iwm_tx(struct iwm_softc *, struct mbuf *,
407 struct ieee80211_node *, int);
408 static int iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
409 const struct ieee80211_bpf_params *);
410 static int iwm_mvm_flush_tx_path(struct iwm_softc *sc,
411 uint32_t tfd_msk, uint32_t flags);
412 static int iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
413 struct iwm_mvm_add_sta_cmd_v7 *,
415 static int iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
417 static int iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
418 static int iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
419 static int iwm_mvm_add_int_sta_common(struct iwm_softc *,
420 struct iwm_int_sta *,
421 const uint8_t *, uint16_t, uint16_t);
422 static int iwm_mvm_add_aux_sta(struct iwm_softc *);
423 static int iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
424 static int iwm_auth(struct ieee80211vap *, struct iwm_softc *);
425 static int iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
426 static int iwm_release(struct iwm_softc *, struct iwm_node *);
427 static struct ieee80211_node *
428 iwm_node_alloc(struct ieee80211vap *,
429 const uint8_t[IEEE80211_ADDR_LEN]);
430 static void iwm_setrates(struct iwm_softc *, struct iwm_node *);
431 static int iwm_media_change(struct ifnet *);
432 static int iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
433 static void iwm_endscan_cb(void *, int);
434 static void iwm_mvm_fill_sf_command(struct iwm_softc *,
435 struct iwm_sf_cfg_cmd *,
436 struct ieee80211_node *);
437 static int iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
438 static int iwm_send_bt_init_conf(struct iwm_softc *);
439 static int iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
440 static void iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
441 static int iwm_init_hw(struct iwm_softc *);
442 static void iwm_init(struct iwm_softc *);
443 static void iwm_start(struct iwm_softc *);
444 static void iwm_stop(struct iwm_softc *);
445 static void iwm_watchdog(void *);
446 static void iwm_parent(struct ieee80211com *);
449 iwm_desc_lookup(uint32_t);
450 static void iwm_nic_error(struct iwm_softc *);
451 static void iwm_nic_umac_error(struct iwm_softc *);
453 static void iwm_notif_intr(struct iwm_softc *);
454 static void iwm_intr(void *);
455 static int iwm_attach(device_t);
456 static int iwm_is_valid_ether_addr(uint8_t *);
457 static void iwm_preinit(void *);
458 static int iwm_detach_local(struct iwm_softc *sc, int);
459 static void iwm_init_task(void *);
460 static void iwm_radiotap_attach(struct iwm_softc *);
461 static struct ieee80211vap *
462 iwm_vap_create(struct ieee80211com *,
463 const char [IFNAMSIZ], int,
464 enum ieee80211_opmode, int,
465 const uint8_t [IEEE80211_ADDR_LEN],
466 const uint8_t [IEEE80211_ADDR_LEN]);
467 static void iwm_vap_delete(struct ieee80211vap *);
468 static void iwm_scan_start(struct ieee80211com *);
469 static void iwm_scan_end(struct ieee80211com *);
470 static void iwm_update_mcast(struct ieee80211com *);
471 static void iwm_set_channel(struct ieee80211com *);
472 static void iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
473 static void iwm_scan_mindwell(struct ieee80211_scan_state *);
474 static int iwm_detach(device_t);
/*
 * Handle the IWM_UCODE_TLV_CSCHEME firmware section: validate that the
 * coalescing-scheme list fits inside the TLV payload.  Nothing is stored;
 * the driver always falls back to software crypto (see comment below).
 * Returns nonzero if the section is truncated.  (Parts of this function
 * are not visible in this view.)
 */
481 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
483 const struct iwm_fw_cscheme_list *l = (const void *)data;
/* Reject sections too short for the header or the advertised entries. */
485 if (dlen < sizeof(*l) ||
486 dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
489 /* we don't actually store anything for now, always use s/w crypto */
/*
 * Record one firmware image section (from an IWM_UCODE_TLV_SEC_* TLV)
 * into sc->sc_fw.fw_sects[type].  The first 32 bits of the payload are
 * the device load offset; the remainder is the section data itself,
 * which is referenced in place (not copied).  Fails when 'type' is out
 * of range, the payload is shorter than the offset word, or the section
 * table for this ucode type is already full.  (Some lines of this
 * function are not visible in this view.)
 */
495 iwm_firmware_store_section(struct iwm_softc *sc,
496 enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
498 struct iwm_fw_sects *fws;
499 struct iwm_fw_desc *fwone;
501 if (type >= IWM_UCODE_TYPE_MAX)
503 if (dlen < sizeof(uint32_t))
506 fws = &sc->sc_fw.fw_sects[type];
507 if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
510 fwone = &fws->fw_sect[fws->fw_count];
512 /* first 32bit are device load offset */
513 memcpy(&fwone->offset, data, sizeof(uint32_t));
/* Remaining bytes are the section image; keep a pointer into the blob. */
516 fwone->data = data + sizeof(uint32_t);
517 fwone->len = dlen - sizeof(uint32_t);
524 #define IWM_DEFAULT_SCAN_CHANNELS 40
526 /* iwlwifi: iwl-drv.c */
/*
 * On-the-wire layout of an IWM_UCODE_TLV_DEF_CALIB payload: a ucode type
 * followed by the default calibration control words (flow/event triggers).
 * Consumed by iwm_set_default_calib().  (Struct is only partially visible
 * in this view.)
 */
527 struct iwm_tlv_calib_data {
529 struct iwm_tlv_calib_ctrl calib;
/*
 * Handle an IWM_UCODE_TLV_DEF_CALIB payload: copy the firmware-provided
 * default calibration triggers (flow/event) into
 * sc->sc_default_calib[ucode_type].  Fails when the little-endian
 * ucode_type field is out of range.  (Some lines of this function are
 * not visible in this view.)
 */
533 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
535 const struct iwm_tlv_calib_data *def_calib = data;
536 uint32_t ucode_type = le32toh(def_calib->ucode_type);
/* Range-check the index before it is used to write the calib table. */
538 if (ucode_type >= IWM_UCODE_TYPE_MAX) {
539 device_printf(sc->sc_dev,
540 "Wrong ucode_type %u for default "
541 "calibration.\n", ucode_type);
545 sc->sc_default_calib[ucode_type].flow_trigger =
546 def_calib->calib.flow_trigger;
547 sc->sc_default_calib[ucode_type].event_trigger =
548 def_calib->calib.event_trigger;
/*
 * Release a previously loaded firmware image: drop the firmware(9)
 * reference (allowing the kernel to unload the image) and forget all
 * parsed section descriptors.  fw->fw_status is deliberately preserved
 * so the load/parse state machine in iwm_read_firmware() stays intact.
 */
554 iwm_fw_info_free(struct iwm_fw_info *fw)
556 firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
558 /* don't touch fw->fw_status */
559 memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
/*
 * Load the firmware image named by sc->cfg->fw_name via firmware(9) and
 * parse its TLV stream, populating sc->sc_fw (sections, capabilities,
 * API flags, calibration defaults, paging info, version string, ...).
 *
 * Concurrent callers are serialized through fw->fw_status: a caller
 * sleeps while another load is IWM_FW_STATUS_INPROGRESS, and a completed
 * load (IWM_FW_STATUS_DONE) is reused unless the INIT ucode is requested.
 * On parse failure the firmware reference is dropped and fw_status is
 * reset to IWM_FW_STATUS_NONE.
 *
 * NOTE(review): many original lines (returns, 'goto out's, 'break's,
 * closing braces) are not visible in this view of the file.
 */
563 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
565 struct iwm_fw_info *fw = &sc->sc_fw;
566 const struct iwm_tlv_ucode_header *uhdr;
567 struct iwm_ucode_tlv tlv;
568 enum iwm_ucode_tlv_type tlv_type;
569 const struct firmware *fwp;
571 uint32_t usniffer_img;
572 uint32_t paging_mem_size;
/* Reuse an already-parsed image unless the INIT ucode is wanted. */
577 if (fw->fw_status == IWM_FW_STATUS_DONE &&
578 ucode_type != IWM_UCODE_INIT)
/* Serialize with a concurrent loader; sleep on &sc->sc_fw. */
581 while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
582 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
583 fw->fw_status = IWM_FW_STATUS_INPROGRESS;
/* Drop any stale image before (re)loading. */
585 if (fw->fw_fp != NULL)
586 iwm_fw_info_free(fw);
589 * Load firmware into driver memory.
593 fwp = firmware_get(sc->cfg->fw_name);
596 device_printf(sc->sc_dev,
597 "could not read firmware %s (error %d)\n",
598 sc->cfg->fw_name, error);
603 /* (Re-)Initialize default values. */
604 sc->sc_capaflags = 0;
605 sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
606 memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
607 memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
610 * Parse firmware contents
/* Sanity-check the TLV ucode header: leading zero word + magic. */
613 uhdr = (const void *)fw->fw_fp->data;
614 if (*(const uint32_t *)fw->fw_fp->data != 0
615 || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
616 device_printf(sc->sc_dev, "invalid firmware %s\n",
/* Provisional version string; may be overridden by TLV_FW_VERSION. */
622 snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
623 IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
624 IWM_UCODE_MINOR(le32toh(uhdr->ver)),
625 IWM_UCODE_API(le32toh(uhdr->ver)));
627 len = fw->fw_fp->datasize - sizeof(*uhdr);
/*
 * Walk the TLV stream.  Each iteration copies one TLV header (the data
 * may be unaligned), byte-swaps length/type, and dispatches on type.
 */
629 while (len >= sizeof(tlv)) {
631 const void *tlv_data;
633 memcpy(&tlv, data, sizeof(tlv));
634 tlv_len = le32toh(tlv.length);
635 tlv_type = le32toh(tlv.type);
/* A TLV claiming more payload than remains means a truncated file. */
642 device_printf(sc->sc_dev,
643 "firmware too short: %zu bytes\n",
649 switch ((int)tlv_type) {
650 case IWM_UCODE_TLV_PROBE_MAX_LEN:
651 if (tlv_len < sizeof(uint32_t)) {
652 device_printf(sc->sc_dev,
653 "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
659 sc->sc_capa_max_probe_len
660 = le32toh(*(const uint32_t *)tlv_data);
661 /* limit it to something sensible */
662 if (sc->sc_capa_max_probe_len >
663 IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
664 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
665 "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
666 "ridiculous\n", __func__);
671 case IWM_UCODE_TLV_PAN:
673 device_printf(sc->sc_dev,
674 "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
680 sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
682 case IWM_UCODE_TLV_FLAGS:
683 if (tlv_len < sizeof(uint32_t)) {
684 device_printf(sc->sc_dev,
685 "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
692 * Apparently there can be many flags, but Linux driver
693 * parses only the first one, and so do we.
695 * XXX: why does this override IWM_UCODE_TLV_PAN?
696 * Intentional or a bug? Observations from
697 * current firmware file:
698 * 1) TLV_PAN is parsed first
699 * 2) TLV_FLAGS contains TLV_FLAGS_PAN
700 * ==> this resets TLV_PAN to itself... hnnnk
702 sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
704 case IWM_UCODE_TLV_CSCHEME:
705 if ((error = iwm_store_cscheme(sc,
706 tlv_data, tlv_len)) != 0) {
707 device_printf(sc->sc_dev,
708 "%s: iwm_store_cscheme(): returned %d\n",
714 case IWM_UCODE_TLV_NUM_OF_CPU:
715 if (tlv_len != sizeof(uint32_t)) {
716 device_printf(sc->sc_dev,
717 "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
/* Only 1- and 2-CPU images are supported; flag dual-CPU sections. */
723 num_of_cpus = le32toh(*(const uint32_t *)tlv_data);
724 if (num_of_cpus == 2) {
725 fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus =
727 fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus =
729 fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus =
731 } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
732 device_printf(sc->sc_dev,
733 "%s: Driver supports only 1 or 2 CPUs\n",
/* Image sections: runtime, init and wowlan ucode respectively. */
739 case IWM_UCODE_TLV_SEC_RT:
740 if ((error = iwm_firmware_store_section(sc,
741 IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
742 device_printf(sc->sc_dev,
743 "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
749 case IWM_UCODE_TLV_SEC_INIT:
750 if ((error = iwm_firmware_store_section(sc,
751 IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
752 device_printf(sc->sc_dev,
753 "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
759 case IWM_UCODE_TLV_SEC_WOWLAN:
760 if ((error = iwm_firmware_store_section(sc,
761 IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
762 device_printf(sc->sc_dev,
763 "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
769 case IWM_UCODE_TLV_DEF_CALIB:
770 if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
771 device_printf(sc->sc_dev,
772 "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
775 (int) sizeof(struct iwm_tlv_calib_data));
779 if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
780 device_printf(sc->sc_dev,
781 "%s: iwm_set_default_calib() failed: %d\n",
787 case IWM_UCODE_TLV_PHY_SKU:
788 if (tlv_len != sizeof(uint32_t)) {
790 device_printf(sc->sc_dev,
791 "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
/* Derive valid TX/RX antenna masks from the PHY config word. */
796 sc->sc_fw.phy_config =
797 le32toh(*(const uint32_t *)tlv_data);
798 sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
799 IWM_FW_PHY_CFG_TX_CHAIN) >>
800 IWM_FW_PHY_CFG_TX_CHAIN_POS;
801 sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
802 IWM_FW_PHY_CFG_RX_CHAIN) >>
803 IWM_FW_PHY_CFG_RX_CHAIN_POS;
806 case IWM_UCODE_TLV_API_CHANGES_SET: {
807 const struct iwm_ucode_api *api;
808 if (tlv_len != sizeof(*api)) {
812 api = (const struct iwm_ucode_api *)tlv_data;
813 /* Flags may exceed 32 bits in future firmware. */
814 if (le32toh(api->api_index) > 0) {
815 device_printf(sc->sc_dev,
816 "unsupported API index %d\n",
817 le32toh(api->api_index));
820 sc->sc_ucode_api = le32toh(api->api_flags);
824 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
825 const struct iwm_ucode_capa *capa;
827 if (tlv_len != sizeof(*capa)) {
/* Each capa TLV carries a 32-bit chunk 'idx' of the capability bitmap. */
831 capa = (const struct iwm_ucode_capa *)tlv_data;
832 idx = le32toh(capa->api_index);
833 if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
834 device_printf(sc->sc_dev,
835 "unsupported API index %d\n", idx);
838 for (i = 0; i < 32; i++) {
839 if ((le32toh(capa->api_capa) & (1U << i)) == 0)
841 setbit(sc->sc_enabled_capa, i + (32 * idx));
846 case 48: /* undocumented TLV */
847 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
848 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
849 /* ignore, not used by current driver */
852 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
853 if ((error = iwm_firmware_store_section(sc,
854 IWM_UCODE_REGULAR_USNIFFER, tlv_data,
859 case IWM_UCODE_TLV_PAGING:
860 if (tlv_len != sizeof(uint32_t)) {
/* Paging size must be bounded and IWM_FW_PAGING_SIZE-aligned. */
864 paging_mem_size = le32toh(*(const uint32_t *)tlv_data);
866 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
867 "%s: Paging: paging enabled (size = %u bytes)\n",
868 __func__, paging_mem_size);
869 if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
870 device_printf(sc->sc_dev,
871 "%s: Paging: driver supports up to %u bytes for paging image\n",
872 __func__, IWM_MAX_PAGING_IMAGE_SIZE);
876 if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
877 device_printf(sc->sc_dev,
878 "%s: Paging: image isn't multiple %u\n",
879 __func__, IWM_FW_PAGING_SIZE);
884 sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
886 usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
887 sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
891 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
892 if (tlv_len != sizeof(uint32_t)) {
896 sc->sc_capa_n_scan_channels =
897 le32toh(*(const uint32_t *)tlv_data);
900 case IWM_UCODE_TLV_FW_VERSION:
901 if (tlv_len != sizeof(uint32_t) * 3) {
/* Replace the header-derived version string with the explicit one. */
905 snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
907 le32toh(((const uint32_t *)tlv_data)[0]),
908 le32toh(((const uint32_t *)tlv_data)[1]),
909 le32toh(((const uint32_t *)tlv_data)[2]));
912 case IWM_UCODE_TLV_FW_MEM_SEG:
916 device_printf(sc->sc_dev,
917 "%s: unknown firmware section %d, abort\n",
/* TLVs are 4-byte aligned in the stream; advance past padding too. */
923 len -= roundup(tlv_len, 4);
924 data += roundup(tlv_len, 4);
927 KASSERT(error == 0, ("unhandled error"));
931 device_printf(sc->sc_dev, "firmware parse error %d, "
932 "section type %d\n", error, tlv_type);
935 if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
936 device_printf(sc->sc_dev,
937 "device uses unsupported power ops\n");
/* Error path: release the image and mark the load as never-happened. */
943 fw->fw_status = IWM_FW_STATUS_NONE;
944 if (fw->fw_fp != NULL)
945 iwm_fw_info_free(fw);
947 fw->fw_status = IWM_FW_STATUS_DONE;
954 * DMA resource routines
/*
 * bus_dmamap_load() callback: store the single segment's bus address
 * into the bus_addr_t pointed to by 'arg'.  The mapping is created with
 * nsegs == 1 (contiguous allocation), which the KASSERT enforces.
 */
958 iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
962 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
963 *(bus_addr_t *)arg = segs[0].ds_addr;
/*
 * Allocate a physically contiguous, zeroed, coherent DMA buffer of
 * 'size' bytes with the given alignment, below 4 GB
 * (BUS_SPACE_MAXADDR_32BIT).  Fills dma->{tag,map,vaddr,paddr}; the
 * bus address is captured via the iwm_dma_map_addr() callback.  On any
 * failure the partially built state is torn down with
 * iwm_dma_contig_free().  (Some lines of this function are not visible
 * in this view.)
 */
967 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
968 bus_size_t size, bus_size_t alignment)
/* One segment of exactly 'size' bytes: maxsize == maxsegsize == size. */
977 error = bus_dma_tag_create(tag, alignment,
978 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
979 1, size, 0, NULL, NULL, &dma->tag);
983 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
984 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
988 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
989 iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
991 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
/* Make the zeroed contents visible to the device. */
996 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
/* Cleanup on failure. */
1001 iwm_dma_contig_free(dma);
/*
 * Tear down a buffer created by iwm_dma_contig_alloc(): sync, unload
 * and free the memory if it was allocated, then destroy the tag.  Safe
 * to call on a partially initialized iwm_dma_info (each step is
 * NULL-guarded).
 */
1007 iwm_dma_contig_free(struct iwm_dma_info *dma)
1009 if (dma->vaddr != NULL) {
1010 bus_dmamap_sync(dma->tag, dma->map,
1011 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1012 bus_dmamap_unload(dma->tag, dma->map);
1013 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
1016 if (dma->tag != NULL) {
1017 bus_dma_tag_destroy(dma->tag);
1022 /* fwmem is used to load firmware onto the card */
1024 iwm_alloc_fwmem(struct iwm_softc *sc)
1026 /* Must be aligned on a 16-byte boundary. */
1027 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
1028 IWM_FH_MEM_TB_MAX_LENGTH, 16);
1031 /* tx scheduler rings. not used? */
1033 iwm_alloc_sched(struct iwm_softc *sc)
1035 /* TX scheduler rings must be aligned on a 1KB boundary. */
1036 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
1037 nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
1040 /* keep-warm page is used internally by the card. see iwl-fh.h for more info */
1042 iwm_alloc_kw(struct iwm_softc *sc)
1044 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
1047 /* interrupt cause table */
1049 iwm_alloc_ict(struct iwm_softc *sc)
1051 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
1052 IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
/*
 * Set up the RX ring: descriptor array, status area, a DMA tag for RX
 * mbufs, a spare map for buffer rotation in iwm_rx_addbuf(), and one
 * mapped buffer per ring slot.  On any failure everything allocated so
 * far is released via the shared 'fail' path (iwm_free_rx_ring).
 * (Some lines of this function are not visible in this view.)
 */
1056 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1063 /* Allocate RX descriptors (256-byte aligned). */
1064 size = IWM_RX_RING_COUNT * sizeof(uint32_t);
1065 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1067 device_printf(sc->sc_dev,
1068 "could not allocate RX ring DMA memory\n");
1071 ring->desc = ring->desc_dma.vaddr;
1073 /* Allocate RX status area (16-byte aligned). */
1074 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1075 sizeof(*ring->stat), 16);
1077 device_printf(sc->sc_dev,
1078 "could not allocate RX status DMA memory\n");
1081 ring->stat = ring->stat_dma.vaddr;
1083 /* Create RX buffer DMA tag. */
1084 error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1085 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1086 IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
1088 device_printf(sc->sc_dev,
1089 "%s: could not create RX buf DMA tag, error %d\n",
1094 /* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
1095 error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
1097 device_printf(sc->sc_dev,
1098 "%s: could not create RX buf DMA map, error %d\n",
1103 * Allocate and map RX buffers.
1105 for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1106 struct iwm_rx_data *data = &ring->data[i];
/* One DMA map per slot, then attach an mbuf via iwm_rx_addbuf(). */
1107 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1109 device_printf(sc->sc_dev,
1110 "%s: could not create RX buf DMA map, error %d\n",
1116 if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
1122 fail:	iwm_free_rx_ring(sc, ring);
/*
 * Reset the RX ring's software state and clear the shared status area so
 * the hardware's notion of the ring index matches the driver's.
 */
1127 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1129 /* Reset the ring state */
1133 * The hw rx ring index in shared memory must also be cleared,
1134 * otherwise the discrepancy can cause reprocessing chaos.
/*
 * NOTE(review): this clears sc->rxq.stat rather than ring->stat; fine
 * while there is a single RX ring — confirm if more rings are added.
 */
1136 memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
/*
 * Release all RX ring resources in reverse allocation order: descriptor
 * and status DMA memory, then each slot's loaded buffer and map, the
 * spare map, and finally the buffer DMA tag.  Safe to call on a
 * partially constructed ring (NULL checks throughout).
 */
1140 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1144 iwm_dma_contig_free(&ring->desc_dma);
1145 iwm_dma_contig_free(&ring->stat_dma);
1147 for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1148 struct iwm_rx_data *data = &ring->data[i];
/* Sync for CPU and unload before the map is destroyed. */
1150 if (data->m != NULL) {
1151 bus_dmamap_sync(ring->data_dmat, data->map,
1152 BUS_DMASYNC_POSTREAD);
1153 bus_dmamap_unload(ring->data_dmat, data->map);
1157 if (data->map != NULL) {
1158 bus_dmamap_destroy(ring->data_dmat, data->map);
1162 if (ring->spare_map != NULL) {
1163 bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1164 ring->spare_map = NULL;
1166 if (ring->data_dmat != NULL) {
1167 bus_dma_tag_destroy(ring->data_dmat);
1168 ring->data_dmat = NULL;
/*
 * Allocate DMA resources for one TX ring (queue 'qid'): the TFD
 * descriptor array, a per-slot command buffer area, a DMA tag for
 * payloads, and one map per slot.  Rings above the command queue skip
 * the command-buffer allocation.  Failures unwind via "fail:".
 */
1173 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1185 /* Allocate TX descriptors (256-byte aligned). */
1186 size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1187 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1189 device_printf(sc->sc_dev,
1190 "could not allocate TX ring DMA memory\n");
1193 ring->desc = ring->desc_dma.vaddr;
1196 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
1197 * to allocate commands space for other rings.
1199 if (qid > IWM_MVM_CMD_QUEUE)
1202 size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1203 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1205 device_printf(sc->sc_dev,
1206 "could not allocate TX cmd DMA memory\n");
1209 ring->cmd = ring->cmd_dma.vaddr;
1211 /* FW commands may require more mapped space than packets. */
1212 if (qid == IWM_MVM_CMD_QUEUE) {
1213 maxsize = IWM_RBUF_SIZE;
1217 nsegments = IWM_MAX_SCATTER - 2;
1220 error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1221 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
1222 nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
1224 device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
/*
 * Precompute per-slot physical addresses into the command area:
 * each slot gets one struct iwm_device_cmd; scratch_paddr points at
 * the scratch field inside the TX command payload.
 */
1228 paddr = ring->cmd_dma.paddr;
1229 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1230 struct iwm_tx_data *data = &ring->data[i];
1232 data->cmd_paddr = paddr;
1233 data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1234 + offsetof(struct iwm_tx_cmd, scratch);
1235 paddr += sizeof(struct iwm_device_cmd);
1237 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1239 device_printf(sc->sc_dev,
1240 "could not create TX buf DMA map\n");
/* Sanity: the walk above must cover exactly the cmd area. */
1244 KASSERT(paddr == ring->cmd_dma.paddr + size,
1245 ("invalid physical address"));
1248 fail:	iwm_free_tx_ring(sc, ring);
/*
 * Reset a TX ring: unload any in-flight mbufs, zero the descriptor
 * array, clear this queue's bit in qfullmsk, and (for the command
 * queue) drop the "command in flight" NIC-awake request.
 */
1253 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1257 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1258 struct iwm_tx_data *data = &ring->data[i];
1260 if (data->m != NULL) {
1261 bus_dmamap_sync(ring->data_dmat, data->map,
1262 BUS_DMASYNC_POSTWRITE);
1263 bus_dmamap_unload(ring->data_dmat, data->map);
1268 /* Clear TX descriptors. */
1269 memset(ring->desc, 0, ring->desc_dma.size);
1270 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1271 BUS_DMASYNC_PREWRITE);
1272 sc->qfullmsk &= ~(1 << ring->qid);
1276 if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
1277 iwm_pcie_clear_cmd_in_flight(sc);
/*
 * Release all TX ring resources: descriptor and command DMA memory,
 * each slot's loaded mbuf and map, and the payload DMA tag.  Safe on a
 * partially constructed ring (NULL checks throughout).
 */
1281 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1285 iwm_dma_contig_free(&ring->desc_dma);
1286 iwm_dma_contig_free(&ring->cmd_dma);
1288 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1289 struct iwm_tx_data *data = &ring->data[i];
1291 if (data->m != NULL) {
1292 bus_dmamap_sync(ring->data_dmat, data->map,
1293 BUS_DMASYNC_POSTWRITE);
1294 bus_dmamap_unload(ring->data_dmat, data->map);
1298 if (data->map != NULL) {
1299 bus_dmamap_destroy(ring->data_dmat, data->map);
1303 if (ring->data_dmat != NULL) {
1304 bus_dma_tag_destroy(ring->data_dmat);
1305 ring->data_dmat = NULL;
1310 * High-level hardware frobbing routines
/* Enable the default interrupt set and remember it in sc_intmask. */
1314 iwm_enable_interrupts(struct iwm_softc *sc)
1316 sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1317 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
/* Re-write the previously saved interrupt mask to the hardware. */
1321 iwm_restore_interrupts(struct iwm_softc *sc)
1323 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
/* Mask all interrupts and ack any that are pending (CSR and FH). */
1327 iwm_disable_interrupts(struct iwm_softc *sc)
1329 /* disable interrupts */
1330 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1332 /* acknowledge all interrupts */
1333 IWM_WRITE(sc, IWM_CSR_INT, ~0);
1334 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
/*
 * (Re)initialize the interrupt cause table: disable interrupts, zero
 * the table, point the hardware at it, switch the driver into ICT
 * mode, then re-enable interrupts.
 */
1340 iwm_ict_reset(struct iwm_softc *sc)
1342 iwm_disable_interrupts(sc);
1344 /* Reset ICT table. */
1345 memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1346 /* Set physical address of ICT table (4KB aligned). */
1347 IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1348 IWM_CSR_DRAM_INT_TBL_ENABLE
1349 | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1350 | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1351 | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1353 /* Switch to ICT interrupt mode in driver. */
1354 sc->sc_flags |= IWM_FLAG_USE_ICT;
1356 /* Re-enable interrupts. */
1357 IWM_WRITE(sc, IWM_CSR_INT, ~0);
1358 iwm_enable_interrupts(sc);
1361 /* iwlwifi pcie/trans.c */
1364 * Since this .. hard-resets things, it's time to actually
1365 * mark the first vap (if any) as having no mac context.
1366 * It's annoying, but since the driver is potentially being
1367 * stop/start'ed whilst active (thanks openbsd port!) we
1368 * have to correctly track this.
/*
 * Bring the device down: mask interrupts, invalidate the first vap's
 * uploaded MAC context, stop TX/RX DMA, reset all rings, power down
 * busmaster clocks (7000 family), release the MAC-access request,
 * soft-reset the on-board processor, and re-arm RF-kill detection.
 */
1371 iwm_stop_device(struct iwm_softc *sc)
1373 struct ieee80211com *ic = &sc->sc_ic;
1374 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1378 /* tell the device to stop sending interrupts */
1379 iwm_disable_interrupts(sc);
1382 * FreeBSD-local: mark the first vap as not-uploaded,
1383 * so the next transition through auth/assoc
1384 * will correctly populate the MAC context.
1387 struct iwm_vap *iv = IWM_VAP(vap);
1388 iv->is_uploaded = 0;
1391 /* device going down, Stop using ICT table */
1392 sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1394 /* stop tx and rx. tx and rx bits, as usual, are from if_iwn */
1396 iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1398 if (iwm_nic_lock(sc)) {
1399 /* Stop each Tx DMA channel */
1400 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1402 IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1403 mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1406 /* Wait for DMA channels to be idle */
1407 if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1409 device_printf(sc->sc_dev,
1410 "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1411 IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1415 iwm_pcie_rx_stop(sc);
1418 iwm_reset_rx_ring(sc, &sc->rxq);
1420 /* Reset all TX rings. */
1421 for (qid = 0; qid < nitems(sc->txq); qid++)
1422 iwm_reset_tx_ring(sc, &sc->txq[qid]);
1424 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1425 /* Power-down device's busmaster DMA clocks */
1426 iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1427 IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1431 /* Make sure (redundant) we've released our request to stay awake */
1432 IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1433 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1435 /* Stop the device, and put it in low power state */
1438 /* Upon stop, the APM issues an interrupt if HW RF kill is set.
1439 * Clean again the interrupt here
1441 iwm_disable_interrupts(sc);
1442 /* stop and reset the on-board processor */
1443 IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1446 * Even if we stop the HW, we still want the RF kill
1449 iwm_enable_rfkill_int(sc);
1450 iwm_check_rfkill(sc);
1453 /* iwlwifi: mvm/ops.c */
/*
 * Program the HW interface configuration register from the firmware's
 * PHY configuration (radio type/step/dash) and the MAC revision bits,
 * then apply the 7000-family early-power-off workaround.
 */
1455 iwm_mvm_nic_config(struct iwm_softc *sc)
1457 uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1458 uint32_t reg_val = 0;
1459 uint32_t phy_config = iwm_mvm_get_phy_config(sc);
/* Decode radio type/step/dash fields from the PHY config word. */
1461 radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1462 IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1463 radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1464 IWM_FW_PHY_CFG_RADIO_STEP_POS;
1465 radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1466 IWM_FW_PHY_CFG_RADIO_DASH_POS;
1469 reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1470 IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1471 reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1472 IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1474 /* radio configuration */
1475 reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1476 reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1477 reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1479 IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1481 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1482 "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1483 radio_cfg_step, radio_cfg_dash);
1486 * W/A : NIC is stuck in a reset state after Early PCIe power off
1487 * (PCIe power is lost before PERST# is asserted), causing ME FW
1488 * to lose ownership and not being able to obtain it back.
1490 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1491 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1492 IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1493 ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
/*
 * Initialize the RX side of the NIC: clear the shared status area,
 * stop RX DMA, program the descriptor/status physical addresses and
 * the channel-0 RX configuration, set interrupt coalescing, then seed
 * the write pointer to 8 per the iwlwifi convention below.
 */
1498 iwm_nic_rx_init(struct iwm_softc *sc)
1501 * Initialize RX ring. This is from the iwn driver.
1503 memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1506 iwm_pcie_rx_stop(sc);
1508 if (!iwm_nic_lock(sc))
1511 /* reset and flush pointers */
1512 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1513 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1514 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1515 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1517 /* Set physical address of RX ring (256-byte aligned). */
1519 IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1521 /* Set physical address of RX status (16-byte aligned). */
1523 IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1526 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1527 IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
1528 IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | /* HW bug */
1529 IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
1530 IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
1531 (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1532 IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
1533 IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1535 IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1537 /* W/A for interrupt coalescing bug in 7260 and 3160 */
1538 if (sc->cfg->host_interrupt_operation_mode)
1539 IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1542 * Thus sayeth el jefe (iwlwifi) via a comment:
1544 * This value should initially be 0 (before preparing any
1545 * RBs), should be 8 after preparing the first 8 RBs (for example)
1547 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
/*
 * Initialize the TX side of the NIC: deactivate the scheduler, program
 * the keep-warm page address and each TX ring's descriptor base, then
 * enable the scheduler's auto-active mode.
 */
1555 iwm_nic_tx_init(struct iwm_softc *sc)
1559 if (!iwm_nic_lock(sc))
1562 /* Deactivate TX scheduler. */
1563 iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1565 /* Set physical address of "keep warm" page (16-byte aligned). */
1566 IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1568 /* Initialize TX rings. */
1569 for (qid = 0; qid < nitems(sc->txq); qid++) {
1570 struct iwm_tx_ring *txq = &sc->txq[qid];
1572 /* Set physical address of TX ring (256-byte aligned). */
1573 IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1574 txq->desc_dma.paddr >> 8);
1575 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1576 "%s: loading ring %d descriptors (%p) at %lx\n",
1579 (unsigned long) (txq->desc_dma.paddr >> 8));
1582 iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
/*
 * Top-level NIC bring-up: apply the MVM config, initialize RX and TX,
 * then enable shadow registers.  Returns non-zero on the first failed
 * sub-step.
 */
1590 iwm_nic_init(struct iwm_softc *sc)
1595 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1598 iwm_mvm_nic_config(sc);
1600 if ((error = iwm_nic_rx_init(sc)) != 0)
1604 * Ditto for TX, from iwn
1606 if ((error = iwm_nic_tx_init(sc)) != 0)
1609 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1610 "%s: shadow registers enabled\n", __func__);
1611 IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
/* Map WME access categories to hardware TX FIFO numbers (see callers). */
1616 const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
/*
 * Activate TX queue 'qid' on FIFO 'fifo' for station 'sta_id'.  The
 * command queue is configured directly through scheduler PRPH
 * registers; other queues are configured via an IWM_SCD_QUEUE_CFG
 * firmware command.  The NIC lock is re-taken around each register
 * access phase.
 */
1624 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1626 if (!iwm_nic_lock(sc)) {
1627 device_printf(sc->sc_dev,
1628 "%s: cannot enable txq %d\n",
/* Reset the queue's write pointer. */
1634 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1636 if (qid == IWM_MVM_CMD_QUEUE) {
1637 /* unactivate before configuration */
1638 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1639 (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1640 | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1644 iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1646 if (!iwm_nic_lock(sc)) {
1647 device_printf(sc->sc_dev,
1648 "%s: cannot enable txq %d\n", __func__, qid);
1651 iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1654 iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1655 /* Set scheduler window size and frame limit. */
1657 sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1659 ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1660 IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1661 ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1662 IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1664 if (!iwm_nic_lock(sc)) {
1665 device_printf(sc->sc_dev,
1666 "%s: cannot enable txq %d\n", __func__, qid);
/* Mark the queue active on its FIFO. */
1669 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1670 (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1671 (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1672 (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1673 IWM_SCD_QUEUE_STTS_REG_MSK);
1675 struct iwm_scd_txq_cfg_cmd cmd;
/* Non-command queues: ask the firmware to configure the queue. */
1680 memset(&cmd, 0, sizeof(cmd));
1681 cmd.scd_queue = qid;
1683 cmd.sta_id = sta_id;
1686 cmd.window = IWM_FRAME_LIMIT;
1688 error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1691 device_printf(sc->sc_dev,
1692 "cannot enable txq %d\n", qid);
1696 if (!iwm_nic_lock(sc))
/*
 * NOTE(review): ORs the raw qid (not 1 << qid) into IWM_SCD_EN_CTRL;
 * this mirrors the upstream driver — confirm against iwlwifi before
 * changing.
 */
1700 iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1701 iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1705 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1706 __func__, qid, fifo);
/*
 * Post-"alive" transport setup: verify the scheduler SRAM base address
 * reported by firmware, clear the scheduler context memory, program the
 * scheduler DRAM base, enable the command queue, activate all DMA
 * channels, and apply the L1-Active / chicken-bits settings.
 */
1712 iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
1716 int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1717 IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
1719 if (!iwm_nic_lock(sc))
1726 sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1727 if (scd_base_addr != 0 &&
1728 scd_base_addr != sc->scd_base_addr) {
1729 device_printf(sc->sc_dev,
1730 "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1731 __func__, sc->scd_base_addr, scd_base_addr);
1734 /* reset context data, TX status and translation data */
1735 error = iwm_write_mem(sc,
1736 sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1737 NULL, clear_dwords);
1741 if (!iwm_nic_lock(sc))
1744 /* Set physical address of TX scheduler rings (1KB aligned). */
1745 iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1747 iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1751 /* enable command channel */
1752 error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
1756 if (!iwm_nic_lock(sc))
/* Activate all TX scheduler FIFOs. */
1759 iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1761 /* Enable DMA channels. */
1762 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1763 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1764 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1765 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1768 IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1769 IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1773 /* Enable L1-Active */
1774 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
1775 iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1776 IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1783 * NVM read access and content parsing. We do not support
1784 * external NVM or writing NVM.
1788 /* Default NVM size to read */
1789 #define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
/* Opcodes for the NVM_ACCESS firmware command. */
1791 #define IWM_NVM_WRITE_OPCODE 1
1792 #define IWM_NVM_READ_OPCODE 0
1794 /* load nvm chunk response */
/* Status codes returned in iwm_nvm_access_resp->status. */
1796 IWM_READ_NVM_CHUNK_SUCCEED = 0,
1797 IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
/*
 * Read one chunk of NVM 'section' at 'offset' (up to 'length' bytes)
 * via the NVM_ACCESS firmware command.  The bytes read are copied into
 * data + offset and their count stored in *len.  A NOT_VALID_ADDRESS
 * status at a non-zero offset simply marks the end of the section.
 */
1801 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1802 uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1804 struct iwm_nvm_access_cmd nvm_access_cmd = {
1805 .offset = htole16(offset),
1806 .length = htole16(length),
1807 .type = htole16(section),
1808 .op_code = IWM_NVM_READ_OPCODE,
1810 struct iwm_nvm_access_resp *nvm_resp;
1811 struct iwm_rx_packet *pkt;
1812 struct iwm_host_cmd cmd = {
1813 .id = IWM_NVM_ACCESS_CMD,
1814 .flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1815 .data = { &nvm_access_cmd, },
1817 int ret, bytes_read, offset_read;
1820 cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1822 ret = iwm_send_cmd(sc, &cmd);
1824 device_printf(sc->sc_dev,
1825 "Could not send NVM_ACCESS command (error=%d)\n", ret);
1831 /* Extract NVM response */
1832 nvm_resp = (void *)pkt->data;
1833 ret = le16toh(nvm_resp->status);
1834 bytes_read = le16toh(nvm_resp->length);
1835 offset_read = le16toh(nvm_resp->offset);
1836 resp_data = nvm_resp->data;
1838 if ((offset != 0) &&
1839 (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1841 * meaning of NOT_VALID_ADDRESS:
1842 * driver try to read chunk from address that is
1843 * multiple of 2K and got an error since addr is empty.
1844 * meaning of (offset != 0): driver already
1845 * read valid data from another chunk so this case
1848 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1849 "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
1854 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1855 "NVM access command failed with status %d\n", ret);
/* Sanity-check the response against what was requested. */
1861 if (offset_read != offset) {
1862 device_printf(sc->sc_dev,
1863 "NVM ACCESS response with invalid offset %d\n",
1869 if (bytes_read > length) {
1870 device_printf(sc->sc_dev,
1871 "NVM ACCESS response with too much data "
1872 "(%d bytes requested, %d bytes received)\n",
1873 length, bytes_read);
1878 /* Write data to NVM */
1879 memcpy(data + offset, resp_data, bytes_read);
/* Always release the response buffer acquired via IWM_CMD_WANT_SKB. */
1883 iwm_free_resp(sc, &cmd);
1888 * Reads an NVM section completely.
1889 * NICs prior to 7000 family don't have a real NVM, but just read
1890 * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1891 * by uCode, we need to manually check in this case that we don't
1892 * overflow and try to read more than the EEPROM size.
1893 * For 7000 family NICs, we supply the maximal size we can read, and
1894 * the uCode fills the response with as much data as we can,
1895 * without overflowing, so no check is needed.
1898 iwm_nvm_read_section(struct iwm_softc *sc,
1899 uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1901 uint16_t seglen, length, offset = 0;
1904 /* Set nvm section read length */
1905 length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1909 /* Read the NVM until exhausted (reading less than requested) */
1910 while (seglen == length) {
1911 /* Check no memory assumptions fail and cause an overflow */
1912 if ((size_read + offset + length) >
1913 sc->cfg->eeprom_size) {
1914 device_printf(sc->sc_dev,
1915 "EEPROM size is too small for NVM\n");
1919 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1921 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1922 "Cannot read NVM from section %d offset %d, length %d\n",
1923 section, offset, length);
1929 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1930 "NVM section %d read completed\n", section);
1936 * BEGIN IWM_NVM_PARSE
1939 /* iwlwifi/iwl-nvm-parse.c */
1941 /* NVM offsets (in words) definitions */
1942 enum iwm_nvm_offsets {
1943 /* NVM HW-Section offset (in words) definitions */
1946 /* NVM SW-Section offset (in words) definitions */
1947 IWM_NVM_SW_SECTION = 0x1C0,
1948 IWM_NVM_VERSION = 0,
/* Channel flags start at absolute word 0x1E0, relative to SW section. */
1952 IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1954 /* NVM calibration section offset (in words) definitions */
1955 IWM_NVM_CALIB_SECTION = 0x2B8,
1956 IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
/* Word offsets for 8000-family NVM layout (sections differ from 7000). */
1959 enum iwm_8000_nvm_offsets {
1960 /* NVM HW-Section offset (in words) definitions */
1961 IWM_HW_ADDR0_WFPM_8000 = 0x12,
1962 IWM_HW_ADDR1_WFPM_8000 = 0x16,
1963 IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1964 IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1965 IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1967 /* NVM SW-Section offset (in words) definitions */
1968 IWM_NVM_SW_SECTION_8000 = 0x1C0,
1969 IWM_NVM_VERSION_8000 = 0,
1970 IWM_RADIO_CFG_8000 = 0,
1972 IWM_N_HW_ADDRS_8000 = 3,
1974 /* NVM REGULATORY -Section offset (in words) definitions */
1975 IWM_NVM_CHANNELS_8000 = 0,
1976 IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1977 IWM_NVM_LAR_OFFSET_8000 = 0x507,
1978 IWM_NVM_LAR_ENABLED_8000 = 0x7,
1980 /* NVM calibration section offset (in words) definitions */
1981 IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1982 IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1985 /* SKU Capabilities (actual values from NVM definition) */
/* Bit flags tested against the SKU word in iwm_parse_nvm_data(). */
1987 IWM_NVM_SKU_CAP_BAND_24GHZ = (1 << 0),
1988 IWM_NVM_SKU_CAP_BAND_52GHZ = (1 << 1),
1989 IWM_NVM_SKU_CAP_11N_ENABLE = (1 << 2),
1990 IWM_NVM_SKU_CAP_11AC_ENABLE = (1 << 3),
1993 /* radio config bits (actual values from NVM definition) */
/* Field extractors for the pre-8000 radio-config word. */
1994 #define IWM_NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */
1995 #define IWM_NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
1996 #define IWM_NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
1997 #define IWM_NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
1998 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
1999 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
/* 8000-family radio-config word uses wider fields at different positions. */
2001 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x) (x & 0xF)
2002 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x) ((x >> 4) & 0xF)
2003 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x) ((x >> 8) & 0xF)
2004 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x) ((x >> 12) & 0xFFF)
2005 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x) ((x >> 24) & 0xF)
2006 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x) ((x >> 28) & 0xF)
2008 #define DEFAULT_MAX_TX_POWER 16
2011 * enum iwm_nvm_channel_flags - channel flags in NVM
2012 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
2013 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
2014 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
2015 * @IWM_NVM_CHANNEL_RADAR: radar detection required
2016 * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
2017 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
2018 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
2019 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
2020 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
2021 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
2023 enum iwm_nvm_channel_flags {
2024 IWM_NVM_CHANNEL_VALID = (1 << 0),
2025 IWM_NVM_CHANNEL_IBSS = (1 << 1),
2026 IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
2027 IWM_NVM_CHANNEL_RADAR = (1 << 4),
2028 IWM_NVM_CHANNEL_DFS = (1 << 7),
2029 IWM_NVM_CHANNEL_WIDE = (1 << 8),
2030 IWM_NVM_CHANNEL_40MHZ = (1 << 9),
2031 IWM_NVM_CHANNEL_80MHZ = (1 << 10),
2032 IWM_NVM_CHANNEL_160MHZ = (1 << 11),
2036 * Translate EEPROM flags to net80211.
/*
 * Map NVM channel flags to net80211 channel flags: no ACTIVE bit means
 * passive scan only, no IBSS bit forbids adhoc, and RADAR implies DFS
 * (which also forbids adhoc).
 */
2039 iwm_eeprom_channel_flags(uint16_t ch_flags)
2044 if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
2045 nflags |= IEEE80211_CHAN_PASSIVE;
2046 if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
2047 nflags |= IEEE80211_CHAN_NOADHOC;
2048 if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
2049 nflags |= IEEE80211_CHAN_DFS;
2051 nflags |= IEEE80211_CHAN_NOADHOC;
/*
 * Register the NVM channels in [ch_idx, ch_num) with net80211 for the
 * modes listed in 'bands', skipping channels whose VALID flag is clear.
 * Channel numbers come from the family-specific iwm_nvm_channels table.
 */
2058 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
2059 int maxchans, int *nchans, int ch_idx, size_t ch_num,
2060 const uint8_t bands[])
2062 const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
2068 for (; ch_idx < ch_num; ch_idx++) {
2069 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2070 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2071 ieee = iwm_nvm_channels[ch_idx];
2073 ieee = iwm_nvm_channels_8000[ch_idx];
2075 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2076 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2077 "Ch. %d Flags %x [%sGHz] - No traffic\n",
2079 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2084 nflags = iwm_eeprom_channel_flags(ch_flags);
2085 error = ieee80211_add_channel(chans, maxchans, nchans,
2086 ieee, 0, 0, nflags, bands);
2090 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2091 "Ch. %d Flags %x [%sGHz] - Added\n",
2093 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
/*
 * net80211 ic_getradiocaps callback: build the channel list from NVM
 * data — 2GHz channels 1-13 as 11b/g, channel 14 as 11b-only, and the
 * 5GHz channels as 11a if the SKU enables the 52GHz band.
 */
2099 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2100 struct ieee80211_channel chans[])
2102 struct iwm_softc *sc = ic->ic_softc;
2103 struct iwm_nvm_data *data = sc->nvm_data;
2104 uint8_t bands[IEEE80211_MODE_BYTES];
2107 memset(bands, 0, sizeof(bands));
2108 /* 1-13: 11b/g channels. */
2109 setbit(bands, IEEE80211_MODE_11B);
2110 setbit(bands, IEEE80211_MODE_11G);
2111 iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2112 IWM_NUM_2GHZ_CHANNELS - 1, bands);
2114 /* 14: 11b channel only. */
2115 clrbit(bands, IEEE80211_MODE_11G);
2116 iwm_add_channel_band(sc, chans, maxchans, nchans,
2117 IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2119 if (data->sku_cap_band_52GHz_enable) {
2120 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2121 ch_num = nitems(iwm_nvm_channels);
2123 ch_num = nitems(iwm_nvm_channels_8000);
2124 memset(bands, 0, sizeof(bands));
2125 setbit(bands, IEEE80211_MODE_11A);
2126 iwm_add_channel_band(sc, chans, maxchans, nchans,
2127 IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
/*
 * Determine the MAC address on 8000-family parts: prefer the MAC
 * override (MAO) section if it holds a valid, non-reserved, unicast
 * address; otherwise fall back to the WFMP PRPH registers (which hold
 * the address byte-swapped); otherwise zero the address and complain.
 */
2132 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2133 const uint16_t *mac_override, const uint16_t *nvm_hw)
2135 const uint8_t *hw_addr;
/* Sentinel value meaning "no override programmed" in the MAO section. */
2138 static const uint8_t reserved_mac[] = {
2139 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2142 hw_addr = (const uint8_t *)(mac_override +
2143 IWM_MAC_ADDRESS_OVERRIDE_8000);
2146 * Store the MAC address from MAO section.
2147 * No byte swapping is required in MAO section
2149 IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2152 * Force the use of the OTP MAC address in case of reserved MAC
2153 * address in the NVM, or if address is given but invalid.
2155 if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2156 !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2157 iwm_is_valid_ether_addr(data->hw_addr) &&
2158 !IEEE80211_IS_MULTICAST(data->hw_addr))
2161 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2162 "%s: mac address from nvm override section invalid\n",
2167 /* read the mac address from WFMP registers */
2168 uint32_t mac_addr0 =
2169 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2170 uint32_t mac_addr1 =
2171 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
/* WFMP registers store the address reversed; unswap byte by byte. */
2173 hw_addr = (const uint8_t *)&mac_addr0;
2174 data->hw_addr[0] = hw_addr[3];
2175 data->hw_addr[1] = hw_addr[2];
2176 data->hw_addr[2] = hw_addr[1];
2177 data->hw_addr[3] = hw_addr[0];
2179 hw_addr = (const uint8_t *)&mac_addr1;
2180 data->hw_addr[4] = hw_addr[1];
2181 data->hw_addr[5] = hw_addr[0];
2186 device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2187 memset(data->hw_addr, 0, sizeof(data->hw_addr));
/*
 * Return the SKU capability word: a 16-bit SW-section value before the
 * 8000 family, a 32-bit PHY_SKU-section value from 8000 onward.
 */
2191 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2192 const uint16_t *phy_sku)
2194 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2195 return le16_to_cpup(nvm_sw + IWM_SKU);
2197 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
/* Return the NVM version (16-bit pre-8000, 32-bit from 8000 onward). */
2201 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2203 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2204 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION)
2206 return le32_to_cpup((const uint32_t *)(nvm_sw +
2207 IWM_NVM_VERSION_8000));
/*
 * Return the radio configuration word: from the SW section pre-8000,
 * from the PHY_SKU section for the 8000 family.
 */
2211 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2212 const uint16_t *phy_sku)
2214 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2215 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2217 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
/*
 * Return the number of reserved MAC addresses; the 8000-family value
 * is masked with IWM_N_HW_ADDR_MASK.
 */
2221 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2225 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2226 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2228 n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2230 return n_hw_addr & IWM_N_HW_ADDR_MASK;
/*
 * Decode the radio configuration word into nvm_data fields, using the
 * family-specific bit layout (see IWM_NVM_RF_CFG_* macros above).  The
 * 8000 layout additionally carries valid TX/RX antenna masks.
 */
2234 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2237 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2238 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2239 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2240 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2241 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2245 /* set the radio configuration for family 8000 */
2246 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2247 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2248 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2249 data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2250 data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2251 data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
/*
 * Fill data->hw_addr from the NVM: pre-8000 parts store it in the HW
 * section in little-endian 16-bit pairs (byte order 214365); 8000-family
 * parts use iwm_set_hw_address_family_8000().  Fails (non-zero return)
 * if the resulting address is not a valid unicast address.
 */
2255 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2256 const uint16_t *nvm_hw, const uint16_t *mac_override)
2258 #ifdef notyet /* for FAMILY 9000 */
2259 if (cfg->mac_addr_from_csr) {
2260 iwm_set_hw_address_from_csr(sc, data);
2263 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2264 const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2266 /* The byte order is little endian 16 bit, meaning 214365 */
2267 data->hw_addr[0] = hw_addr[1];
2268 data->hw_addr[1] = hw_addr[0];
2269 data->hw_addr[2] = hw_addr[3];
2270 data->hw_addr[3] = hw_addr[2];
2271 data->hw_addr[4] = hw_addr[5];
2272 data->hw_addr[5] = hw_addr[4];
2274 iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2277 if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2278 device_printf(sc->sc_dev, "no valid mac address was found\n");
/*
 * Parse the raw NVM sections into a freshly allocated iwm_nvm_data
 * (sized for the family's channel count, zeroed, M_NOWAIT).  Extracts
 * version, radio config, SKU capabilities, address count and the MAC
 * address; frees the allocation and bails if no valid MAC is found.
 * Finally copies the per-channel flag words from the SW (7000) or
 * REGULATORY (8000) section.  Caller frees via iwm_free_nvm_data().
 */
2285 static struct iwm_nvm_data *
2286 iwm_parse_nvm_data(struct iwm_softc *sc,
2287 const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2288 const uint16_t *nvm_calib, const uint16_t *mac_override,
2289 const uint16_t *phy_sku, const uint16_t *regulatory)
2291 struct iwm_nvm_data *data;
2292 uint32_t sku, radio_cfg;
2294 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2295 data = malloc(sizeof(*data) +
2296 IWM_NUM_CHANNELS * sizeof(uint16_t),
2297 M_DEVBUF, M_NOWAIT | M_ZERO);
2299 data = malloc(sizeof(*data) +
2300 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2301 M_DEVBUF, M_NOWAIT | M_ZERO);
2306 data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2308 radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2309 iwm_set_radio_cfg(sc, data, radio_cfg);
2311 sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2312 data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2313 data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2314 data->sku_cap_11n_enable = 0;
2316 data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2318 /* If no valid mac address was found - bail out */
2319 if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2320 free(data, M_DEVBUF);
2324 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2325 memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2326 IWM_NUM_CHANNELS * sizeof(uint16_t));
/* Fixed mojibake: "&reg" had been mis-decoded as the '®' character. */
2328 memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2329 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
/* Release an iwm_nvm_data allocated by iwm_parse_nvm_data(). */
2336 iwm_free_nvm_data(struct iwm_nvm_data *data)
2339 free(data, M_DEVBUF);
/*
 * Validate that the family's mandatory NVM sections are present, then
 * hand the section pointers to iwm_parse_nvm_data().  Returns NULL if
 * a required section is missing.
 */
2342 static struct iwm_nvm_data *
2343 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2345 const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2347 /* Checking for required sections */
2348 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2349 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2350 !sections[sc->cfg->nvm_hw_section_num].data) {
2351 device_printf(sc->sc_dev,
2352 "Can't parse empty OTP/NVM sections\n");
2355 } else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2356 /* SW and REGULATORY sections are mandatory */
2357 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2358 !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2359 device_printf(sc->sc_dev,
2360 "Can't parse empty OTP/NVM sections\n");
2363 /* MAC_OVERRIDE or at least HW section must exist */
2364 if (!sections[sc->cfg->nvm_hw_section_num].data &&
2365 !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2366 device_printf(sc->sc_dev,
2367 "Can't parse mac_address, empty sections\n");
2371 /* PHY_SKU section is mandatory in B0 */
2372 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2373 device_printf(sc->sc_dev,
2374 "Can't parse phy_sku in B0, empty sections\n");
2378 panic("unknown device family %d\n", sc->cfg->device_family);
/* Section pointers may be NULL for families that don't require them. */
2381 hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2382 sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2383 calib = (const uint16_t *)
2384 sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2385 regulatory = (const uint16_t *)
2386 sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2387 mac_override = (const uint16_t *)
2388 sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2389 phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2391 return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2392 phy_sku, regulatory);
/*
 * Read every NVM section from the NIC into host memory, parse the set
 * into sc->nvm_data, then free the temporary per-section copies.
 * Each section is read into a shared bounce buffer (nvm_buffer) and
 * duplicated with malloc/memcpy so the buffer can be reused for the
 * next section.
 */
2396 iwm_nvm_init(struct iwm_softc *sc)
2398 struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2399 int i, ret, section;
2400 uint32_t size_read = 0;
2401 uint8_t *nvm_buffer, *temp;
2404 memset(nvm_sections, 0, sizeof(nvm_sections));
/* Config sanity: the HW section index must fit the section table. */
2406 if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2409 /* load NVM values from nic */
2410 /* Read From FW NVM */
2411 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2413 nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2416 for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2417 /* we override the constness for initial read */
2418 ret = iwm_nvm_read_section(sc, section, nvm_buffer,
/* Keep a private copy; nvm_buffer is reused each iteration. */
2423 temp = malloc(len, M_DEVBUF, M_NOWAIT)
2428 memcpy(temp, nvm_buffer, len);
2430 nvm_sections[section].data = temp;
2431 nvm_sections[section].length = len;
/* NOTE(review): size_read accumulation not visible in this extract. */
2434 device_printf(sc->sc_dev, "OTP is blank\n");
2435 free(nvm_buffer, M_DEVBUF);
2437 sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2440 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2441 "nvm version = %x\n", sc->nvm_data->nvm_version);
/* The parsed copy lives in sc->nvm_data; raw sections can go. */
2443 for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2444 if (nvm_sections[i].data != NULL)
2445 free(nvm_sections[i].data, M_DEVBUF);
/*
 * DMA one firmware section into device SRAM in chunks of at most
 * IWM_FH_MEM_TB_MAX_LENGTH bytes, bouncing each chunk through the
 * pre-allocated fw_dma buffer.  Destinations inside the extended
 * memory window require the LMPM "extended address space" chicken
 * bit to be set for the duration of that chunk's transfer.
 */
2452 iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2453 const struct iwm_fw_desc *section)
2455 struct iwm_dma_info *dma = &sc->fw_dma;
2458 uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2461 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2462 "%s: [%d] uCode section being loaded...\n",
2463 __func__, section_num);
2465 v_addr = dma->vaddr;
2466 p_addr = dma->paddr;
2468 for (offset = 0; offset < section->len; offset += chunk_sz) {
2469 uint32_t copy_size, dst_addr;
2470 int extended_addr = FALSE;
/* Last chunk may be shorter than chunk_sz. */
2472 copy_size = MIN(chunk_sz, section->len - offset);
2473 dst_addr = section->offset + offset;
2475 if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2476 dst_addr <= IWM_FW_MEM_EXTENDED_END)
2477 extended_addr = TRUE;
2480 iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2481 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
/* Stage chunk in the bounce buffer and push it to the device. */
2483 memcpy(v_addr, (const uint8_t *)section->data + offset,
2485 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2486 ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
/* Undo the extended-address chicken bit after the transfer. */
2490 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2491 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2494 device_printf(sc->sc_dev,
2495 "%s: Could not load the [%d] uCode section\n",
2496 __func__, section_num);
/*
 * Program the FH service-channel DMA engine to copy one firmware
 * chunk (byte_cnt bytes at bus address phy_addr) into device SRAM
 * at dst_addr, then sleep until the completion interrupt sets
 * sc_fw_chunk_done (the ISR wakes us via &sc->sc_fw).
 * Requires the NIC lock; msleep implies sc_mtx is held on entry.
 */
2508 iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2509 bus_addr_t phy_addr, uint32_t byte_cnt)
/* Cleared here, set by the interrupt handler on DMA completion. */
2513 sc->sc_fw_chunk_done = 0;
2515 if (!iwm_nic_lock(sc))
/* Pause the channel while the descriptor is being set up. */
2518 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2519 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2521 IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
/* Source bus address: low 32 bits ... */
2524 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2525 phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
/* ... and high bits combined with the byte count. */
2527 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2528 (iwm_get_dma_hi_addr(phy_addr)
2529 << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
/* One TB, one TFD index, descriptor valid. */
2531 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2532 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2533 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2534 IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
/* Kick off the transfer; interrupt on end-of-TFD. */
2536 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2537 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2538 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2539 IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2543 /* wait up to 5s for this segment to load */
2545 while (!sc->sc_fw_chunk_done) {
2546 ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
2552 device_printf(sc->sc_dev,
2553 "fw chunk addr 0x%x len %d failed to load\n",
2554 dst_addr, byte_cnt);
/*
 * 8000-family variant: load the firmware sections belonging to the
 * given CPU, starting at *first_ucode_section and stopping at the
 * first separator/empty section.  After each section the secure-boot
 * ucode is told which sections have loaded by OR-ing a growing bit
 * pattern (sec_num: 0x1, 0x3, 0x7, ...) into IWM_FH_UCODE_LOAD_STATUS.
 * On return, *first_ucode_section points at the section where the
 * next CPU's load should resume.
 */
2562 iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
2563 const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2566 int i, ret = 0, sec_num = 0x1;
2567 uint32_t val, last_read_idx = 0;
2571 *first_ucode_section = 0;
/* NOTE(review): shift_param setup for cpu==2 is not visible here. */
2574 (*first_ucode_section)++;
2577 for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2581 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2583 * PAGING_SEPARATOR_SECTION delimiter - separate between
2584 * CPU2 non paged to CPU2 paging sec.
2586 if (!image->fw_sect[i].data ||
2587 image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2588 image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2589 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2590 "Break since Data not valid or Empty section, sec = %d\n",
2594 ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2598 /* Notify the ucode of the loaded section number and status */
2599 if (iwm_nic_lock(sc)) {
2600 val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2601 val = val | (sec_num << shift_param);
2602 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2603 sec_num = (sec_num << 1) | 0x1;
2608 *first_ucode_section = last_read_idx;
2610 iwm_enable_interrupts(sc);
/* Mark the whole CPU as fully loaded in the load-status register. */
2612 if (iwm_nic_lock(sc)) {
2614 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2616 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
/*
 * Pre-8000 variant: load the firmware sections for the given CPU,
 * stopping at the first separator/empty section.  For 8000-family
 * parts a completion status is additionally reported through
 * IWM_CSR_UCODE_LOAD_STATUS_ADDR.  On return, *first_ucode_section
 * points past the sections consumed so the next CPU resumes there.
 */
2624 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2625 const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2629 uint32_t last_read_idx = 0;
2633 *first_ucode_section = 0;
/* NOTE(review): cpu==2 shift/skip setup is not visible in extract. */
2636 (*first_ucode_section)++;
2639 for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2643 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2645 * PAGING_SEPARATOR_SECTION delimiter - separate between
2646 * CPU2 non paged to CPU2 paging sec.
2648 if (!image->fw_sect[i].data ||
2649 image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2650 image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2651 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2652 "Break since Data not valid or Empty section, sec = %d\n",
2657 ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
/* 8000 family also reports load progress via a PRPH register. */
2662 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2663 iwm_set_bits_prph(sc,
2664 IWM_CSR_UCODE_LOAD_STATUS_ADDR,
2665 (IWM_LMPM_CPU_UCODE_LOADING_COMPLETED |
2666 IWM_LMPM_CPU_HDRS_LOADING_COMPLETED |
2667 IWM_LMPM_CPU_UCODE_LOADING_STARTED) <<
2670 *first_ucode_section = last_read_idx;
/*
 * Load a complete (non-secure-boot) firmware image: CPU1 sections
 * first, then — for dual-CPU images — the CPU2 header address and
 * CPU2 sections.  Finishes by enabling interrupts and releasing the
 * CPU reset, which starts firmware execution.
 */
2677 iwm_pcie_load_given_ucode(struct iwm_softc *sc,
2678 const struct iwm_fw_sects *image)
2681 int first_ucode_section;
2683 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2684 image->is_dual_cpus ? "Dual" : "Single");
2686 /* load to FW the binary non secured sections of CPU1 */
2687 ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2691 if (image->is_dual_cpus) {
2692 /* set CPU2 header address */
2694 IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2695 IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2697 /* load to FW the binary sections of CPU2 */
2698 ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2699 &first_ucode_section);
2704 iwm_enable_interrupts(sc);
2706 /* release CPU reset */
2707 IWM_WRITE(sc, IWM_CSR_RESET, 0);
/*
 * 8000-family image load: the CPU reset is released *before* loading
 * so the secure-boot ROM is ready to receive the signed image, then
 * CPU1 and CPU2 sections are pushed via the _8000 section loader.
 */
2713 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2714 const struct iwm_fw_sects *image)
2717 int first_ucode_section;
2719 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2720 image->is_dual_cpus ? "Dual" : "Single");
2722 /* configure the ucode to be ready to get the secured image */
2723 /* release CPU reset */
2724 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
2726 /* load to FW the binary Secured sections of CPU1 */
2727 ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2728 &first_ucode_section);
2732 /* load to FW the binary sections of CPU2 */
2733 return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2734 &first_ucode_section);
2737 /* XXX Get rid of this definition */
/*
 * Mask all interrupts except FH_TX, which signals firmware-chunk DMA
 * completion; used while the firmware image is being loaded so that
 * nothing else (e.g. RF-kill) interrupts the load.
 */
2739 iwm_enable_fw_load_int(struct iwm_softc *sc)
2741 IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
2742 sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
2743 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
2746 /* XXX Add proper rfkill support code */
/*
 * Bring the NIC up and boot the given firmware image: acquire the HW
 * from AMT if needed, clear pending interrupts and rfkill handshake
 * bits, init the NIC, restrict interrupts to FH_TX for the load, and
 * finally push the image with the family-appropriate loader.
 */
2748 iwm_start_fw(struct iwm_softc *sc,
2749 const struct iwm_fw_sects *fw)
2753 /* This may fail if AMT took ownership of the device */
2754 if (iwm_prepare_card_hw(sc)) {
2755 device_printf(sc->sc_dev,
2756 "%s: Exit HW not ready\n", __func__);
/* Ack any stale interrupt causes before touching the NIC. */
2761 IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2763 iwm_disable_interrupts(sc);
2765 /* make sure rfkill handshake bits are cleared */
2766 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2767 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2768 IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2770 /* clear (again), then enable host interrupts */
2771 IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2773 ret = iwm_nic_init(sc);
2775 device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2780 * Now, we load the firmware and don't want to be interrupted, even
2781 * by the RF-Kill interrupt (hence mask all the interrupt besides the
2782 * FH_TX interrupt which is needed to load the firmware). If the
2783 * RF-Kill switch is toggled, we will find out after having loaded
2784 * the firmware and return the proper value to the caller.
2786 iwm_enable_fw_load_int(sc);
2788 /* really make sure rfkill handshake bits are cleared */
2789 /* maybe we should write a few times more? just to make sure */
2790 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2791 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2793 /* Load the given image to the HW */
2794 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2795 ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2797 ret = iwm_pcie_load_given_ucode(sc, fw);
2799 /* XXX re-check RF-Kill state */
/*
 * Tell the firmware which TX antennas are usable (bitmask, sent as a
 * synchronous IWM_TX_ANT_CONFIGURATION_CMD).  Returns the command
 * status from iwm_mvm_send_cmd_pdu().
 */
2806 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2808 struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2809 .valid = htole32(valid_tx_ant),
2812 return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2813 IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2816 /* iwlwifi: mvm/fw.c */
/*
 * Send the PHY configuration command, including the calibration
 * triggers appropriate for the currently running ucode type
 * (taken from sc->sc_default_calib[]).  Synchronous.
 */
2818 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2820 struct iwm_phy_cfg_cmd phy_cfg_cmd;
2821 enum iwm_ucode_type ucode_type = sc->cur_ucode;
2823 /* Set parameters */
2824 phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
2825 phy_cfg_cmd.calib_control.event_trigger =
2826 sc->sc_default_calib[ucode_type].event_trigger;
2827 phy_cfg_cmd.calib_control.flow_trigger =
2828 sc->sc_default_calib[ucode_type].flow_trigger;
2830 IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2831 "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2832 return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2833 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
/*
 * Notification-wait callback for the firmware ALIVE message.  The
 * response format is distinguished purely by payload size (ver1, ver2
 * or ver3 struct).  In all cases it records the error/log event table
 * pointers and the scheduler base address, and marks alive_data->valid
 * when the firmware reports IWM_ALIVE_STATUS_OK.  Ver2/ver3 also carry
 * a UMAC error-table pointer, whose presence enables UMAC logging.
 */
2837 iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
2839 struct iwm_mvm_alive_data *alive_data = data;
2840 struct iwm_mvm_alive_resp_ver1 *palive1;
2841 struct iwm_mvm_alive_resp_ver2 *palive2;
2842 struct iwm_mvm_alive_resp *palive;
2844 if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
2845 palive1 = (void *)pkt->data;
/* Ver1 has no UMAC fields. */
2847 sc->support_umac_log = FALSE;
2848 sc->error_event_table =
2849 le32toh(palive1->error_event_table_ptr);
2850 sc->log_event_table =
2851 le32toh(palive1->log_event_table_ptr);
2852 alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);
2854 alive_data->valid = le16toh(palive1->status) ==
2855 IWM_ALIVE_STATUS_OK;
2856 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2857 "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2858 le16toh(palive1->status), palive1->ver_type,
2859 palive1->ver_subtype, palive1->flags);
2860 } else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
2861 palive2 = (void *)pkt->data;
2862 sc->error_event_table =
2863 le32toh(palive2->error_event_table_ptr);
2864 sc->log_event_table =
2865 le32toh(palive2->log_event_table_ptr);
2866 alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
2867 sc->umac_error_event_table =
2868 le32toh(palive2->error_info_addr);
2870 alive_data->valid = le16toh(palive2->status) ==
2871 IWM_ALIVE_STATUS_OK;
/* Non-zero UMAC error table => firmware supports UMAC logging. */
2872 if (sc->umac_error_event_table)
2873 sc->support_umac_log = TRUE;
2875 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2876 "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2877 le16toh(palive2->status), palive2->ver_type,
2878 palive2->ver_subtype, palive2->flags);
2880 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2881 "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2882 palive2->umac_major, palive2->umac_minor);
2883 } else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
2884 palive = (void *)pkt->data;
2886 sc->error_event_table =
2887 le32toh(palive->error_event_table_ptr);
2888 sc->log_event_table =
2889 le32toh(palive->log_event_table_ptr);
2890 alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
2891 sc->umac_error_event_table =
2892 le32toh(palive->error_info_addr);
2894 alive_data->valid = le16toh(palive->status) ==
2895 IWM_ALIVE_STATUS_OK;
2896 if (sc->umac_error_event_table)
2897 sc->support_umac_log = TRUE;
2899 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2900 "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2901 le16toh(palive->status), palive->ver_type,
2902 palive->ver_subtype, palive->flags);
2904 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2905 "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2906 le32toh(palive->umac_major),
2907 le32toh(palive->umac_minor));
/*
 * Notification-wait callback used during INIT ucode calibration:
 * stores each IWM_CALIB_RES_NOTIF_PHY_DB packet into the PHY DB
 * (passed via the opaque data pointer).  Any other command except
 * IWM_INIT_COMPLETE_NOTIF is unexpected and logged.
 */
2914 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2915 struct iwm_rx_packet *pkt, void *data)
2917 struct iwm_phy_db *phy_db = data;
2919 if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2920 if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2921 device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2922 __func__, pkt->hdr.code);
2927 if (iwm_phy_db_set_section(phy_db, pkt)) {
2928 device_printf(sc->sc_dev,
2929 "%s: iwm_phy_db_set_section failed\n", __func__);
/*
 * Load the requested ucode type and block until the firmware's ALIVE
 * notification arrives (handled by iwm_alive_fn).  On any failure the
 * previously running ucode type is restored in sc->cur_ucode.  On
 * success the scheduler base address from the ALIVE data is programmed
 * and sc->ucode_loaded is set.  FW paging is not yet implemented.
 */
2936 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2937 enum iwm_ucode_type ucode_type)
2939 struct iwm_notification_wait alive_wait;
2940 struct iwm_mvm_alive_data alive_data;
2941 const struct iwm_fw_sects *fw;
2942 enum iwm_ucode_type old_type = sc->cur_ucode;
2944 static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };
2946 if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2947 device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
2951 fw = &sc->sc_fw.fw_sects[ucode_type];
2952 sc->cur_ucode = ucode_type;
2953 sc->ucode_loaded = FALSE;
/* Register the ALIVE waiter before starting the firmware. */
2955 memset(&alive_data, 0, sizeof(alive_data));
2956 iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
2957 alive_cmd, nitems(alive_cmd),
2958 iwm_alive_fn, &alive_data);
2960 error = iwm_start_fw(sc, fw);
2962 device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2963 sc->cur_ucode = old_type;
2964 iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
2969 * Some things may run in the background now, but we
2970 * just wait for the ALIVE notification here.
2973 error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
2974 IWM_MVM_UCODE_ALIVE_TIMEOUT);
/* On 8000 parts, dump the secure-boot status to aid diagnosis. */
2977 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2978 device_printf(sc->sc_dev,
2979 "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
2980 iwm_read_prph(sc, IWM_SB_CPU_1_STATUS),
2981 iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
2983 sc->cur_ucode = old_type;
2987 if (!alive_data.valid) {
2988 device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
2990 sc->cur_ucode = old_type;
/* Program the scheduler base address the firmware reported. */
2994 iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);
2997 * configure and operate fw paging mechanism.
2998 * driver configures the paging flow only once, CPU2 paging image
2999 * included in the IWM_UCODE_INIT image.
3001 if (fw->paging_mem_size) {
3002 /* XXX implement FW paging */
3003 device_printf(sc->sc_dev,
3004 "%s: XXX FW paging not implemented yet\n", __func__);
3008 sc->ucode_loaded = TRUE;
3017 * follows iwlwifi/fw.c
/*
 * Run the INIT ucode: boot it, read the NVM (and copy the MAC address
 * into net80211 state), then — unless justnvm is set — configure BT
 * coex, Smart FIFO and TX antennas, kick off the PHY calibrations and
 * wait for the init-complete/PHY-DB notifications.  Refuses to start
 * (except for NVM-only use) while the hardware RF-kill switch is on.
 */
3020 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
3022 struct iwm_notification_wait calib_wait;
3023 static const uint16_t init_complete[] = {
3024 IWM_INIT_COMPLETE_NOTIF,
3025 IWM_CALIB_RES_NOTIF_PHY_DB
3029 /* do not operate with rfkill switch turned on */
3030 if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
3031 device_printf(sc->sc_dev,
3032 "radio is disabled by hardware switch\n");
/* Waiter collects calibration results into the PHY DB. */
3036 iwm_init_notification_wait(sc->sc_notif_wait,
3039 nitems(init_complete),
3040 iwm_wait_phy_db_entry,
3043 /* Will also start the device */
3044 ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
3046 device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
3053 ret = iwm_nvm_init(sc);
3055 device_printf(sc->sc_dev, "failed to read nvm\n");
3058 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
3062 ret = iwm_send_bt_init_conf(sc);
3064 device_printf(sc->sc_dev,
3065 "failed to send bt coex configuration: %d\n", ret);
3069 /* Init Smart FIFO. */
3070 ret = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
3074 /* Send TX valid antennas before triggering calibrations */
3075 ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
3077 device_printf(sc->sc_dev,
3078 "failed to send antennas before calibration: %d\n", ret);
3083 * Send phy configurations command to init uCode
3084 * to start the 16.0 uCode init image internal calibrations.
3086 ret = iwm_send_phy_cfg_cmd(sc);
3088 device_printf(sc->sc_dev,
3089 "%s: Failed to run INIT calibrations: %d\n",
3095 * Nothing to do but wait for the init complete notification
3096 * from the firmware.
3099 ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
3100 IWM_MVM_UCODE_CALIB_TIMEOUT);
3107 iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
3116 /* (re)stock rx ring, called at init-time and at runtime */
/*
 * Allocate a jumbo cluster mbuf for RX slot idx, DMA-map it into the
 * ring's spare map, then swap spare map and slot map so a mapping
 * failure leaves the old buffer intact.  The RX descriptor stores the
 * bus address >> 8, hence the 256-byte alignment assertion.
 */
3118 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3120 struct iwm_rx_ring *ring = &sc->rxq;
3121 struct iwm_rx_data *data = &ring->data[idx];
3123 bus_dmamap_t dmamap = NULL;
3124 bus_dma_segment_t seg;
3127 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
3131 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
/* Map into the spare map first; the slot's map stays valid on error. */
3132 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
3133 &seg, &nsegs, BUS_DMA_NOWAIT);
3135 device_printf(sc->sc_dev,
3136 "%s: can't map mbuf, error %d\n", __func__, error);
3140 if (data->m != NULL)
3141 bus_dmamap_unload(ring->data_dmat, data->map);
3143 /* Swap ring->spare_map with data->map */
3145 data->map = ring->spare_map;
3146 ring->spare_map = dmamap;
3148 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
3151 /* Update RX descriptor. */
3152 KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
3153 ring->desc[idx] = htole32(seg.ds_addr >> 8);
3154 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3155 BUS_DMASYNC_PREWRITE);
3163 /* iwlwifi: mvm/rx.c */
3164 #define IWM_RSSI_OFFSET 50
/*
 * Legacy RSSI computation (pre RX_ENERGY_API firmware): extract
 * per-antenna AGC and in-band RSSI words from the PHY info, convert
 * each to dBm (rssi - offset - agc) and return the stronger antenna.
 */
3166 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3168 int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
3169 uint32_t agc_a, agc_b;
3172 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
3173 agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
3174 agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
3176 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
3177 rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
3178 rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
3181 * dBm = rssi dB - agc dB - constant.
3182 * Higher AGC (higher radio gain) means lower signal.
3184 rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
3185 rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
3186 max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
3188 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3189 "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
3190 rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
3192 return max_rssi_dbm;
3195 /* iwlwifi: mvm/rx.c */
3197 * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3198 * values are reported by the fw as positive values - need to negate
3199 * to obtain their dBM. Account for missing antennas by replacing 0
3200 * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
/*
 * Per-antenna energies (A/B/C) are packed into one 32-bit word; each
 * is negated into dBm, with 0 (absent antenna) mapped to -256.  The
 * maximum across antennas is returned.
 */
3203 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3205 int energy_a, energy_b, energy_c, max_energy;
3208 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3209 energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3210 IWM_RX_INFO_ENERGY_ANT_A_POS;
3211 energy_a = energy_a ? -energy_a : -256;
3212 energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3213 IWM_RX_INFO_ENERGY_ANT_B_POS;
3214 energy_b = energy_b ? -energy_b : -256;
3215 energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3216 IWM_RX_INFO_ENERGY_ANT_C_POS;
3217 energy_c = energy_c ? -energy_c : -256;
3218 max_energy = MAX(energy_a, energy_b);
3219 max_energy = MAX(max_energy, energy_c);
3221 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3222 "energy In A %d B %d C %d , and max %d\n",
3223 energy_a, energy_b, energy_c, max_energy);
/*
 * Handle an RX PHY-info notification: cache the PHY statistics so the
 * following MPDU notification (iwm_mvm_rx_rx_mpdu) can use them.
 */
3229 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
3230 struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3232 struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3234 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
/* Make the DMA'd packet contents visible to the CPU before copying. */
3235 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3237 memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3241 * Retrieve the average noise (in dBm) among receivers.
/*
 * Averages the per-antenna beacon-silence RSSI values (low byte of
 * each of the three entries) and converts to dBm via the -107 offset;
 * returns -127 when no antenna reported a value.
 */
3244 iwm_get_noise(struct iwm_softc *sc,
3245 const struct iwm_mvm_statistics_rx_non_phy *stats)
3247 int i, total, nbant, noise;
3249 total = nbant = noise = 0;
3250 for (i = 0; i < 3; i++) {
3251 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3252 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3263 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3264 __func__, nbant, total);
3266 /* There should be at least one antenna but check anyway. */
3267 return (nbant == 0) ? -127 : (total / nbant) - 107;
3269 /* For now, just hard-code it to -96 to be safe */
3275 * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3277 * Handles the actual data of the Rx packet from the fw
/*
 * Uses the PHY info cached by iwm_mvm_rx_rx_phy_cmd(); validates CRC
 * and DSP size, computes RSSI (energy API or legacy), replenishes the
 * ring slot, fills an ieee80211_rx_stats, updates radiotap and hands
 * the frame to net80211 (per-node or "input all" path).
 */
3280 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
3281 struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3283 struct ieee80211com *ic = &sc->sc_ic;
3284 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3285 struct ieee80211_frame *wh;
3286 struct ieee80211_node *ni;
3287 struct ieee80211_rx_stats rxs;
3289 struct iwm_rx_phy_info *phy_info;
3290 struct iwm_rx_mpdu_res_start *rx_res;
3292 uint32_t rx_pkt_status;
3295 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3297 phy_info = &sc->sc_last_phy_info;
3298 rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3299 wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3300 len = le16toh(rx_res->byte_count);
/* Status word follows the frame payload in the packet buffer. */
3301 rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3304 m->m_data = pkt->data + sizeof(*rx_res);
3305 m->m_pkthdr.len = m->m_len = len;
3307 if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3308 device_printf(sc->sc_dev,
3309 "dsp size out of range [0,20]: %d\n",
3310 phy_info->cfg_phy_cnt);
3314 if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3315 !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3316 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3317 "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
/* Newer firmware reports per-antenna energy; older uses AGC/RSSI. */
3321 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
3322 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3324 rssi = iwm_mvm_calc_rssi(sc, phy_info);
3327 /* Note: RSSI is absolute (ie a -ve value) */
3328 if (rssi < IWM_MIN_DBM)
3330 else if (rssi > IWM_MAX_DBM)
3333 /* Map it to relative value */
3334 rssi = rssi - sc->sc_noise;
3336 /* replenish ring for the buffer we're going to feed to the sharks */
3337 if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3338 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3343 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3344 "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3346 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3348 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3349 "%s: phy_info: channel=%d, flags=0x%08x\n",
3351 le16toh(phy_info->channel),
3352 le16toh(phy_info->phy_flags));
3355 * Populate an RX state struct with the provided information.
3357 bzero(&rxs, sizeof(rxs));
3358 rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3359 rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3360 rxs.c_ieee = le16toh(phy_info->channel);
/*
 * NOTE(review): le16toh() is applied to the already-masked value
 * here rather than to phy_flags itself (contrast L3375 below, which
 * byte-swaps the constant instead) — works on little-endian; confirm
 * against iwlwifi before relying on it for big-endian hosts.
 */
3361 if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3362 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3364 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3367 /* rssi is in 1/2db units */
3368 rxs.rssi = rssi * 2;
3369 rxs.nf = sc->sc_noise;
3371 if (ieee80211_radiotap_active_vap(vap)) {
3372 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3375 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3376 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3377 tap->wr_chan_freq = htole16(rxs.c_freq);
3378 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3379 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3380 tap->wr_dbm_antsignal = (int8_t)rssi;
3381 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3382 tap->wr_tsft = phy_info->system_timestamp;
/* Map firmware rate codes to radiotap 500kb/s units (CCK then OFDM). */
3383 switch (phy_info->rate) {
3385 case 10: tap->wr_rate = 2; break;
3386 case 20: tap->wr_rate = 4; break;
3387 case 55: tap->wr_rate = 11; break;
3388 case 110: tap->wr_rate = 22; break;
3390 case 0xd: tap->wr_rate = 12; break;
3391 case 0xf: tap->wr_rate = 18; break;
3392 case 0x5: tap->wr_rate = 24; break;
3393 case 0x7: tap->wr_rate = 36; break;
3394 case 0x9: tap->wr_rate = 48; break;
3395 case 0xb: tap->wr_rate = 72; break;
3396 case 0x1: tap->wr_rate = 96; break;
3397 case 0x3: tap->wr_rate = 108; break;
3398 /* Unknown rate: should not happen. */
3399 default: tap->wr_rate = 0;
3405 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3406 ieee80211_input_mimo(ni, m, &rxs);
3407 ieee80211_free_node(ni);
3409 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3410 ieee80211_input_mimo_all(ic, m, &rxs);
3416 fail: counter_u64_add(ic->ic_ierrors, 1);
/*
 * Process the firmware TX response for a single (non-aggregated)
 * frame: log the detailed status and feed success/failure plus the
 * retry count (failure_frame) into net80211 rate control.
 */
3420 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3421 struct iwm_node *in)
3423 struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3424 struct ieee80211_node *ni = &in->in_ni;
3425 struct ieee80211vap *vap = ni->ni_vap;
3426 int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3427 int failack = tx_resp->failure_frame;
/* Aggregation would deliver multiple frames per response. */
3429 KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3431 /* Update rate control statistics. */
3432 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3434 (int) le16toh(tx_resp->status.status),
3435 (int) le16toh(tx_resp->status.sequence),
3436 tx_resp->frame_count,
3437 tx_resp->bt_kill_count,
3438 tx_resp->failure_rts,
3439 tx_resp->failure_frame,
3440 le32toh(tx_resp->initial_rate),
3441 (int) le16toh(tx_resp->wireless_media_time));
3443 if (status != IWM_TX_STATUS_SUCCESS &&
3444 status != IWM_TX_STATUS_DIRECT_DONE) {
3445 ieee80211_ratectl_tx_complete(vap, ni,
3446 IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3449 ieee80211_ratectl_tx_complete(vap, ni,
3450 IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
/*
 * TX-done notification handler: locate the originating ring/slot from
 * the command header, run the single-frame status path, unmap and
 * complete the mbuf, and clear the queue-full bit once the ring drains
 * below the low-water mark.
 */
3456 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
3457 struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3459 struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3460 int idx = cmd_hdr->idx;
3461 int qid = cmd_hdr->qid;
3462 struct iwm_tx_ring *ring = &sc->txq[qid];
3463 struct iwm_tx_data *txd = &ring->data[idx];
3464 struct iwm_node *in = txd->in;
3465 struct mbuf *m = txd->m;
3468 KASSERT(txd->done == 0, ("txd not done"));
3469 KASSERT(txd->in != NULL, ("txd without node"));
3470 KASSERT(txd->m != NULL, ("txd without mbuf"));
3472 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
/* Firmware responded: reset the watchdog. */
3474 sc->sc_tx_timer = 0;
3476 status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3478 /* Unmap and free mbuf. */
3479 bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3480 bus_dmamap_unload(ring->data_dmat, txd->map);
3482 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3483 "free txd %p, in %p\n", txd, txd->in);
/* Hands ownership of m (and the node ref) back to net80211. */
3488 ieee80211_tx_complete(&in->in_ni, m, status);
3490 if (--ring->queued < IWM_TX_RING_LOMARK) {
3491 sc->qfullmsk &= ~(1 << ring->qid);
3492 if (sc->qfullmsk == 0) {
3503 * Process a "command done" firmware notification. This is where we wakeup
3504 * processes waiting for a synchronous command completion.
/*
 * Only packets on the command queue are handled; the slot's mapped
 * mbuf (if any) is unmapped, and the waiter sleeping on the ring
 * descriptor is woken.  A consistency check detects skipped HCMDs.
 */
3508 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3510 struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3511 struct iwm_tx_data *data;
3513 if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3514 return; /* Not a command ack. */
3517 /* XXX wide commands? */
3518 IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3519 "cmd notification type 0x%x qid %d idx %d\n",
3520 pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3522 data = &ring->data[pkt->hdr.idx];
3524 /* If the command was mapped in an mbuf, free it. */
3525 if (data->m != NULL) {
3526 bus_dmamap_sync(ring->data_dmat, data->map,
3527 BUS_DMASYNC_POSTWRITE);
3528 bus_dmamap_unload(ring->data_dmat, data->map);
/* Wake iwm_send_cmd()-style waiters sleeping on this descriptor. */
3532 wakeup(&ring->desc[pkt->hdr.idx]);
3534 if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3535 device_printf(sc->sc_dev,
3536 "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3537 __func__, pkt->hdr.idx, ring->queued, ring->cur);
3538 /* XXX call iwm_force_nmi() */
3541 KASSERT(ring->queued > 0, ("ring->queued is empty?"));
3543 if (ring->queued == 0)
3544 iwm_pcie_clear_cmd_in_flight(sc);
3549 * necessary only for block ack mode
/*
 * Write the byte-count entry for TX slot (qid, idx) into the shared
 * scheduler byte-count table (sta_id in the top 4 bits, length in the
 * rest).  Entries near the start of the ring are duplicated at
 * IWM_TFD_QUEUE_SIZE_MAX + idx, matching the hardware's wrap-around
 * read pattern.
 */
3552 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3555 struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3558 scd_bc_tbl = sc->sched_dma.vaddr;
3560 len += 8; /* magic numbers came naturally from paris */
/* DW_BC_TABLE firmware expects the length in dwords, not bytes. */
3561 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
3562 len = roundup(len, 4) / 4;
3564 w_val = htole16(sta_id << 12 | len);
3566 /* Update TX scheduler. */
3567 scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3568 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3569 BUS_DMASYNC_PREWRITE);
3571 /* I really wonder what this is ?!? */
3572 if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3573 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3574 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3575 BUS_DMASYNC_PREWRITE);
3581 * Take an 802.11 (non-n) rate, find the relevant rate
3582 * table entry. return the index into in_ridx[].
3584 * The caller then uses that index back into in_ridx
3585 * to figure out the rate index programmed /into/
3586 * the firmware for this given node.
/* Linear scan of the node's programmed rate set; falls back to a
 * default index (debug-logged) when the rate is not found. */
3589 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3595 for (i = 0; i < nitems(in->in_ridx); i++) {
3596 r = iwm_rates[in->in_ridx[i]].rate;
3601 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3602 "%s: couldn't find an entry for rate=%d\n",
3606 /* XXX Return the first */
3607 /* XXX TODO: have it return the /lowest/ */
/*
 * Map an 802.11 rate (in 0.5 Mb/s units) to an index into the global
 * iwm_rates[] table.  Logs when no entry matches; fallback return value
 * is not visible in this listing.
 */
3612 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3616 for (i = 0; i < nitems(iwm_rates); i++) {
3617 if (iwm_rates[i].rate == rate)
3621 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3622 "%s: couldn't find an entry for rate=%d\n",
3629 * Fill in the rate related information for a transmit command.
3631 static const struct iwm_rate *
3632 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3633 struct mbuf *m, struct iwm_tx_cmd *tx)
3635 struct ieee80211_node *ni = &in->in_ni;
3636 struct ieee80211_frame *wh;
3637 const struct ieee80211_txparam *tp = ni->ni_txparms;
3638 const struct iwm_rate *rinfo;
3640 int ridx, rate_flags;
3642 wh = mtod(m, struct ieee80211_frame *);
3643 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
/* Fixed retry limits; rate selection below picks the rate index. */
3645 tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3646 tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
/*
 * Rate selection policy:
 *  - management frames      -> tp->mgmtrate
 *  - multicast              -> tp->mcastrate
 *  - fixed unicast rate set -> tp->ucastrate
 *  - EAPOL                  -> tp->mgmtrate (robust rate for key exchange)
 *  - plain data             -> rate-control (RS) table via in_ridx[]
 *  - anything else          -> tp->mgmtrate
 */
3648 if (type == IEEE80211_FC0_TYPE_MGT) {
3649 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3650 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3651 "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3652 } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3653 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3654 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3655 "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3656 } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3657 ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3658 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3659 "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3660 } else if (m->m_flags & M_EAPOL) {
3661 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3662 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3663 "%s: EAPOL\n", __func__);
3664 } else if (type == IEEE80211_FC0_TYPE_DATA) {
3667 /* for data frames, use RS table */
3668 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3669 /* XXX pass pktlen */
3670 (void) ieee80211_ratectl_rate(ni, NULL, 0);
3671 i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3672 ridx = in->in_ridx[i];
3674 /* This is the index into the programmed table */
3675 tx->initial_rate_index = i;
/* STA_RATE tells the firmware to use its own LQ rate table. */
3676 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3678 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3679 "%s: start with i=%d, txrate %d\n",
3680 __func__, i, iwm_rates[ridx].rate);
3682 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3683 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DEFAULT (%d)\n",
3684 __func__, tp->mgmtrate);
3687 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3688 "%s: frame type=%d txrate %d\n",
3689 __func__, type, iwm_rates[ridx].rate);
3691 rinfo = &iwm_rates[ridx];
3693 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3696 !! (IWM_RIDX_IS_CCK(ridx))
3699 /* XXX TODO: hard-coded TX antenna? */
3700 rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
/* CCK rates must be flagged so the PHY uses the right modulation. */
3701 if (IWM_RIDX_IS_CCK(ridx))
3702 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3703 tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
/*
 * Queue an mbuf for transmission on TX ring `ac`.
 *
 * Builds an IWM_TX_CMD in the ring's command slot (header + 802.11
 * header copied inline), DMA-maps the payload, fills the TFD with the
 * command segment(s) plus the payload segments, updates the scheduler
 * byte-count table and kicks the write pointer.
 * Caller holds the softc lock (convention in this driver — not visible
 * in this listing; confirm).
 */
3710 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3712 struct ieee80211com *ic = &sc->sc_ic;
3713 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3714 struct iwm_node *in = IWM_NODE(ni);
3715 struct iwm_tx_ring *ring;
3716 struct iwm_tx_data *data;
3717 struct iwm_tfd *desc;
3718 struct iwm_device_cmd *cmd;
3719 struct iwm_tx_cmd *tx;
3720 struct ieee80211_frame *wh;
3721 struct ieee80211_key *k = NULL;
3723 const struct iwm_rate *rinfo;
3726 bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3729 int i, totlen, error, pad;
3731 wh = mtod(m, struct ieee80211_frame *);
3732 hdrlen = ieee80211_anyhdrsize(wh);
3733 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3735 ring = &sc->txq[ac];
3736 desc = &ring->desc[ring->cur];
3737 memset(desc, 0, sizeof(*desc));
3738 data = &ring->data[ring->cur];
3740 /* Fill out iwm_tx_cmd to send to the firmware */
3741 cmd = &ring->cmd[ring->cur];
3742 cmd->hdr.code = IWM_TX_CMD;
3744 cmd->hdr.qid = ring->qid;
3745 cmd->hdr.idx = ring->cur;
3747 tx = (void *)cmd->data;
3748 memset(tx, 0, sizeof(*tx));
/* Rate selection happens before encryption, so rinfo is valid below. */
3750 rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
3752 /* Encrypt the frame if need be. */
3753 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3754 /* Retrieve key for TX && do software encryption. */
3755 k = ieee80211_crypto_encap(ni, m);
3760 /* 802.11 header may have moved. */
3761 wh = mtod(m, struct ieee80211_frame *);
3764 if (ieee80211_radiotap_active_vap(vap)) {
3765 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3768 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3769 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3770 tap->wt_rate = rinfo->rate;
3772 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3773 ieee80211_radiotap_tx(vap, m);
3777 totlen = m->m_pkthdr.len;
/* Request an ACK for anything unicast. */
3780 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3781 flags |= IWM_TX_CMD_FLG_ACK;
/* RTS/CTS protection for long unicast data frames. */
3784 if (type == IEEE80211_FC0_TYPE_DATA
3785 && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3786 && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3787 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
/* Non-data and multicast go via the auxiliary station. */
3790 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3791 type != IEEE80211_FC0_TYPE_DATA)
3792 tx->sta_id = sc->sc_aux_sta.sta_id;
3794 tx->sta_id = IWM_STATION_ID;
/* Power-save frame timeout depends on the management subtype. */
3796 if (type == IEEE80211_FC0_TYPE_MGT) {
3797 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3799 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3800 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3801 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3802 } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3803 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3805 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3808 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3812 /* First segment length must be a multiple of 4. */
3813 flags |= IWM_TX_CMD_FLG_MH_PAD;
3814 pad = 4 - (hdrlen & 3);
3818 tx->driver_txop = 0;
3819 tx->next_frame_len = 0;
3821 tx->len = htole16(totlen);
3822 tx->tid_tspec = tid;
3823 tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3825 /* Set physical address of "scratch area". */
3826 tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3827 tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3829 /* Copy 802.11 header in TX command. */
3830 memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3832 flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3835 tx->tx_flags |= htole32(flags);
3837 /* Trim 802.11 header. */
3839 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3840 segs, &nsegs, BUS_DMA_NOWAIT);
3842 if (error != EFBIG) {
3843 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3848 /* Too many DMA segments, linearize mbuf. */
3849 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3851 device_printf(sc->sc_dev,
3852 "%s: could not defrag mbuf\n", __func__);
/* Retry the mapping against the collapsed mbuf chain. */
3858 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3859 segs, &nsegs, BUS_DMA_NOWAIT);
3861 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3871 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3872 "sending txd %p, in %p\n", data, data->in);
3873 KASSERT(data->in != NULL, ("node is NULL"));
3875 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3876 "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3877 ring->qid, ring->cur, totlen, nsegs,
3878 le32toh(tx->tx_flags),
3879 le32toh(tx->rate_n_flags),
3880 tx->initial_rate_index
3883 /* Fill TX descriptor. */
/* TBs 0+1 cover the command (header + tx cmd + 802.11 hdr + pad),
 * split at TB0_SIZE; the remaining TBs carry the payload segments. */
3884 desc->num_tbs = 2 + nsegs;
3886 desc->tbs[0].lo = htole32(data->cmd_paddr);
3887 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3889 desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3890 desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3891 ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3892 + hdrlen + pad - TB0_SIZE) << 4);
3894 /* Other DMA segments are for data payload. */
3895 for (i = 0; i < nsegs; i++) {
3897 desc->tbs[i+2].lo = htole32(seg->ds_addr);
3898 desc->tbs[i+2].hi_n_len = \
3899 htole16(iwm_get_dma_hi_addr(seg->ds_addr))
3900 | ((seg->ds_len) << 4);
/* Flush payload, command and descriptor before handing to the HW. */
3903 bus_dmamap_sync(ring->data_dmat, data->map,
3904 BUS_DMASYNC_PREWRITE);
3905 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3906 BUS_DMASYNC_PREWRITE);
3907 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3908 BUS_DMASYNC_PREWRITE);
3911 iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
/* Advance the ring and tell the firmware about the new write pointer. */
3915 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3916 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3918 /* Mark TX ring as full if we reach a certain threshold. */
3919 if (++ring->queued > IWM_TX_RING_HIMARK) {
3920 sc->qfullmsk |= 1 << ring->qid;
/*
 * net80211 raw_xmit entry point.
 * NOTE(review): both branches below call iwm_tx() identically — the
 * bpf params are effectively ignored (known limitation; raw-frame
 * parameter handling is not implemented).
 */
3927 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3928 const struct ieee80211_bpf_params *params)
3930 struct ieee80211com *ic = ni->ni_ic;
3931 struct iwm_softc *sc = ic->ic_softc;
3934 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3935 "->%s begin\n", __func__);
/* Refuse to transmit until the hardware has been brought up. */
3937 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3939 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3940 "<-%s not RUNNING\n", __func__);
3946 if (params == NULL) {
3947 error = iwm_tx(sc, m, ni, 0);
3949 error = iwm_tx(sc, m, ni, 0);
/* Arm the TX watchdog. */
3951 sc->sc_tx_timer = 5;
3962 * Note that there are transports that buffer frames before they reach
3963 * the firmware. This means that after flush_tx_path is called, the
3964 * queue might not be empty. The race-free way to handle this is to:
3965 * 1) set the station as draining
3966 * 2) flush the Tx path
3967 * 3) wait for the transport queues to be empty
/* Ask the firmware to flush the TX queues selected by tfd_msk. */
3970 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3973 struct iwm_tx_path_flush_cmd flush_cmd = {
3974 .queues_ctl = htole32(tfd_msk),
3975 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3978 ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3979 sizeof(flush_cmd), &flush_cmd);
3981 device_printf(sc->sc_dev,
3982 "Flushing tx queue failed: %d\n", ret);
/* Thin wrapper: send an ADD_STA command and collect its status. */
3991 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3992 struct iwm_mvm_add_sta_cmd_v7 *cmd, int *status)
3994 return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
3998 /* send station add/update command to firmware */
4000 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
4002 struct iwm_mvm_add_sta_cmd_v7 add_sta_cmd;
4006 memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
4008 add_sta_cmd.sta_id = IWM_STATION_ID;
4009 add_sta_cmd.mac_id_n_color
4010 = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
4011 IWM_DEFAULT_COLOR));
/* Give the station access to every AC's TX FIFO. */
4014 for (ac = 0; ac < WME_NUM_AC; ac++) {
4015 add_sta_cmd.tfd_queue_msk |=
4016 htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
4018 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
/* add_modify: 0 = add a new station, 1 = modify an existing one. */
4020 add_sta_cmd.add_modify = update ? 1 : 0;
4021 add_sta_cmd.station_flags_msk
4022 |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
/* Disable aggregation on all TIDs for now. */
4023 add_sta_cmd.tid_disable_tx = htole16(0xffff);
4025 add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
4027 status = IWM_ADD_STA_SUCCESS;
4028 ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
4033 case IWM_ADD_STA_SUCCESS:
4037 device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
/* Add the (single) client station to the firmware. */
4045 iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
4047 return iwm_mvm_sta_send_to_fw(sc, in, 0);
/* Update the already-added client station in the firmware. */
4051 iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
4053 return iwm_mvm_sta_send_to_fw(sc, in, 1);
/*
 * Add an internal (driver-private) station, e.g. the aux station used
 * for scanning.  `addr` may be NULL (no MAC address copied then —
 * guarded by a stripped NULL check; see line 4072's context).
 */
4057 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
4058 const uint8_t *addr, uint16_t mac_id, uint16_t color)
4060 struct iwm_mvm_add_sta_cmd_v7 cmd;
4064 memset(&cmd, 0, sizeof(cmd));
4065 cmd.sta_id = sta->sta_id;
4066 cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
4068 cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
/* Aggregation disabled on every TID for internal stations. */
4069 cmd.tid_disable_tx = htole16(0xffff);
4072 IEEE80211_ADDR_COPY(cmd.addr, addr);
4074 ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
4079 case IWM_ADD_STA_SUCCESS:
4080 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
4081 "%s: Internal station added.\n", __func__);
4084 device_printf(sc->sc_dev,
4085 "%s: Add internal station failed, status=0x%x\n",
/*
 * Set up the auxiliary station (used for off-channel work such as
 * scanning): enable its TX queue, then register it with the firmware.
 * On failure the aux station state is cleared.
 */
4094 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
4098 sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
4099 sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
4101 ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
4105 ret = iwm_mvm_add_int_sta_common(sc,
4106 &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
4109 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
/*
 * Recompute and send the per-binding time-quota command.  With a
 * non-NULL `in`, its PHY context's binding gets an active interface;
 * quota fragments are split evenly across active bindings.
 */
4122 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
4124 struct iwm_time_quota_cmd cmd;
4125 int i, idx, ret, num_active_macs, quota, quota_rem;
4126 int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
4127 int n_ifs[IWM_MAX_BINDINGS] = {0, };
4130 memset(&cmd, 0, sizeof(cmd));
4132 /* currently, PHY ID == binding ID */
4134 id = in->in_phyctxt->id;
4135 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
4136 colors[id] = in->in_phyctxt->color;
4143 * The FW's scheduling session consists of
4144 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
4145 * equally between all the bindings that require quota
4147 num_active_macs = 0;
4148 for (i = 0; i < IWM_MAX_BINDINGS; i++) {
4149 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
4150 num_active_macs += n_ifs[i];
4155 if (num_active_macs) {
4156 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
4157 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
/* Populate one quota slot per binding with a valid color. */
4160 for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
4164 cmd.quotas[idx].id_and_color =
4165 htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
4167 if (n_ifs[i] <= 0) {
4168 cmd.quotas[idx].quota = htole32(0);
4169 cmd.quotas[idx].max_duration = htole32(0);
4171 cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
4172 cmd.quotas[idx].max_duration = htole32(0);
4177 /* Give the remainder of the session to the first binding */
4178 cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
4180 ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
4183 device_printf(sc->sc_dev,
4184 "%s: Failed to send quota: %d\n", __func__, ret);
4193 * ieee80211 routines
4197 * Change to AUTH state in 80211 state machine. Roughly matches what
4198 * Linux does in bss_info_changed().
/*
 * Prepare firmware state for authentication: configure the smart FIFO,
 * set up multicast, then either update (if already uploaded) or add
 * the MAC/PHY contexts, binding and station, and finally protect the
 * session with a time event so the FW stays on-channel during assoc.
 */
4201 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
4203 struct ieee80211_node *ni;
4204 struct iwm_node *in;
4205 struct iwm_vap *iv = IWM_VAP(vap);
4210 * XXX i have a feeling that the vap node is being
4211 * freed from underneath us. Grr.
/* Hold a reference for the duration; released at the end. */
4213 ni = ieee80211_ref_node(vap->iv_bss);
4215 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4216 "%s: called; vap=%p, bss ni=%p\n",
4223 error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
4227 error = iwm_allow_mcast(vap, sc);
4229 device_printf(sc->sc_dev,
4230 "%s: failed to set multicast\n", __func__);
4235 * This is where it deviates from what Linux does.
4237 * Linux iwlwifi doesn't reset the nic each time, nor does it
4238 * call ctxt_add() here. Instead, it adds it during vap creation,
4239 * and always does a mac_ctx_changed().
4241 * The openbsd port doesn't attempt to do that - it reset things
4242 * at odd states and does the add here.
4244 * So, until the state handling is fixed (ie, we never reset
4245 * the NIC except for a firmware failure, which should drag
4246 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4247 * contexts that are required), let's do a dirty hack here.
4249 if (iv->is_uploaded) {
4250 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4251 device_printf(sc->sc_dev,
4252 "%s: failed to update MAC\n", __func__);
4255 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4256 in->in_ni.ni_chan, 1, 1)) != 0) {
4257 device_printf(sc->sc_dev,
4258 "%s: failed update phy ctxt\n", __func__);
4261 in->in_phyctxt = &sc->sc_phyctxt[0];
4263 if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
4264 device_printf(sc->sc_dev,
4265 "%s: binding update cmd\n", __func__);
4268 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4269 device_printf(sc->sc_dev,
4270 "%s: failed to update sta\n", __func__);
/* First association since upload: add contexts instead of updating. */
4274 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4275 device_printf(sc->sc_dev,
4276 "%s: failed to add MAC\n", __func__);
4279 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4280 in->in_ni.ni_chan, 1, 1)) != 0) {
4281 device_printf(sc->sc_dev,
4282 "%s: failed add phy ctxt!\n", __func__);
4286 in->in_phyctxt = &sc->sc_phyctxt[0];
4288 if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
4289 device_printf(sc->sc_dev,
4290 "%s: binding add cmd\n", __func__);
4293 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4294 device_printf(sc->sc_dev,
4295 "%s: failed to add sta\n", __func__);
4301 * Prevent the FW from wandering off channel during association
4302 * by "protecting" the session with a time event.
4304 /* XXX duration is in units of TU, not MS */
4305 duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4306 iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
4311 ieee80211_free_node(ni);
/*
 * Move firmware state to "associated": refresh the station entry and
 * the MAC context with the now-known association parameters.
 */
4316 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
4318 struct iwm_node *in = IWM_NODE(vap->iv_bss);
4321 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4322 device_printf(sc->sc_dev,
4323 "%s: failed to update STA\n", __func__);
4328 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4329 device_printf(sc->sc_dev,
4330 "%s: failed to update MAC\n", __func__);
/*
 * Tear down the association.  As the long comment explains, the
 * "proper" sequence freezes the device, so in practice the driver
 * drains/flushes TX and does a full device stop.
 * NOTE(review): the calls after iwm_stop_device() (power disable,
 * rm_sta twice, ctxt removal) appear to be dead/disabled code in the
 * original file (likely inside a stripped #if 0 block) — the duplicate
 * iwm_mvm_rm_sta() at 4389/4393 supports that reading; confirm against
 * the full source before editing.
 */
4338 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
4343 * Ok, so *technically* the proper set of calls for going
4344 * from RUN back to SCAN is:
4346 * iwm_mvm_power_mac_disable(sc, in);
4347 * iwm_mvm_mac_ctxt_changed(sc, in);
4348 * iwm_mvm_rm_sta(sc, in);
4349 * iwm_mvm_update_quotas(sc, NULL);
4350 * iwm_mvm_mac_ctxt_changed(sc, in);
4351 * iwm_mvm_binding_remove_vif(sc, in);
4352 * iwm_mvm_mac_ctxt_remove(sc, in);
4354 * However, that freezes the device not matter which permutations
4355 * and modifications are attempted. Obviously, this driver is missing
4356 * something since it works in the Linux driver, but figuring out what
4357 * is missing is a little more complicated. Now, since we're going
4358 * back to nothing anyway, we'll just do a complete device reset.
4359 * Up your's, device!
4362 * Just using 0xf for the queues mask is fine as long as we only
4363 * get here from RUN state.
4366 mbufq_drain(&sc->sc_snd);
4367 iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
4369 * We seem to get away with just synchronously sending the
4370 * IWM_TXPATH_FLUSH command.
4372 // iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
4373 iwm_stop_device(sc);
4382 iwm_mvm_power_mac_disable(sc, in);
4384 if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4385 device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
4389 if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
4390 device_printf(sc->sc_dev, "sta remove fail %d\n", error);
4393 error = iwm_mvm_rm_sta(sc, in);
4395 iwm_mvm_update_quotas(sc, NULL);
4396 if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4397 device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
4400 iwm_mvm_binding_remove_vif(sc, in);
4402 iwm_mvm_mac_ctxt_remove(sc, in);
/* net80211 node allocator: plain malloc of the driver's node struct. */
4408 static struct ieee80211_node *
4409 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4411 return malloc(sizeof (struct iwm_node), M_80211_NODE,
/*
 * Build the firmware link-quality (LQ) command for a node: map the
 * node's negotiated legacy rate set into hardware rate indices
 * (in_ridx[], highest rate first), then fill lq->rs_table with the
 * corresponding PLCP/antenna/CCK words, padding the tail with the
 * lowest rate.  Not 11n-aware (see XXX comments).
 */
4416 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
4418 struct ieee80211_node *ni = &in->in_ni;
4419 struct iwm_lq_cmd *lq = &in->in_lq;
4420 int nrates = ni->ni_rates.rs_nrates;
4421 int i, ridx, tab = 0;
/* Sanity: the LQ table has a fixed size; refuse oversize rate sets. */
4424 if (nrates > nitems(lq->rs_table)) {
4425 device_printf(sc->sc_dev,
4426 "%s: node supports %d rates, driver handles "
4427 "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4431 device_printf(sc->sc_dev,
4432 "%s: node supports 0 rates, odd!\n", __func__);
4437 * XXX .. and most of iwm_node is not initialised explicitly;
4438 * it's all just 0x0 passed to the firmware.
4441 /* first figure out which rates we should support */
4442 /* XXX TODO: this isn't 11n aware /at all/ */
4443 memset(&in->in_ridx, -1, sizeof(in->in_ridx));
4444 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4445 "%s: nrates=%d\n", __func__, nrates);
4448 * Loop over nrates and populate in_ridx from the highest
4449 * rate to the lowest rate. Remember, in_ridx[] has
4450 * IEEE80211_RATE_MAXSIZE entries!
4452 for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
4453 int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;
4455 /* Map 802.11 rate to HW rate index. */
4456 for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
4457 if (iwm_rates[ridx].rate == rate)
4459 if (ridx > IWM_RIDX_MAX) {
4460 device_printf(sc->sc_dev,
4461 "%s: WARNING: device rate for %d not found!\n",
4464 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4465 "%s: rate: i: %d, rate=%d, ridx=%d\n",
4470 in->in_ridx[i] = ridx;
4474 /* then construct a lq_cmd based on those */
4475 memset(lq, 0, sizeof(*lq));
4476 lq->sta_id = IWM_STATION_ID;
4478 /* For HT, always enable RTS/CTS to avoid excessive retries. */
4479 if (ni->ni_flags & IEEE80211_NODE_HT)
4480 lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4483 * are these used? (we don't do SISO or MIMO)
4484 * need to set them to non-zero, though, or we get an error.
4486 lq->single_stream_ant_msk = 1;
4487 lq->dual_stream_ant_msk = 1;
4490 * Build the actual rate selection table.
4491 * The lowest bits are the rates. Additionally,
4492 * CCK needs bit 9 to be set. The rest of the bits
4493 * we add to the table select the tx antenna
4494 * Note that we add the rates in the highest rate first
4495 * (opposite of ni_rates).
4498 * XXX TODO: this should be looping over the min of nrates
4499 * and LQ_MAX_RETRY_NUM. Sigh.
4501 for (i = 0; i < nrates; i++) {
/* Alternate among valid TX antennas across table entries. */
4506 txant = iwm_mvm_get_valid_tx_ant(sc);
4507 nextant = 1<<(ffs(txant)-1);
4510 nextant = iwm_mvm_get_valid_tx_ant(sc);
4513 * Map the rate id into a rate index into
4514 * our hardware table containing the
4515 * configuration to use for this rate.
4517 ridx = in->in_ridx[i];
4518 tab = iwm_rates[ridx].plcp;
4519 tab |= nextant << IWM_RATE_MCS_ANT_POS;
4520 if (IWM_RIDX_IS_CCK(ridx))
4521 tab |= IWM_RATE_MCS_CCK_MSK;
4522 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4523 "station rate i=%d, rate=%d, hw=%x\n",
4524 i, iwm_rates[ridx].rate, tab);
4525 lq->rs_table[i] = htole32(tab);
4527 /* then fill the rest with the lowest possible rate */
4528 for (i = nrates; i < nitems(lq->rs_table); i++) {
4529 KASSERT(tab != 0, ("invalid tab"));
4530 lq->rs_table[i] = htole32(tab);
/*
 * ifmedia change callback: defer to net80211; only act (restart) when
 * it reports ENETRESET and the interface is running.
 */
4535 iwm_media_change(struct ifnet *ifp)
4537 struct ieee80211vap *vap = ifp->if_softc;
4538 struct ieee80211com *ic = vap->iv_ic;
4539 struct iwm_softc *sc = ic->ic_softc;
4542 error = ieee80211_media_change(ifp);
4543 if (error != ENETRESET)
4547 if (ic->ic_nrunning > 0) {
/*
 * net80211 state-machine hook.  Performs driver-side work for each
 * transition (auth, assoc, run setup including LQ command upload),
 * then chains to the saved net80211 newstate handler.  Note the forced
 * detour through INIT when leaving RUN (see the long comment below).
 */
4557 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4559 struct iwm_vap *ivp = IWM_VAP(vap);
4560 struct ieee80211com *ic = vap->iv_ic;
4561 struct iwm_softc *sc = ic->ic_softc;
4562 struct iwm_node *in;
4565 IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4566 "switching state %s -> %s\n",
4567 ieee80211_state_name[vap->iv_state],
4568 ieee80211_state_name[nstate]);
/* Drop the 802.11 lock while talking to firmware (re-taken later). */
4569 IEEE80211_UNLOCK(ic)
4572 if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
4573 iwm_led_blink_stop(sc);
4575 /* disable beacon filtering if we're hopping out of RUN */
4576 if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
4577 iwm_mvm_disable_beacon_filter(sc);
4579 if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4582 if (nstate == IEEE80211_S_INIT) {
4585 error = ivp->iv_newstate(vap, nstate, arg);
4586 IEEE80211_UNLOCK(ic);
4588 iwm_release(sc, NULL);
4595 * It's impossible to directly go RUN->SCAN. If we iwm_release()
4596 * above then the card will be completely reinitialized,
4597 * so the driver must do everything necessary to bring the card
4598 * from INIT to SCAN.
4600 * Additionally, upon receiving deauth frame from AP,
4601 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
4602 * state. This will also fail with this driver, so bring the FSM
4603 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
4605 * XXX TODO: fix this for FreeBSD!
4607 if (nstate == IEEE80211_S_SCAN ||
4608 nstate == IEEE80211_S_AUTH ||
4609 nstate == IEEE80211_S_ASSOC) {
4610 IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4611 "Force transition to INIT; MGT=%d\n", arg);
4614 /* Always pass arg as -1 since we can't Tx right now. */
4616 * XXX arg is just ignored anyway when transitioning
4617 * to IEEE80211_S_INIT.
4619 vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
4620 IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4621 "Going INIT->SCAN\n");
4622 nstate = IEEE80211_S_SCAN;
4623 IEEE80211_UNLOCK(ic);
4629 case IEEE80211_S_INIT:
4632 case IEEE80211_S_AUTH:
4633 if ((error = iwm_auth(vap, sc)) != 0) {
4634 device_printf(sc->sc_dev,
4635 "%s: could not move to auth state: %d\n",
4641 case IEEE80211_S_ASSOC:
4642 if ((error = iwm_assoc(vap, sc)) != 0) {
4643 device_printf(sc->sc_dev,
4644 "%s: failed to associate: %d\n", __func__,
4650 case IEEE80211_S_RUN:
4652 struct iwm_host_cmd cmd = {
4654 .len = { sizeof(in->in_lq), },
4655 .flags = IWM_CMD_SYNC,
4658 /* Update the association state, now we have it all */
4659 /* (eg associd comes in at this point */
4660 error = iwm_assoc(vap, sc);
4662 device_printf(sc->sc_dev,
4663 "%s: failed to update association state: %d\n",
/* Post-assoc setup: power mode, beacon filter, quotas, rates. */
4669 in = IWM_NODE(vap->iv_bss);
4670 iwm_mvm_power_mac_update_mode(sc, in);
4671 iwm_mvm_enable_beacon_filter(sc, in);
4672 iwm_mvm_update_quotas(sc, in);
4673 iwm_setrates(sc, in);
/* Push the link-quality (rate) table built by iwm_setrates(). */
4675 cmd.data[0] = &in->in_lq;
4676 if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
4677 device_printf(sc->sc_dev,
4678 "%s: IWM_LQ_CMD failed\n", __func__);
4681 iwm_mvm_led_enable(sc);
4691 return (ivp->iv_newstate(vap, nstate, arg));
/* Taskqueue callback: report scan completion to net80211. */
4695 iwm_endscan_cb(void *arg, int pending)
4697 struct iwm_softc *sc = arg;
4698 struct ieee80211com *ic = &sc->sc_ic;
4700 IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4704 ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4708 * Aging and idle timeouts for the different possible scenarios
4709 * in default configuration
/* Smart-FIFO timeout table, indexed [scenario][aging|idle]. */
4711 static const uint32_t
4712 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4714 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
4715 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
4718 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
4719 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
4722 htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
4723 htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
4726 htole32(IWM_SF_BA_AGING_TIMER_DEF),
4727 htole32(IWM_SF_BA_IDLE_TIMER_DEF)
4730 htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
4731 htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
4736 * Aging and idle timeouts for the different possible scenarios
4737 * in single BSS MAC configuration.
/* Smart-FIFO timeout table (single-BSS), indexed [scenario][aging|idle]. */
4739 static const uint32_t
4740 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4742 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
4743 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
4746 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
4747 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
4750 htole32(IWM_SF_MCAST_AGING_TIMER),
4751 htole32(IWM_SF_MCAST_IDLE_TIMER)
4754 htole32(IWM_SF_BA_AGING_TIMER),
4755 htole32(IWM_SF_BA_IDLE_TIMER)
4758 htole32(IWM_SF_TX_RE_AGING_TIMER),
4759 htole32(IWM_SF_TX_RE_IDLE_TIMER)
/*
 * Fill a Smart-FIFO config command: choose the full-on watermark from
 * the peer's antenna/HT capabilities (ni != NULL means associated),
 * set long-delay timeouts uniformly, and pick the matching full-on
 * timeout table.
 */
4764 iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
4765 struct ieee80211_node *ni)
4767 int i, j, watermark;
4769 sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
4772 * If we are in association flow - check antenna configuration
4773 * capabilities of the AP station, and choose the watermark accordingly.
4776 if (ni->ni_flags & IEEE80211_NODE_HT) {
/* Rx MCS set bytes 1/2 non-zero => peer supports 2/3 spatial streams. */
4778 if (ni->ni_rxmcs[2] != 0)
4779 watermark = IWM_SF_W_MARK_MIMO3;
4780 else if (ni->ni_rxmcs[1] != 0)
4781 watermark = IWM_SF_W_MARK_MIMO2;
4784 watermark = IWM_SF_W_MARK_SISO;
4786 watermark = IWM_SF_W_MARK_LEGACY;
4788 /* default watermark value for unassociated mode. */
4790 watermark = IWM_SF_W_MARK_MIMO2;
4792 sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
4794 for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
4795 for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
4796 sf_cmd->long_delay_timeouts[i][j] =
4797 htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
/* Associated: single-BSS table; otherwise the default table. */
4802 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
4803 sizeof(iwm_sf_full_timeout));
4805 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
4806 sizeof(iwm_sf_full_timeout_def));
/*
 * Send the Smart-FIFO configuration command for the requested state.
 * FULL_ON uses the current BSS node's capabilities; INIT_OFF uses
 * defaults.  Sent asynchronously.
 */
4811 iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4813 struct ieee80211com *ic = &sc->sc_ic;
4814 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4815 struct iwm_sf_cfg_cmd sf_cmd = {
4816 .state = htole32(IWM_SF_FULL_ON),
/* 8000-family firmware wants the dummy-notification bit off. */
4820 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4821 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4823 switch (new_state) {
4825 case IWM_SF_INIT_OFF:
4826 iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4828 case IWM_SF_FULL_ON:
4829 iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4832 IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4833 "Invalid state: %d. not sending Smart Fifo cmd\n",
4838 ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4839 sizeof(sf_cmd), &sf_cmd);
/* Send the initial Bluetooth-coexistence configuration to firmware. */
4844 iwm_send_bt_init_conf(struct iwm_softc *sc)
4846 struct iwm_bt_coex_cmd bt_cmd;
4848 bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4849 bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4851 return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
/*
 * Send an MCC (mobile country code) update for location-aware
 * regulatory (LAR) and parse the firmware's response (v1 or v2
 * layout, selected by the LAR_SUPPORT_V2 capability).
 * `alpha2` is the two-letter ISO country code, e.g. "ZZ" for world.
 */
4856 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4858 struct iwm_mcc_update_cmd mcc_cmd;
4859 struct iwm_host_cmd hcmd = {
4860 .id = IWM_MCC_UPDATE_CMD,
4861 .flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4862 .data = { &mcc_cmd },
4866 struct iwm_rx_packet *pkt;
4867 struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4868 struct iwm_mcc_update_resp *mcc_resp;
4872 int resp_v2 = isset(sc->sc_enabled_capa,
4873 IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4875 memset(&mcc_cmd, 0, sizeof(mcc_cmd));
/* MCC is the country code packed big-endian into 16 bits. */
4876 mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4877 if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4878 isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
4879 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4881 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
/* Command size differs between the v1 and v2 command layouts. */
4884 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4886 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4888 IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4889 "send MCC update to FW with '%c%c' src = %d\n",
4890 alpha2[0], alpha2[1], mcc_cmd.source_id);
4892 ret = iwm_send_cmd(sc, &hcmd);
4897 pkt = hcmd.resp_pkt;
4899 /* Extract MCC response */
4901 mcc_resp = (void *)pkt->data;
4902 mcc = mcc_resp->mcc;
4903 n_channels = le32toh(mcc_resp->n_channels);
4905 mcc_resp_v1 = (void *)pkt->data;
4906 mcc = mcc_resp_v1->mcc;
4907 n_channels = le32toh(mcc_resp_v1->n_channels);
4910 /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4912 mcc = 0x3030; /* "00" - world */
4914 IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4915 "regulatory domain '%c%c' (%d channels available)\n",
4916 mcc >> 8, mcc & 0xff, n_channels);
/* Response buffer must be released after a WANT_SKB command. */
4918 iwm_free_resp(sc, &hcmd);
/* Set the thermal-throttling TX backoff value in the firmware. */
4924 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4926 struct iwm_host_cmd cmd = {
4927 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4928 .len = { sizeof(uint32_t), },
4929 .data = { &backoff, },
4932 if (iwm_send_cmd(sc, &cmd) != 0) {
4933 device_printf(sc->sc_dev,
4934 "failed to change thermal tx backoff\n");
/*
 * Bring the device fully up: run the INIT ucode, restart the HW, load
 * the regular firmware, then perform the one-time post-alive
 * configuration (BT coex, antennas, PHY DB, aux station, PHY contexts,
 * thermal backoff, power, MCC/LAR, scan config, TX queues, beacon
 * filter).  On any failure the device is stopped again.
 */
4939 iwm_init_hw(struct iwm_softc *sc)
4941 struct ieee80211com *ic = &sc->sc_ic;
4944 if ((error = iwm_start_hw(sc)) != 0) {
4945 printf("iwm_start_hw: failed %d\n", error);
4949 if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4950 printf("iwm_run_init_mvm_ucode: failed %d\n", error);
4955 * should stop and start HW since that INIT
/* INIT image ran; restart HW before loading the regular image. */
4958 iwm_stop_device(sc);
4959 if ((error = iwm_start_hw(sc)) != 0) {
4960 device_printf(sc->sc_dev, "could not initialize hardware\n");
4964 /* omstart, this time with the regular firmware */
4965 error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4967 device_printf(sc->sc_dev, "could not load firmware\n");
4971 if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4972 device_printf(sc->sc_dev, "bt init conf failed\n");
4976 error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
4978 device_printf(sc->sc_dev, "antenna config failed\n");
4982 /* Send phy db control command and then phy db calibration */
4983 if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4986 if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4987 device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4991 /* Add auxiliary station for scanning */
4992 if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4993 device_printf(sc->sc_dev, "add_aux_sta failed\n");
4997 for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4999 * The channel used here isn't relevant as it's
5000 * going to be overwritten in the other flows.
5001 * For now use the first channel we have.
5003 if ((error = iwm_mvm_phy_ctxt_add(sc,
5004 &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
5008 /* Initialize tx backoffs to the minimum. */
5009 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
5010 iwm_mvm_tt_tx_backoff(sc, 0);
5012 error = iwm_mvm_power_update_device(sc);
/* LAR: "ZZ" requests the world regulatory domain as a baseline. */
5016 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
5017 if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
5021 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
5022 if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
5026 /* Enable Tx queues. */
5027 for (ac = 0; ac < WME_NUM_AC; ac++) {
5028 error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
5029 iwm_mvm_ac_to_tx_fifo[ac]);
5034 if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
5035 device_printf(sc->sc_dev, "failed to disable beacon filter\n");
/* Error path: unwind by stopping the device. */
5042 iwm_stop_device(sc);
5046 /* Allow multicast from our BSSID. */
/*
 * Program the firmware multicast filter so frames from the VAP's current
 * BSSID pass.  Allocates a zeroed, 4-byte-rounded iwm_mcast_filter_cmd,
 * sets filter_own, copies in the BSSID, sends it synchronously and frees
 * the buffer.  Returns the iwm_mvm_send_cmd_pdu() status.
 * NOTE(review): elided extract — the malloc-failure branch is not visible.
 */
5048 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
5050 	struct ieee80211_node *ni = vap->iv_bss;
5051 	struct iwm_mcast_filter_cmd *cmd;
5055 	size = roundup(sizeof(*cmd), 4);
5056 	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
5059 	cmd->filter_own = 1;
5063 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
5065 	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
5066 	    IWM_CMD_SYNC, size, cmd);
5067 	free(cmd, M_DEVBUF);
/*
 * Bring the interface up: bump the generation counter, clear the STOPPED
 * flag, run the full HW/firmware init (iwm_init_hw) and, on success, mark
 * the device initialized and arm the watchdog callout.  A no-op if the
 * device is already initialized.
 */
5077 iwm_init(struct iwm_softc *sc)
5081 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
/* Generation counter invalidates stale commands/responses after restart. */
5084 	sc->sc_generation++;
5085 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
5087 	if ((error = iwm_init_hw(sc)) != 0) {
5088 		printf("iwm_init_hw failed %d\n", error);
5094 	 * Ok, firmware loaded and we are jogging
5096 	sc->sc_flags |= IWM_FLAG_HW_INITED;
5097 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
/*
 * net80211 ic_transmit hook: refuse frames while the HW is not initialized,
 * otherwise enqueue the mbuf on the driver send queue (drained by
 * iwm_start).  NOTE(review): elided extract — error returns not visible.
 */
5101 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
5103 	struct iwm_softc *sc;
5109 	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
5113 	error = mbufq_enqueue(&sc->sc_snd, m);
5124  * Dequeue packets from sendq and call send.
/*
 * Drain the driver send queue while TX ring space is available
 * (qfullmsk == 0).  The destination node pointer rides in m_pkthdr.rcvif.
 * On a TX failure the OERRORS counter is bumped and the node ref dropped;
 * a successful hand-off arms the 15-second TX watchdog timer.
 */
5127 iwm_start(struct iwm_softc *sc)
5129 	struct ieee80211_node *ni;
5133 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
5134 	while (sc->qfullmsk == 0 &&
5135 		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
5136 		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
5137 		if (iwm_tx(sc, m, ni, ac) != 0) {
5138 			if_inc_counter(ni->ni_vap->iv_ifp,
5139 			    IFCOUNTER_OERRORS, 1);
5140 			ieee80211_free_node(ni);
/* Arm TX watchdog; cleared/decremented from iwm_watchdog(). */
5143 		sc->sc_tx_timer = 15;
5145 	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
/*
 * Take the interface down: clear INITED, set STOPPED, bump the generation
 * counter, stop LED blinking and the TX watchdog, power the device off,
 * and clear any in-progress scan state.
 */
5149 iwm_stop(struct iwm_softc *sc)
5152 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
5153 	sc->sc_flags |= IWM_FLAG_STOPPED;
5154 	sc->sc_generation++;
5155 	iwm_led_blink_stop(sc);
5156 	sc->sc_tx_timer = 0;
5157 	iwm_stop_device(sc);
5158 	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
/*
 * Per-second watchdog callout (armed by iwm_init / re-armed below).
 * If the TX timer set by iwm_start() expires, declare a device timeout,
 * restart all net80211 state and count an output error.
 */
5162 iwm_watchdog(void *arg)
5164 	struct iwm_softc *sc = arg;
5165 	struct ieee80211com *ic = &sc->sc_ic;
5167 	if (sc->sc_tx_timer > 0) {
5168 		if (--sc->sc_tx_timer == 0) {
5169 			device_printf(sc->sc_dev, "device timeout\n");
/* Full stack restart — heavyweight, but the firmware is presumed wedged. */
5173 			ieee80211_restart_all(ic);
5174 			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
5178 	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
/*
 * net80211 ic_parent hook: reconcile device state with the number of
 * running VAPs — init the HW when the first VAP comes up, stop it when
 * the last goes down, then kick all VAP start queues.
 * NOTE(review): elided extract — the iwm_init/iwm_stop calls themselves
 * are on lines not visible here.
 */
5182 iwm_parent(struct ieee80211com *ic)
5184 	struct iwm_softc *sc = ic->ic_softc;
5188 	if (ic->ic_nrunning > 0) {
5189 		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5193 	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5197 	ieee80211_start_all(ic);
5201 * The interrupt side of things
5205 * error dumping routines are from iwlwifi/mvm/utils.c
5209 * Note: This structure is read from the device with IO accesses,
5210 * and the reading already does the endian conversion. As it is
5211 * read with uint32_t-sized accesses, any members with a different size
5212 * need to be ordered correctly though!
/*
 * LMAC error-event table, read out of device SRAM by iwm_nic_error() when
 * the firmware asserts.  Field order/size must match the firmware layout
 * exactly (see the note above about uint32_t-sized IO reads).
 */
5214 struct iwm_error_event_table {
5215 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5216 	uint32_t error_id;	/* type of error */
5217 	uint32_t trm_hw_status0;	/* TRM HW status */
5218 	uint32_t trm_hw_status1;	/* TRM HW status */
5219 	uint32_t blink2;	/* branch link */
5220 	uint32_t ilink1;	/* interrupt link */
5221 	uint32_t ilink2;	/* interrupt link */
5222 	uint32_t data1;		/* error-specific data */
5223 	uint32_t data2;		/* error-specific data */
5224 	uint32_t data3;		/* error-specific data */
5225 	uint32_t bcon_time;	/* beacon timer */
5226 	uint32_t tsf_low;	/* network timestamp function timer */
5227 	uint32_t tsf_hi;	/* network timestamp function timer */
5228 	uint32_t gp1;		/* GP1 timer register */
5229 	uint32_t gp2;		/* GP2 timer register */
5230 	uint32_t fw_rev_type;	/* firmware revision type */
5231 	uint32_t major;		/* uCode version major */
5232 	uint32_t minor;		/* uCode version minor */
5233 	uint32_t hw_ver;	/* HW Silicon version */
5234 	uint32_t brd_ver;	/* HW board version */
5235 	uint32_t log_pc;	/* log program counter */
5236 	uint32_t frame_ptr;	/* frame pointer */
5237 	uint32_t stack_ptr;	/* stack pointer */
5238 	uint32_t hcmd;		/* last host command header */
5239 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
5241 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
5243 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
5245 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
5247 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
5249 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
5250 	uint32_t wait_event;	/* wait event() caller address */
5251 	uint32_t l2p_control;	/* L2pControlField */
5252 	uint32_t l2p_duration;	/* L2pDurationField */
5253 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
5254 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
5255 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
5257 	uint32_t u_timestamp;	/* indicate when the date and time of the
5259 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
5260 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5263  * UMAC error struct - relevant starting from family 8000 chip.
5264  * Note: This structure is read from the device with IO accesses,
5265  * and the reading already does the endian conversion. As it is
5266  * read with u32-sized accesses, any members with a different size
5267  * need to be ordered correctly though!
/*
 * Dumped by iwm_nic_umac_error() when sc->umac_error_event_table is set.
 */
5269 struct iwm_umac_error_event_table {
5270 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5271 	uint32_t error_id;	/* type of error */
5272 	uint32_t blink1;	/* branch link */
5273 	uint32_t blink2;	/* branch link */
5274 	uint32_t ilink1;	/* interrupt link */
5275 	uint32_t ilink2;	/* interrupt link */
5276 	uint32_t data1;		/* error-specific data */
5277 	uint32_t data2;		/* error-specific data */
5278 	uint32_t data3;		/* error-specific data */
5279 	uint32_t umac_major;
5280 	uint32_t umac_minor;
5281 	uint32_t frame_pointer;	/* core register 27*/
5282 	uint32_t stack_pointer;	/* core register 28 */
5283 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
5284 	uint32_t nic_isr_pref;	/* ISR status register */
/* Layout constants for walking the firmware error log in SRAM. */
5287 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
5288 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
/*
 * Map firmware assert codes to human-readable names for the error dump.
 * The final ADVANCED_SYSASSERT entry (num == 0) is the catch-all returned
 * by iwm_desc_lookup() when no code matches.
 */
5294 } advanced_lookup[] = {
5295 	{ "NMI_INTERRUPT_WDG", 0x34 },
5296 	{ "SYSASSERT", 0x35 },
5297 	{ "UCODE_VERSION_MISMATCH", 0x37 },
5298 	{ "BAD_COMMAND", 0x38 },
5299 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
5300 	{ "FATAL_ERROR", 0x3D },
5301 	{ "NMI_TRM_HW_ERR", 0x46 },
5302 	{ "NMI_INTERRUPT_TRM", 0x4C },
5303 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
5304 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
5305 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
5306 	{ "NMI_INTERRUPT_HOST", 0x66 },
5307 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
5308 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
5309 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
5310 	{ "ADVANCED_SYSASSERT", 0 },
/*
 * Return the symbolic name for a firmware assert code.  Scans all but the
 * last entry of advanced_lookup[]; falls through to the terminal
 * "ADVANCED_SYSASSERT" entry when nothing matches.
 */
5314 iwm_desc_lookup(uint32_t num)
5318 	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5319 		if (advanced_lookup[i].num == num)
5320 			return advanced_lookup[i].name;
5322 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5323 	return advanced_lookup[i].name;
/*
 * Dump the UMAC error-event table (8000-family) to the console.
 * Reads the table from SRAM at sc->umac_error_event_table; a base below
 * 0x800000 is treated as an invalid log pointer.
 */
5327 iwm_nic_umac_error(struct iwm_softc *sc)
5329 	struct iwm_umac_error_event_table table;
5332 	base = sc->umac_error_event_table;
5334 	if (base < 0x800000) {
5335 		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
/* Size is passed in 32-bit words, hence the /sizeof(uint32_t). */
5340 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5341 		device_printf(sc->sc_dev, "reading errlog failed\n");
5345 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5346 		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5347 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5348 		    sc->sc_flags, table.valid);
5351 	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5352 		iwm_desc_lookup(table.error_id));
5353 	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5354 	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5355 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5357 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5359 	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5360 	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5361 	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5362 	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5363 	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5364 	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5365 	    table.frame_pointer);
5366 	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5367 	    table.stack_pointer);
5368 	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5369 	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5370 		table.nic_isr_pref);
5374  * Support for dumping the error log seemed like a good idea ...
5375  * but it's mostly hex junk and the only sensible thing is the
5376  * hw/ucode revision (which we know anyway). Since it's here,
5377  * I'll just leave it in, just in case e.g. the Intel guys want to
5378  * help us decipher some "ADVANCED_SYSASSERT" later.
/*
 * Dump the LMAC error-event table on firmware SW error, then chain to the
 * UMAC dump if a UMAC table pointer was advertised by the firmware.
 */
5381 iwm_nic_error(struct iwm_softc *sc)
5383 	struct iwm_error_event_table table;
5386 	device_printf(sc->sc_dev, "dumping device error log\n");
5387 	base = sc->error_event_table;
5388 	if (base < 0x800000) {
5389 		device_printf(sc->sc_dev,
5390 		    "Invalid error log pointer 0x%08x\n", base);
/* Size argument is in 32-bit words (dwords), not bytes. */
5394 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5395 		device_printf(sc->sc_dev, "reading errlog failed\n");
5400 		device_printf(sc->sc_dev, "errlog not found, skipping\n");
5404 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5405 		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5406 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5407 		    sc->sc_flags, table.valid);
5410 	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5411 	    iwm_desc_lookup(table.error_id));
5412 	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5413 	    table.trm_hw_status0);
5414 	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5415 	    table.trm_hw_status1);
5416 	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5417 	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5418 	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5419 	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5420 	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5421 	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5422 	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5423 	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5424 	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5425 	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5426 	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5427 	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5429 	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5430 	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5431 	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5432 	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5433 	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5434 	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5435 	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5436 	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5437 	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5438 	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5439 	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5440 	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5441 	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5442 	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5443 	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5444 	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5445 	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5446 	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5447 	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
/* 8000 family also carries a UMAC error table — dump it too if present. */
5449 	if (sc->umac_error_event_table)
5450 		iwm_nic_umac_error(sc);
/* Advance the RX ring cursor one slot, wrapping at IWM_RX_RING_COUNT. */
5454 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
5457  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5458  * Basic structure from if_iwn
/*
 * RX/notification processing loop: walk the RX ring from rxq.cur up to the
 * hardware write pointer (closed_rb_num), dispatching each iwm_rx_packet
 * by command code, then tell the hardware how far we got.  Intricate and
 * order-sensitive; code intentionally left untouched, comments only.
 */
5461 iwm_notif_intr(struct iwm_softc *sc)
5463 	struct ieee80211com *ic = &sc->sc_ic;
5466 	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5467 	    BUS_DMASYNC_POSTREAD);
/* closed_rb_num: HW's last-filled receive-buffer index, modulo 4096. */
5469 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5474 	while (sc->rxq.cur != hw) {
5475 		struct iwm_rx_ring *ring = &sc->rxq;
5476 		struct iwm_rx_data *data = &ring->data[ring->cur];
5477 		struct iwm_rx_packet *pkt;
5478 		struct iwm_cmd_response *cresp;
5481 		bus_dmamap_sync(ring->data_dmat, data->map,
5482 		    BUS_DMASYNC_POSTREAD);
5483 		pkt = mtod(data->m, struct iwm_rx_packet *);
/* High bit of qid marks firmware-originated packets; mask it off here. */
5485 		qid = pkt->hdr.qid & ~0x80;
5488 		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5489 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5490 		    "rx packet qid=%d idx=%d type=%x %d %d\n",
5491 		    pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, ring->cur, hw);
5494 		 * randomly get these from the firmware, no idea why.
5495 		 * they at least seem harmless, so just ignore them for now
5497 		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
5498 		    || pkt->len_n_flags == htole32(0x55550000))) {
/* Wake any synchronous waiter blocked on this notification code. */
5503 		iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5506 		case IWM_REPLY_RX_PHY_CMD:
5507 			iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
5510 		case IWM_REPLY_RX_MPDU_CMD:
5511 			iwm_mvm_rx_rx_mpdu(sc, pkt, data);
5515 			iwm_mvm_rx_tx_cmd(sc, pkt, data);
5518 		case IWM_MISSED_BEACONS_NOTIFICATION: {
5519 			struct iwm_missed_beacons_notif *resp;
5522 			/* XXX look at mac_id to determine interface ID */
5523 			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5525 			resp = (void *)pkt->data;
5526 			missed = le32toh(resp->consec_missed_beacons);
5528 			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5529 			    "%s: MISSED_BEACON: mac_id=%d, "
5530 			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5533 			    le32toh(resp->mac_id),
5534 			    le32toh(resp->consec_missed_beacons_since_last_rx),
5535 			    le32toh(resp->consec_missed_beacons),
5536 			    le32toh(resp->num_expected_beacons),
5537 			    le32toh(resp->num_recvd_beacons));
5543 			/* XXX no net80211 locking? */
5544 			if (vap->iv_state == IEEE80211_S_RUN &&
5545 			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5546 				if (missed > vap->iv_bmissthreshold) {
5547 					/* XXX bad locking; turn into task */
5549 					ieee80211_beacon_miss(ic);
5556 		case IWM_MFUART_LOAD_NOTIFICATION:
5562 		case IWM_CALIB_RES_NOTIF_PHY_DB:
5565 		case IWM_STATISTICS_NOTIFICATION: {
5566 			struct iwm_notif_statistics *stats;
5567 			stats = (void *)pkt->data;
5568 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5569 			sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
5572 		case IWM_NVM_ACCESS_CMD:
5573 		case IWM_MCC_UPDATE_CMD:
/* Copy the raw packet for the waiter identified by (qid,idx). */
5574 			if (sc->sc_wantresp == ((qid << 16) | idx)) {
5575 				memcpy(sc->sc_cmd_resp,
5576 				    pkt, sizeof(sc->sc_cmd_resp));
5580 		case IWM_MCC_CHUB_UPDATE_CMD: {
5581 			struct iwm_mcc_chub_notif *notif;
5582 			notif = (void *)pkt->data;
/* Record the firmware-reported country code as a NUL-terminated string. */
5584 			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5585 			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5586 			sc->sc_fw_mcc[2] = '\0';
5587 			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
5588 			    "fw source %d sent CC '%s'\n",
5589 			    notif->source_id, sc->sc_fw_mcc);
5592 		case IWM_DTS_MEASUREMENT_NOTIFICATION:
5593 		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5594 				 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5595 			struct iwm_dts_measurement_notif_v1 *notif;
5597 			if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5598 				device_printf(sc->sc_dev,
5599 				    "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5602 			notif = (void *)pkt->data;
5603 			IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5604 			    "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
/* Command completions with no interesting payload — ack'ed below. */
5609 		case IWM_PHY_CONFIGURATION_CMD:
5610 		case IWM_TX_ANT_CONFIGURATION_CMD:
5612 		case IWM_MAC_CONTEXT_CMD:
5613 		case IWM_REPLY_SF_CFG_CMD:
5614 		case IWM_POWER_TABLE_CMD:
5615 		case IWM_PHY_CONTEXT_CMD:
5616 		case IWM_BINDING_CONTEXT_CMD:
5617 		case IWM_TIME_EVENT_CMD:
5618 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5619 		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5620 		case IWM_SCAN_ABORT_UMAC:
5621 		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5622 		case IWM_SCAN_OFFLOAD_ABORT_CMD:
5623 		case IWM_REPLY_BEACON_FILTERING_CMD:
5624 		case IWM_MAC_PM_POWER_TABLE:
5625 		case IWM_TIME_QUOTA_CMD:
5626 		case IWM_REMOVE_STA:
5627 		case IWM_TXPATH_FLUSH:
5630 		case IWM_REPLY_THERMAL_MNG_BACKOFF:
5631 			cresp = (void *)pkt->data;
5632 			if (sc->sc_wantresp == ((qid << 16) | idx)) {
5633 				memcpy(sc->sc_cmd_resp,
5634 				    pkt, sizeof(*pkt)+sizeof(*cresp));
5639 		case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
5642 		case IWM_INIT_COMPLETE_NOTIF:
5645 		case IWM_SCAN_OFFLOAD_COMPLETE: {
5646 			struct iwm_periodic_scan_complete *notif;
5647 			notif = (void *)pkt->data;
5648 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5649 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5650 				ieee80211_runtask(ic, &sc->sc_es_task);
5655 		case IWM_SCAN_ITERATION_COMPLETE: {
5656 			struct iwm_lmac_scan_complete_notif *notif;
5657 			notif = (void *)pkt->data;
5658 			ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5662 		case IWM_SCAN_COMPLETE_UMAC: {
5663 			struct iwm_umac_scan_complete *notif;
5664 			notif = (void *)pkt->data;
5666 			IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
5667 			    "UMAC scan complete, status=0x%x\n",
5669 			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5670 				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5671 				ieee80211_runtask(ic, &sc->sc_es_task);
5676 		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5677 			struct iwm_umac_scan_iter_complete_notif *notif;
5678 			notif = (void *)pkt->data;
5680 			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5681 			    "complete, status=0x%x, %d channels scanned\n",
5682 			    notif->status, notif->scanned_channels);
5683 			ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5687 		case IWM_REPLY_ERROR: {
5688 			struct iwm_error_resp *resp;
5689 			resp = (void *)pkt->data;
5691 			device_printf(sc->sc_dev,
5692 			    "firmware error 0x%x, cmd 0x%x\n",
5693 			    le32toh(resp->error_type),
5698 		case IWM_TIME_EVENT_NOTIFICATION: {
5699 			struct iwm_time_event_notif *notif;
5700 			notif = (void *)pkt->data;
5702 			IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5703 			    "TE notif status = 0x%x action = 0x%x\n",
5704 			    notif->status, notif->action);
5708 		case IWM_MCAST_FILTER_CMD:
5711 		case IWM_SCD_QUEUE_CFG: {
5712 			struct iwm_scd_txq_cfg_rsp *rsp;
5713 			rsp = (void *)pkt->data;
5715 			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5716 			    "queue cfg token=0x%x sta_id=%d "
5717 			    "tid=%d scd_queue=%d\n",
5718 			    rsp->token, rsp->sta_id, rsp->tid,
5724 			device_printf(sc->sc_dev,
5725 			    "frame %d/%d %x UNHANDLED (this should "
5726 			    "not happen)\n", qid, idx,
5732 		 * Why test bit 0x80? The Linux driver:
5734 		 * There is one exception: uCode sets bit 15 when it
5735 		 * originates the response/notification, i.e. when the
5736 		 * response/notification is not a direct response to a
5737 		 * command sent by the driver. For example, uCode issues
5738 		 * IWM_REPLY_RX when it sends a received frame to the driver;
5739 		 * it is not a direct response to any driver command.
5741 		 * Ok, so since when is 7 == 15? Well, the Linux driver
5742 		 * uses a slightly different format for pkt->hdr, and "qid"
5743 		 * is actually the upper byte of a two-byte field.
5745 		if (!(pkt->hdr.qid & (1 << 7))) {
5746 			iwm_cmd_done(sc, pkt);
5753 	 * Tell the firmware what we have processed.
5754 	 * Seems like the hardware gets upset unless we align
5757 	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5758 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
/*
 * Main interrupt handler (iwm_intr — signature line not visible in this
 * elided extract).  Disables interrupts, gathers pending causes either
 * from the ICT table or directly from IWM_CSR_INT, handles SW/HW errors,
 * firmware-chunk-done, rfkill, periodic and RX causes, then restores the
 * interrupt mask.  Highly order-sensitive; comments only.
 */
5764 	struct iwm_softc *sc = arg;
/* Mask all interrupts while we process this one. */
5770 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5772 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5773 		uint32_t *ict = sc->ict_dma.vaddr;
5776 		tmp = htole32(ict[sc->ict_cur]);
5781 		 * ok, there was something.  keep plowing until we have all.
/* Drain every non-zero ICT slot, clearing each as we consume it. */
5786 			ict[sc->ict_cur] = 0;
5787 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5788 			tmp = htole32(ict[sc->ict_cur]);
5791 		/* this is where the fun begins.  don't ask */
5792 		if (r1 == 0xffffffff)
5795 		/* i am not expected to understand this */
5798 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5800 		r1 = IWM_READ(sc, IWM_CSR_INT);
5801 		/* "hardware gone" (where, fishing?) */
5802 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5804 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5806 	if (r1 == 0 && r2 == 0) {
/* Ack the causes we are about to service. */
5810 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5812 	/* Safely ignore these bits for debug checks below */
5813 	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);
5815 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5817 		struct ieee80211com *ic = &sc->sc_ic;
5818 		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5823 		/* Dump driver status (TX and RX rings) while we're here. */
5824 		device_printf(sc->sc_dev, "driver status:\n");
5825 		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5826 			struct iwm_tx_ring *ring = &sc->txq[i];
5827 			device_printf(sc->sc_dev,
5828 			    "  tx ring %2d: qid=%-2d cur=%-3d "
5830 			    i, ring->qid, ring->cur, ring->queued);
5832 		device_printf(sc->sc_dev,
5833 		    "  rx ring: cur=%d\n", sc->rxq.cur);
5834 		device_printf(sc->sc_dev,
5835 		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5837 		/* Don't stop the device; just do a VAP restart */
5841 			printf("%s: null vap\n", __func__);
5845 		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5846 		    "restarting\n", __func__, vap->iv_state);
5848 		/* XXX TODO: turn this into a callout/taskqueue */
5849 		ieee80211_restart_all(ic);
5853 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5854 		handled |= IWM_CSR_INT_BIT_HW_ERR;
5855 		device_printf(sc->sc_dev, "hardware error, stopping device\n");
5861 	/* firmware chunk loaded */
5862 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5863 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5864 		handled |= IWM_CSR_INT_BIT_FH_TX;
/* Firmware-load path in iwm_firmware_load_chunk() sleeps on this flag. */
5865 		sc->sc_fw_chunk_done = 1;
5869 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5870 		handled |= IWM_CSR_INT_BIT_RF_KILL;
5871 		if (iwm_check_rfkill(sc)) {
5872 			device_printf(sc->sc_dev,
5873 			    "%s: rfkill switch, disabling interface\n",
5880 	 * The Linux driver uses periodic interrupts to avoid races.
5881 	 * We cargo-cult like it's going out of fashion.
5883 	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5884 		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5885 		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5886 		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5888 			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5892 	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5893 		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5894 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5898 		/* enable periodic interrupt, see above */
5899 		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5900 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5901 			    IWM_CSR_INT_PERIODIC_ENA);
5904 	if (__predict_false(r1 & ~handled))
5905 		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5906 		    "%s: unhandled interrupts: %x\n", __func__, r1);
/* Re-enable the interrupt mask we cleared at entry. */
5910 	iwm_restore_interrupts(sc);
5917  * Autoconf glue-sniffing
/* PCI vendor/device IDs this driver attaches to. */
5919 #define	PCI_VENDOR_INTEL		0x8086
5920 #define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
5921 #define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
5922 #define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
5923 #define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
5924 #define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
5925 #define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
5926 #define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
5927 #define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
5928 #define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
5929 #define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
/* Device-id → marketing-name table consumed by iwm_probe(). */
5931 static const struct iwm_devices {
5935 	{ PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
5936 	{ PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
5937 	{ PCI_PRODUCT_INTEL_WL_3165_1, "Intel Dual Band Wireless AC 3165" },
5938 	{ PCI_PRODUCT_INTEL_WL_3165_2, "Intel Dual Band Wireless AC 3165" },
5939 	{ PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
5940 	{ PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
5941 	{ PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
5942 	{ PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
5943 	{ PCI_PRODUCT_INTEL_WL_8260_1, "Intel Dual Band Wireless AC 8260" },
5944 	{ PCI_PRODUCT_INTEL_WL_8260_2, "Intel Dual Band Wireless AC 8260" },
/*
 * newbus probe: match Intel vendor id plus any device id in iwm_devices[],
 * set the device description, and claim the device at default priority.
 */
5948 iwm_probe(device_t dev)
5952 	for (i = 0; i < nitems(iwm_devices); i++) {
5953 		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5954 		    pci_get_device(dev) == iwm_devices[i].device) {
5955 			device_set_desc(dev, iwm_devices[i].name);
5956 			return (BUS_PROBE_DEFAULT);
/*
 * Select the per-chipset configuration (sc->cfg) from the PCI device id.
 * Unknown ids print a diagnostic; the 7265 vs 7265D distinction is refined
 * later in iwm_attach() from the HW revision register, since both share
 * PCI ids.
 */
5964 iwm_dev_check(device_t dev)
5966 	struct iwm_softc *sc;
5968 	sc = device_get_softc(dev);
5970 	switch (pci_get_device(dev)) {
5971 	case PCI_PRODUCT_INTEL_WL_3160_1:
5972 	case PCI_PRODUCT_INTEL_WL_3160_2:
5973 		sc->cfg = &iwm3160_cfg;
5975 	case PCI_PRODUCT_INTEL_WL_3165_1:
5976 	case PCI_PRODUCT_INTEL_WL_3165_2:
5977 		sc->cfg = &iwm3165_cfg;
5979 	case PCI_PRODUCT_INTEL_WL_7260_1:
5980 	case PCI_PRODUCT_INTEL_WL_7260_2:
5981 		sc->cfg = &iwm7260_cfg;
5983 	case PCI_PRODUCT_INTEL_WL_7265_1:
5984 	case PCI_PRODUCT_INTEL_WL_7265_2:
5985 		sc->cfg = &iwm7265_cfg;
5987 	case PCI_PRODUCT_INTEL_WL_8260_1:
5988 	case PCI_PRODUCT_INTEL_WL_8260_2:
5989 		sc->cfg = &iwm8260_cfg;
5992 		device_printf(dev, "unknown adapter type\n");
/* PCI config-space offset of the retry-timeout register we zero below. */
5998 #define PCI_CFG_RETRY_TIMEOUT	0x041
/*
 * PCI-level attach: disable the retry timeout, enable bus mastering,
 * clear a stale INTx status (HW bug workaround), map the memory BAR,
 * allocate an MSI (or shared legacy) interrupt and hook iwm_intr, and
 * fetch the parent bus DMA tag.
 * NOTE(review): elided extract — the rid setup and some error returns
 * are not visible here.
 */
6001 iwm_pci_attach(device_t dev)
6003 	struct iwm_softc *sc;
6004 	int count, error, rid;
6007 	sc = device_get_softc(dev);
6009 	/* We disable the RETRY_TIMEOUT register (0x41) to keep
6010 	 * PCI Tx retries from interfering with C3 CPU state */
6011 	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6013 	/* Enable bus-mastering and hardware bug workaround. */
6014 	pci_enable_busmaster(dev);
6015 	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
6017 	if (reg & PCIM_STATUS_INTxSTATE) {
6018 		reg &= ~PCIM_STATUS_INTxSTATE;
6020 	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
6023 	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
6025 	if (sc->sc_mem == NULL) {
6026 		device_printf(sc->sc_dev, "can't map mem space\n");
6029 	sc->sc_st = rman_get_bustag(sc->sc_mem);
6030 	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
6032 	/* Install interrupt handler. */
/* Prefer MSI; fall back to shared legacy INTx (rid 0) if unavailable. */
6035 	if (pci_alloc_msi(dev, &count) == 0)
6037 	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
6038 	    (rid != 0 ? 0 : RF_SHAREABLE));
6039 	if (sc->sc_irq == NULL) {
6040 		device_printf(dev, "can't map interrupt\n");
6043 	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
6044 	    NULL, iwm_intr, sc, &sc->sc_ih);
6045 	if (sc->sc_ih == NULL) {
6046 		device_printf(dev, "can't establish interrupt");
6049 	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
/*
 * Undo iwm_pci_attach: tear down the interrupt handler, release the IRQ
 * resource and any MSI allocation, then release the memory BAR.  Safe to
 * call with partially-initialized resources (NULL checks throughout).
 */
6055 iwm_pci_detach(device_t dev)
6057 	struct iwm_softc *sc = device_get_softc(dev);
6059 	if (sc->sc_irq != NULL) {
6060 		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
6061 		bus_release_resource(dev, SYS_RES_IRQ,
6062 		    rman_get_rid(sc->sc_irq), sc->sc_irq);
6063 		pci_release_msi(dev);
6065 	if (sc->sc_mem != NULL)
6066 		bus_release_resource(dev, SYS_RES_MEMORY,
6067 		    rman_get_rid(sc->sc_mem), sc->sc_mem);
/*
 * Device attach: set up software state (send queue, callouts, tasks,
 * notification-wait and PHY DB helpers), do PCI attach, identify the chip
 * (including the 8000-family C-step and 7265D special cases), allocate all
 * DMA resources (firmware buffer, keep-warm page, ICT table, TX scheduler,
 * TX/RX rings), initialize net80211 capabilities and PHY contexts, and
 * defer the rest to iwm_preinit via a config intrhook.  On any failure,
 * iwm_detach_local() tears down whatever was allocated.
 * NOTE(review): elided extract — several returns/braces are not visible.
 */
6073 iwm_attach(device_t dev)
6075 	struct iwm_softc *sc = device_get_softc(dev);
6076 	struct ieee80211com *ic = &sc->sc_ic;
6081 	sc->sc_attached = 1;
6083 	mbufq_init(&sc->sc_snd, ifqmaxlen);
6084 	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
6085 	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
6086 	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
6088 	sc->sc_notif_wait = iwm_notification_wait_init(sc);
6089 	if (sc->sc_notif_wait == NULL) {
6090 		device_printf(dev, "failed to init notification wait struct\n");
6095 	sc->sc_phy_db = iwm_phy_db_init(sc);
6096 	if (!sc->sc_phy_db) {
6097 		device_printf(dev, "Cannot init phy_db\n");
6102 	error = iwm_pci_attach(dev);
/* -1 means "no synchronous command response wanted" (see iwm_notif_intr). */
6106 	sc->sc_wantresp = -1;
6108 	/* Check device type */
6109 	error = iwm_dev_check(dev);
6113 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
6115 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
6116 	 * changed, and now the revision step also includes bit 0-1 (no more
6117 	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
6118 	 * in the old format.
6120 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
6121 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
6122 				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
6124 	if (iwm_prepare_card_hw(sc) != 0) {
6125 		device_printf(dev, "could not initialize hardware\n");
6129 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
6134 		 * In order to recognize C step the driver should read the
6135 		 * chip version id located at the AUX bus MISC address.
6137 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
6138 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
6141 		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
6142 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6143 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6146 			device_printf(sc->sc_dev,
6147 			    "Failed to wake up the nic\n");
6151 		if (iwm_nic_lock(sc)) {
6152 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
6153 			hw_step |= IWM_ENABLE_WFPM;
6154 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
6155 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
6156 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
6158 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
6159 						(IWM_SILICON_C_STEP << 2);
6162 			device_printf(sc->sc_dev, "Failed to lock the nic\n");
6167 	/* special-case 7265D, it has the same PCI IDs. */
6168 	if (sc->cfg == &iwm7265_cfg &&
6169 	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
6170 		sc->cfg = &iwm7265d_cfg;
6173 	/* Allocate DMA memory for firmware transfers. */
6174 	if ((error = iwm_alloc_fwmem(sc)) != 0) {
6175 		device_printf(dev, "could not allocate memory for firmware\n");
6179 	/* Allocate "Keep Warm" page. */
6180 	if ((error = iwm_alloc_kw(sc)) != 0) {
6181 		device_printf(dev, "could not allocate keep warm page\n");
6185 	/* We use ICT interrupts */
6186 	if ((error = iwm_alloc_ict(sc)) != 0) {
6187 		device_printf(dev, "could not allocate ICT table\n");
6191 	/* Allocate TX scheduler "rings". */
6192 	if ((error = iwm_alloc_sched(sc)) != 0) {
6193 		device_printf(dev, "could not allocate TX scheduler rings\n");
6197 	/* Allocate TX rings */
6198 	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
6199 		if ((error = iwm_alloc_tx_ring(sc,
6200 		    &sc->txq[txq_i], txq_i)) != 0) {
6202 			    "could not allocate TX ring %d\n",
6208 	/* Allocate RX ring. */
6209 	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
6210 		device_printf(dev, "could not allocate RX ring\n");
6214 	/* Clear pending interrupts. */
6215 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
6218 	ic->ic_name = device_get_nameunit(sc->sc_dev);
6219 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
6220 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
6222 	/* Set device capabilities. */
6225 	    IEEE80211_C_WPA |		/* WPA/RSN */
6227 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
6228 	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
6229 //	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
6231 	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
6232 		sc->sc_phyctxt[i].id = i;
6233 		sc->sc_phyctxt[i].color = 0;
6234 		sc->sc_phyctxt[i].ref = 0;
6235 		sc->sc_phyctxt[i].channel = NULL;
6238 	/* Default noise floor */
6242 	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
/* Defer firmware load & net80211 attach until interrupts are available. */
6244 	sc->sc_preinit_hook.ich_func = iwm_preinit;
6245 	sc->sc_preinit_hook.ich_arg = sc;
6246 	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6247 		device_printf(dev, "config_intrhook_establish failed\n");
6252 	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6253 	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6254 	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6257 	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6258 	    "<-%s\n", __func__);
6262 	/* Free allocated memory if something failed during attachment. */
6264 	iwm_detach_local(sc, 0);
/*
 * iwm_is_valid_ether_addr - sanity-check a hardware MAC address.
 * Rejects multicast/group addresses (low bit of the first octet set)
 * and the all-zero address; anything else is considered usable.
 */
6270 iwm_is_valid_ether_addr(uint8_t *addr)
6272 char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
/* Group bit set, or all-zero => not a valid unicast address. */
6274 if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
/*
 * iwm_update_edca - net80211 WME/EDCA parameter update callback
 * (installed as ic_wme.wme_update).  Only logs the invocation; no
 * EDCA parameters are pushed to the firmware here.
 */
6281 iwm_update_edca(struct ieee80211com *ic)
6283 struct iwm_softc *sc = ic->ic_softc;
/* Diagnostic only: record that net80211 requested an EDCA update. */
6285 device_printf(sc->sc_dev, "%s: called\n", __func__);
/*
 * iwm_preinit - deferred attach work, run from the config_intrhook
 * established at the end of iwm_attach() (interrupts are available by
 * then).  Starts the hardware, runs the "init" ucode (which appears to
 * populate sc->nvm_data -- it is read below), stops the device again,
 * then attaches net80211 and installs all driver ic_* entry points.
 * On any failure the hook is removed and the partial attach is undone
 * via iwm_detach_local().
 */
6290 iwm_preinit(void *arg)
6292 struct iwm_softc *sc = arg;
6293 device_t dev = sc->sc_dev;
6294 struct ieee80211com *ic = &sc->sc_ic;
6297 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6298 "->%s\n", __func__);
/* Bring the NIC up far enough to load and run firmware. */
6301 if ((error = iwm_start_hw(sc)) != 0) {
6302 device_printf(dev, "could not initialize hardware\n");
/* Run the init firmware image (second arg presumably selects the
 * init/NVM-read flavor -- TODO confirm against iwm_run_init_mvm_ucode),
 * then power the device back down until the interface is brought up. */
6307 error = iwm_run_init_mvm_ucode(sc, 1);
6308 iwm_stop_device(sc);
6314 "hw rev 0x%x, fw ver %s, address %s\n",
6315 sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6316 sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6318 /* not all hardware can do 5GHz band */
6319 if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6320 memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6321 sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
/* Build the channel list from the NVM-reported capabilities. */
6324 iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6328 * At this point we've committed - if we fail to do setup,
6329 * we now also have to tear down the net80211 state.
6331 ieee80211_ifattach(ic);
/* Install the driver entry points net80211 will call. */
6332 ic->ic_vap_create = iwm_vap_create;
6333 ic->ic_vap_delete = iwm_vap_delete;
6334 ic->ic_raw_xmit = iwm_raw_xmit;
6335 ic->ic_node_alloc = iwm_node_alloc;
6336 ic->ic_scan_start = iwm_scan_start;
6337 ic->ic_scan_end = iwm_scan_end;
6338 ic->ic_update_mcast = iwm_update_mcast;
6339 ic->ic_getradiocaps = iwm_init_channel_map;
6340 ic->ic_set_channel = iwm_set_channel;
6341 ic->ic_scan_curchan = iwm_scan_curchan;
6342 ic->ic_scan_mindwell = iwm_scan_mindwell;
6343 ic->ic_wme.wme_update = iwm_update_edca;
6344 ic->ic_parent = iwm_parent;
6345 ic->ic_transmit = iwm_transmit;
6346 iwm_radiotap_attach(sc);
6348 ieee80211_announce(ic);
6350 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6351 "<-%s\n", __func__);
/* Success: the intrhook has served its purpose; remove it. */
6352 config_intrhook_disestablish(&sc->sc_preinit_hook);
/* Error path: remove the hook and tear down the partial attach. */
6356 config_intrhook_disestablish(&sc->sc_preinit_hook);
6357 iwm_detach_local(sc, 0);
6361 * Attach the interface to 802.11 radiotap.
/*
 * iwm_radiotap_attach - register the driver's radiotap TX and RX
 * header templates (and their "present" bitmaps) with net80211 so
 * packet sniffers receive per-frame radio metadata.
 */
6364 iwm_radiotap_attach(struct iwm_softc *sc)
6366 struct ieee80211com *ic = &sc->sc_ic;
6368 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6369 "->%s begin\n", __func__);
6370 ieee80211_radiotap_attach(ic,
6371 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6372 IWM_TX_RADIOTAP_PRESENT,
6373 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6374 IWM_RX_RADIOTAP_PRESENT);
6375 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6376 "->%s end\n", __func__);
/*
 * iwm_vap_create - net80211 ic_vap_create callback.  Allocates the
 * driver's vap wrapper (struct iwm_vap), lets net80211 initialize the
 * embedded ieee80211vap, hooks the driver's state-machine handler, and
 * completes attachment.  Only a single vap is supported at a time.
 */
6379 static struct ieee80211vap *
6380 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6381 enum ieee80211_opmode opmode, int flags,
6382 const uint8_t bssid[IEEE80211_ADDR_LEN],
6383 const uint8_t mac[IEEE80211_ADDR_LEN])
6385 struct iwm_vap *ivp;
6386 struct ieee80211vap *vap;
/* Refuse a second vap: this hardware/driver handles one at a time. */
6388 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
6390 ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6392 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6393 vap->iv_bmissthreshold = 10; /* override default */
6394 /* Override with driver methods. */
/* Save net80211's newstate handler so iwm_newstate() can chain to it. */
6395 ivp->iv_newstate = vap->iv_newstate;
6396 vap->iv_newstate = iwm_newstate;
6398 ieee80211_ratectl_init(vap);
6399 /* Complete setup. */
6400 ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6402 ic->ic_opmode = opmode;
/*
 * iwm_vap_delete - net80211 ic_vap_delete callback.  Undoes
 * iwm_vap_create(): tears down rate control, detaches the vap from
 * net80211, and frees the driver's wrapper allocation.
 */
6408 iwm_vap_delete(struct ieee80211vap *vap)
6410 struct iwm_vap *ivp = IWM_VAP(vap);
6412 ieee80211_ratectl_deinit(vap);
6413 ieee80211_vap_detach(vap);
6414 free(ivp, M_80211_VAP);
/*
 * iwm_scan_start - net80211 ic_scan_start callback.  Kicks off a
 * firmware-offloaded scan: UMAC scan when the firmware advertises the
 * capability, LMAC scan otherwise.  On failure the net80211 scan is
 * cancelled; on success the running flag is set and the LED blinks.
 */
6418 iwm_scan_start(struct ieee80211com *ic)
6420 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6421 struct iwm_softc *sc = ic->ic_softc;
/* A scan already in flight here indicates a driver/net80211 bug. */
6425 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6426 /* This should not be possible */
6427 device_printf(sc->sc_dev,
6428 "%s: Previous scan not completed yet\n", __func__);
/* Pick the scan flavor the loaded firmware supports. */
6430 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6431 error = iwm_mvm_umac_scan(sc);
6433 error = iwm_mvm_lmac_scan(sc);
6435 device_printf(sc->sc_dev, "could not initiate scan\n");
/* Tell net80211 the scan is off so its state machine recovers. */
6437 ieee80211_cancel_scan(vap);
6439 sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6440 iwm_led_blink_start(sc);
/*
 * iwm_scan_end - net80211 ic_scan_end callback.  Stops the scan LED
 * blink (re-enabling the steady LED if associated), clears the
 * scan-running flag, waits for the firmware scan to stop, and cancels
 * any stale end-of-scan task.
 */
6446 iwm_scan_end(struct ieee80211com *ic)
6448 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6449 struct iwm_softc *sc = ic->ic_softc;
6452 iwm_led_blink_stop(sc);
/* If we are associated, restore the steady "link up" LED. */
6453 if (vap->iv_state == IEEE80211_S_RUN)
6454 iwm_mvm_led_enable(sc);
6455 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6457 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
6458 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
6461 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
/* Block until the firmware has actually aborted/finished the scan. */
6462 iwm_mvm_scan_stop_wait(sc);
6467 * Make sure we don't race, if sc_es_task is still enqueued here.
6468 * This is to make sure that it won't call ieee80211_scan_done
6469 * when we have already started the next scan.
6471 taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
/* net80211 ic_update_mcast callback -- stub; multicast filtering is
 * not programmed into this hardware by the driver. */
6475 iwm_update_mcast(struct ieee80211com *ic)
/* net80211 ic_set_channel callback -- stub; channel changes are
 * handled via firmware commands elsewhere in the driver. */
6480 iwm_set_channel(struct ieee80211com *ic)
/* net80211 ic_scan_curchan callback -- stub; per-channel dwell is
 * managed by the firmware-offloaded scan, not by net80211. */
6485 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
/* net80211 ic_scan_mindwell callback -- stub, for the same reason as
 * iwm_scan_curchan(): scanning is offloaded to firmware. */
6490 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
/*
 * iwm_init_task - serialized (re)initialization helper.  Sleeps until
 * the IWM_FLAG_BUSY pseudo-lock is free, takes it, re-initializes the
 * interface if it is administratively up, then drops the flag and
 * wakes any other thread waiting on it.
 */
6496 iwm_init_task(void *arg1)
6498 struct iwm_softc *sc = arg1;
/* Wait for any concurrent init/stop to finish (msleep on sc_mtx). */
6501 while (sc->sc_flags & IWM_FLAG_BUSY)
6502 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6503 sc->sc_flags |= IWM_FLAG_BUSY;
6505 if (sc->sc_ic.ic_nrunning > 0)
6507 sc->sc_flags &= ~IWM_FLAG_BUSY;
/* Wake threads blocked in the msleep() loop above. */
6508 wakeup(&sc->sc_flags);
/*
 * iwm_resume - newbus device_resume method.  Restores the PCI retry
 * timeout workaround, re-runs the init task, clears the
 * suspend-time scanning marker, and resumes all net80211 vaps.
 */
6513 iwm_resume(device_t dev)
6515 struct iwm_softc *sc = device_get_softc(dev);
6519 * We disable the RETRY_TIMEOUT register (0x41) to keep
6520 * PCI Tx retries from interfering with C3 CPU state.
6522 pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6523 iwm_init_task(device_get_softc(dev));
/* Drop the marker iwm_suspend() left so scans can start again. */
6526 if (sc->sc_flags & IWM_FLAG_SCANNING) {
6527 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6533 ieee80211_resume_all(&sc->sc_ic);
/*
 * iwm_suspend - newbus device_suspend method.  Notes whether the
 * interface was running, suspends all net80211 vaps, and marks
 * IWM_FLAG_SCANNING (presumably to block scan starts until
 * iwm_resume() clears it -- see the matching clear there).
 */
6539 iwm_suspend(device_t dev)
6542 struct iwm_softc *sc = device_get_softc(dev);
/* Remember if the interface was up so resume can restart it. */
6544 do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6546 ieee80211_suspend_all(&sc->sc_ic);
6551 sc->sc_flags |= IWM_FLAG_SCANNING;
/*
 * iwm_detach_local - common teardown used by both iwm_detach() and the
 * attach/preinit error paths.  'do_net80211' selects whether the
 * net80211 layer was attached and must be detached too.  Order
 * matters: drain tasks/callouts and stop the device before freeing
 * the rings and DMA memory it may still reference, and detach the PCI
 * resources before destroying the softc lock.  Idempotent via the
 * sc_attached guard.
 */
6559 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6561 struct iwm_fw_info *fw = &sc->sc_fw;
6562 device_t dev = sc->sc_dev;
/* Nothing to do (or already torn down): bail out early. */
6565 if (!sc->sc_attached)
6567 sc->sc_attached = 0;
6570 ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
/* Stop timers before the hardware so they cannot touch a dead device. */
6572 callout_drain(&sc->sc_led_blink_to);
6573 callout_drain(&sc->sc_watchdog_to);
6574 iwm_stop_device(sc);
6576 ieee80211_ifdetach(&sc->sc_ic);
6579 iwm_phy_db_free(sc->sc_phy_db);
6580 sc->sc_phy_db = NULL;
6582 iwm_free_nvm_data(sc->nvm_data);
6584 /* Free descriptor rings */
6585 iwm_free_rx_ring(sc, &sc->rxq);
6586 for (i = 0; i < nitems(sc->txq); i++)
6587 iwm_free_tx_ring(sc, &sc->txq[i]);
/* Firmware image, if one was ever loaded. */
6590 if (fw->fw_fp != NULL)
6591 iwm_fw_info_free(fw);
6593 /* Free scheduler */
6594 iwm_dma_contig_free(&sc->sched_dma);
6595 iwm_dma_contig_free(&sc->ict_dma);
6596 iwm_dma_contig_free(&sc->kw_dma);
6597 iwm_dma_contig_free(&sc->fw_dma);
6599 /* Finished with the hardware - detach things */
6600 iwm_pci_detach(dev);
6602 if (sc->sc_notif_wait != NULL) {
6603 iwm_notification_wait_free(sc->sc_notif_wait);
6604 sc->sc_notif_wait = NULL;
/* Drop any queued mbufs, then destroy the softc lock last. */
6607 mbufq_drain(&sc->sc_snd);
6608 IWM_LOCK_DESTROY(sc);
/*
 * iwm_detach - newbus device_detach method.  Thin wrapper that runs
 * the full local teardown, including net80211 detach (do_net80211=1).
 */
6614 iwm_detach(device_t dev)
6616 struct iwm_softc *sc = device_get_softc(dev);
6618 return (iwm_detach_local(sc, 1));
/*
 * Newbus glue: method table, driver declaration, and module
 * registration for the iwm(4) PCI driver.  The MODULE_DEPEND lines
 * ensure firmware(9), pci(4) and wlan(4) are loaded first.
 */
6621 static device_method_t iwm_pci_methods[] = {
6622 /* Device interface */
6623 DEVMETHOD(device_probe, iwm_probe),
6624 DEVMETHOD(device_attach, iwm_attach),
6625 DEVMETHOD(device_detach, iwm_detach),
6626 DEVMETHOD(device_suspend, iwm_suspend),
6627 DEVMETHOD(device_resume, iwm_resume),
6632 static driver_t iwm_pci_driver = {
6635 sizeof (struct iwm_softc)
6638 static devclass_t iwm_devclass;
/* Register the driver on the pci bus and declare module dependencies. */
6640 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6641 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6642 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6643 MODULE_DEPEND(iwm, wlan, 1, 1, 1);