1 /*      $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $     */
2
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 #include <sys/cdefs.h>
106 __FBSDID("$FreeBSD$");
107
108 #include "opt_wlan.h"
109
110 #include <sys/param.h>
111 #include <sys/bus.h>
112 #include <sys/conf.h>
113 #include <sys/endian.h>
114 #include <sys/firmware.h>
115 #include <sys/kernel.h>
116 #include <sys/malloc.h>
117 #include <sys/mbuf.h>
118 #include <sys/mutex.h>
119 #include <sys/module.h>
120 #include <sys/proc.h>
121 #include <sys/rman.h>
122 #include <sys/socket.h>
123 #include <sys/sockio.h>
124 #include <sys/sysctl.h>
125 #include <sys/linker.h>
126
127 #include <machine/bus.h>
128 #include <machine/endian.h>
129 #include <machine/resource.h>
130
131 #include <dev/pci/pcivar.h>
132 #include <dev/pci/pcireg.h>
133
134 #include <net/bpf.h>
135
136 #include <net/if.h>
137 #include <net/if_var.h>
138 #include <net/if_arp.h>
139 #include <net/if_dl.h>
140 #include <net/if_media.h>
141 #include <net/if_types.h>
142
143 #include <netinet/in.h>
144 #include <netinet/in_systm.h>
145 #include <netinet/if_ether.h>
146 #include <netinet/ip.h>
147
148 #include <net80211/ieee80211_var.h>
149 #include <net80211/ieee80211_regdomain.h>
150 #include <net80211/ieee80211_ratectl.h>
151 #include <net80211/ieee80211_radiotap.h>
152
153 #include <dev/iwm/if_iwmreg.h>
154 #include <dev/iwm/if_iwmvar.h>
155 #include <dev/iwm/if_iwm_debug.h>
156 #include <dev/iwm/if_iwm_util.h>
157 #include <dev/iwm/if_iwm_binding.h>
158 #include <dev/iwm/if_iwm_phy_db.h>
159 #include <dev/iwm/if_iwm_mac_ctxt.h>
160 #include <dev/iwm/if_iwm_phy_ctxt.h>
161 #include <dev/iwm/if_iwm_time_event.h>
162 #include <dev/iwm/if_iwm_power.h>
163 #include <dev/iwm/if_iwm_scan.h>
164
165 #include <dev/iwm/if_iwm_pcie_trans.h>
166 #include <dev/iwm/if_iwm_led.h>
167
168 #define IWM_NVM_HW_SECTION_NUM_FAMILY_7000      0
169 #define IWM_NVM_HW_SECTION_NUM_FAMILY_8000      10
170
171 /* lower blocks contain EEPROM image and calibration data */
172 #define IWM_OTP_LOW_IMAGE_SIZE_FAMILY_7000      (16 * 512 * sizeof(uint16_t)) /* 16 KB */
173 #define IWM_OTP_LOW_IMAGE_SIZE_FAMILY_8000      (32 * 512 * sizeof(uint16_t)) /* 32 KB */
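/*
 * The OTP/EEPROM is addressed in 16-bit words, so the sizes above work
 * out to e.g. 16 * 512 words * sizeof(uint16_t) = 16384 bytes = 16 KB
 * for the 7000 family (and twice that for the 8000 family).
 */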
174
175 #define IWM7260_FW      "iwm7260fw"
176 #define IWM3160_FW      "iwm3160fw"
177 #define IWM7265_FW      "iwm7265fw"
178 #define IWM7265D_FW     "iwm7265Dfw"
179 #define IWM8000_FW      "iwm8000Cfw"
180
181 #define IWM_DEVICE_7000_COMMON                                          \
182         .device_family = IWM_DEVICE_FAMILY_7000,                        \
183         .eeprom_size = IWM_OTP_LOW_IMAGE_SIZE_FAMILY_7000,              \
184         .nvm_hw_section_num = IWM_NVM_HW_SECTION_NUM_FAMILY_7000
185
186 const struct iwm_cfg iwm7260_cfg = {
187         .fw_name = IWM7260_FW,
188         IWM_DEVICE_7000_COMMON,
189         .host_interrupt_operation_mode = 1,
190 };
191
192 const struct iwm_cfg iwm3160_cfg = {
193         .fw_name = IWM3160_FW,
194         IWM_DEVICE_7000_COMMON,
195         .host_interrupt_operation_mode = 1,
196 };
197
198 const struct iwm_cfg iwm3165_cfg = {
199         /* XXX IWM7265D_FW doesn't seem to work properly yet */
200         .fw_name = IWM7265_FW,
201         IWM_DEVICE_7000_COMMON,
202         .host_interrupt_operation_mode = 0,
203 };
204
205 const struct iwm_cfg iwm7265_cfg = {
206         .fw_name = IWM7265_FW,
207         IWM_DEVICE_7000_COMMON,
208         .host_interrupt_operation_mode = 0,
209 };
210
211 const struct iwm_cfg iwm7265d_cfg = {
212         /* XXX IWM7265D_FW doesn't seem to work properly yet */
213         .fw_name = IWM7265_FW,
214         IWM_DEVICE_7000_COMMON,
215         .host_interrupt_operation_mode = 0,
216 };
217
218 #define IWM_DEVICE_8000_COMMON                                          \
219         .device_family = IWM_DEVICE_FAMILY_8000,                        \
220         .eeprom_size = IWM_OTP_LOW_IMAGE_SIZE_FAMILY_8000,              \
221         .nvm_hw_section_num = IWM_NVM_HW_SECTION_NUM_FAMILY_8000
222
223 const struct iwm_cfg iwm8260_cfg = {
224         .fw_name = IWM8000_FW,
225         IWM_DEVICE_8000_COMMON,
226         .host_interrupt_operation_mode = 0,
227 };
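/*
 * Each iwm_cfg above ties a firmware image name to the family-specific
 * OTP size and NVM HW section number defined earlier.  The
 * host_interrupt_operation_mode flag is only set for the 7260/3160
 * parts; it apparently selects an interrupt-handling workaround that
 * those older chips need.
 */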
228
229 const uint8_t iwm_nvm_channels[] = {
230         /* 2.4 GHz */
231         1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
232         /* 5 GHz */
233         36, 40, 44, 48, 52, 56, 60, 64,
234         100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
235         149, 153, 157, 161, 165
236 };
237 _Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
238     "IWM_NUM_CHANNELS is too small");
239
240 const uint8_t iwm_nvm_channels_8000[] = {
241         /* 2.4 GHz */
242         1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
243         /* 5 GHz */
244         36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
245         96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
246         149, 153, 157, 161, 165, 169, 173, 177, 181
247 };
248 _Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
249     "IWM_NUM_CHANNELS_8000 is too small");
250
251 #define IWM_NUM_2GHZ_CHANNELS   14
252 #define IWM_N_HW_ADDR_MASK      0xF
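/*
 * In both channel tables above, the first IWM_NUM_2GHZ_CHANNELS (14)
 * entries are the 2.4 GHz channels; everything after them is 5 GHz.
 */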
253
254 /*
255  * XXX For now, there's simply a fixed set of rate table entries
256  * that are populated.
257  */
258 const struct iwm_rate {
259         uint8_t rate;
260         uint8_t plcp;
261 } iwm_rates[] = {
262         {   2,  IWM_RATE_1M_PLCP  },
263         {   4,  IWM_RATE_2M_PLCP  },
264         {  11,  IWM_RATE_5M_PLCP  },
265         {  22,  IWM_RATE_11M_PLCP },
266         {  12,  IWM_RATE_6M_PLCP  },
267         {  18,  IWM_RATE_9M_PLCP  },
268         {  24,  IWM_RATE_12M_PLCP },
269         {  36,  IWM_RATE_18M_PLCP },
270         {  48,  IWM_RATE_24M_PLCP },
271         {  72,  IWM_RATE_36M_PLCP },
272         {  96,  IWM_RATE_48M_PLCP },
273         { 108,  IWM_RATE_54M_PLCP },
274 };
275 #define IWM_RIDX_CCK    0
276 #define IWM_RIDX_OFDM   4
277 #define IWM_RIDX_MAX    (nitems(iwm_rates)-1)
278 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
279 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
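/*
 * In iwm_rates above, 'rate' is in 500 kb/s units (2 = 1 Mb/s, 22 =
 * 11 Mb/s, 108 = 54 Mb/s) and 'plcp' is the PLCP signal value handed
 * to the firmware.  Entries 0-3 are the CCK rates and entries 4 and up
 * are OFDM, which is what IWM_RIDX_CCK/IWM_RIDX_OFDM encode.
 */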
280
281 struct iwm_nvm_section {
282         uint16_t length;
283         uint8_t *data;
284 };
285
286 static int      iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
287 static int      iwm_firmware_store_section(struct iwm_softc *,
288                                            enum iwm_ucode_type,
289                                            const uint8_t *, size_t);
290 static int      iwm_set_default_calib(struct iwm_softc *, const void *);
291 static void     iwm_fw_info_free(struct iwm_fw_info *);
292 static int      iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
293 static void     iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
294 static int      iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
295                                      bus_size_t, bus_size_t);
296 static void     iwm_dma_contig_free(struct iwm_dma_info *);
297 static int      iwm_alloc_fwmem(struct iwm_softc *);
298 static int      iwm_alloc_sched(struct iwm_softc *);
299 static int      iwm_alloc_kw(struct iwm_softc *);
300 static int      iwm_alloc_ict(struct iwm_softc *);
301 static int      iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
302 static void     iwm_disable_rx_dma(struct iwm_softc *);
303 static void     iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
304 static void     iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
305 static int      iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
306                                   int);
307 static void     iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
308 static void     iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
309 static void     iwm_enable_interrupts(struct iwm_softc *);
310 static void     iwm_restore_interrupts(struct iwm_softc *);
311 static void     iwm_disable_interrupts(struct iwm_softc *);
312 static void     iwm_ict_reset(struct iwm_softc *);
313 static int      iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
314 static void     iwm_stop_device(struct iwm_softc *);
315 static void     iwm_mvm_nic_config(struct iwm_softc *);
316 static int      iwm_nic_rx_init(struct iwm_softc *);
317 static int      iwm_nic_tx_init(struct iwm_softc *);
318 static int      iwm_nic_init(struct iwm_softc *);
319 static int      iwm_enable_txq(struct iwm_softc *, int, int, int);
320 static int      iwm_post_alive(struct iwm_softc *);
321 static int      iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
322                                    uint16_t, uint8_t *, uint16_t *);
323 static int      iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
324                                      uint16_t *, uint32_t);
325 static uint32_t iwm_eeprom_channel_flags(uint16_t);
326 static void     iwm_add_channel_band(struct iwm_softc *,
327                     struct ieee80211_channel[], int, int *, int, size_t,
328                     const uint8_t[]);
329 static void     iwm_init_channel_map(struct ieee80211com *, int, int *,
330                     struct ieee80211_channel[]);
331 static struct iwm_nvm_data *
332         iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
333                            const uint16_t *, const uint16_t *,
334                            const uint16_t *, const uint16_t *,
335                            const uint16_t *);
336 static void     iwm_free_nvm_data(struct iwm_nvm_data *);
337 static void     iwm_set_hw_address_family_8000(struct iwm_softc *,
338                                                struct iwm_nvm_data *,
339                                                const uint16_t *,
340                                                const uint16_t *);
341 static int      iwm_get_sku(const struct iwm_softc *, const uint16_t *,
342                             const uint16_t *);
343 static int      iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
344 static int      iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
345                                   const uint16_t *);
346 static int      iwm_get_n_hw_addrs(const struct iwm_softc *,
347                                    const uint16_t *);
348 static void     iwm_set_radio_cfg(const struct iwm_softc *,
349                                   struct iwm_nvm_data *, uint32_t);
350 static struct iwm_nvm_data *
351         iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
352 static int      iwm_nvm_init(struct iwm_softc *);
353 static int      iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
354                                        const uint8_t *, uint32_t);
355 static int      iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
356                                         const uint8_t *, uint32_t);
357 static int      iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
358 static int      iwm_load_cpu_sections_8000(struct iwm_softc *,
359                                            struct iwm_fw_sects *, int , int *);
360 static int      iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
361 static int      iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
362 static int      iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
363 static int      iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
364 static int      iwm_send_phy_cfg_cmd(struct iwm_softc *);
365 static int      iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
366                                               enum iwm_ucode_type);
367 static int      iwm_run_init_mvm_ucode(struct iwm_softc *, int);
368 static int      iwm_rx_addbuf(struct iwm_softc *, int, int);
369 static int      iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
370 static int      iwm_mvm_get_signal_strength(struct iwm_softc *,
371                                             struct iwm_rx_phy_info *);
372 static void     iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
373                                       struct iwm_rx_packet *,
374                                       struct iwm_rx_data *);
375 static int      iwm_get_noise(struct iwm_softc *sc,
376                     const struct iwm_mvm_statistics_rx_non_phy *);
377 static void     iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
378                                    struct iwm_rx_data *);
379 static int      iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
380                                          struct iwm_rx_packet *,
381                                          struct iwm_node *);
382 static void     iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
383                                   struct iwm_rx_data *);
384 static void     iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
385 #if 0
386 static void     iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
387                                  uint16_t);
388 #endif
389 static const struct iwm_rate *
390         iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
391                         struct mbuf *, struct iwm_tx_cmd *);
392 static int      iwm_tx(struct iwm_softc *, struct mbuf *,
393                        struct ieee80211_node *, int);
394 static int      iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
395                              const struct ieee80211_bpf_params *);
396 static int      iwm_mvm_flush_tx_path(struct iwm_softc *sc,
397                                       uint32_t tfd_msk, uint32_t flags);
398 static int      iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
399                                                 struct iwm_mvm_add_sta_cmd_v7 *,
400                                                 int *);
401 static int      iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
402                                        int);
403 static int      iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
404 static int      iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
405 static int      iwm_mvm_add_int_sta_common(struct iwm_softc *,
406                                            struct iwm_int_sta *,
407                                            const uint8_t *, uint16_t, uint16_t);
408 static int      iwm_mvm_add_aux_sta(struct iwm_softc *);
409 static int      iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
410 static int      iwm_auth(struct ieee80211vap *, struct iwm_softc *);
411 static int      iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
412 static int      iwm_release(struct iwm_softc *, struct iwm_node *);
413 static struct ieee80211_node *
414                 iwm_node_alloc(struct ieee80211vap *,
415                                const uint8_t[IEEE80211_ADDR_LEN]);
416 static void     iwm_setrates(struct iwm_softc *, struct iwm_node *);
417 static int      iwm_media_change(struct ifnet *);
418 static int      iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
419 static void     iwm_endscan_cb(void *, int);
420 static void     iwm_mvm_fill_sf_command(struct iwm_softc *,
421                                         struct iwm_sf_cfg_cmd *,
422                                         struct ieee80211_node *);
423 static int      iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
424 static int      iwm_send_bt_init_conf(struct iwm_softc *);
425 static int      iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
426 static void     iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
427 static int      iwm_init_hw(struct iwm_softc *);
428 static void     iwm_init(struct iwm_softc *);
429 static void     iwm_start(struct iwm_softc *);
430 static void     iwm_stop(struct iwm_softc *);
431 static void     iwm_watchdog(void *);
432 static void     iwm_parent(struct ieee80211com *);
433 #ifdef IWM_DEBUG
434 static const char *
435                 iwm_desc_lookup(uint32_t);
436 static void     iwm_nic_error(struct iwm_softc *);
437 static void     iwm_nic_umac_error(struct iwm_softc *);
438 #endif
439 static void     iwm_notif_intr(struct iwm_softc *);
440 static void     iwm_intr(void *);
441 static int      iwm_attach(device_t);
442 static int      iwm_is_valid_ether_addr(uint8_t *);
443 static void     iwm_preinit(void *);
444 static int      iwm_detach_local(struct iwm_softc *sc, int);
445 static void     iwm_init_task(void *);
446 static void     iwm_radiotap_attach(struct iwm_softc *);
447 static struct ieee80211vap *
448                 iwm_vap_create(struct ieee80211com *,
449                                const char [IFNAMSIZ], int,
450                                enum ieee80211_opmode, int,
451                                const uint8_t [IEEE80211_ADDR_LEN],
452                                const uint8_t [IEEE80211_ADDR_LEN]);
453 static void     iwm_vap_delete(struct ieee80211vap *);
454 static void     iwm_scan_start(struct ieee80211com *);
455 static void     iwm_scan_end(struct ieee80211com *);
456 static void     iwm_update_mcast(struct ieee80211com *);
457 static void     iwm_set_channel(struct ieee80211com *);
458 static void     iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
459 static void     iwm_scan_mindwell(struct ieee80211_scan_state *);
460 static int      iwm_detach(device_t);
461
462 /*
463  * Firmware parser.
464  */
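
/*
 * The firmware image parsed below uses the iwlwifi TLV format: a
 * struct iwm_tlv_ucode_header (a leading zero word, the
 * IWM_TLV_UCODE_MAGIC value and a version word), followed by a sequence
 * of records, each a struct iwm_ucode_tlv {type, length} header plus
 * 'length' bytes of payload padded to a 4-byte boundary (hence the
 * roundup(tlv_len, 4) in iwm_read_firmware()).
 */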
465
466 static int
467 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
468 {
469         const struct iwm_fw_cscheme_list *l = (const void *)data;
470
471         if (dlen < sizeof(*l) ||
472             dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
473                 return EINVAL;
474
475         /* we don't actually store anything for now, always use s/w crypto */
476
477         return 0;
478 }
479
480 static int
481 iwm_firmware_store_section(struct iwm_softc *sc,
482     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
483 {
484         struct iwm_fw_sects *fws;
485         struct iwm_fw_onesect *fwone;
486
487         if (type >= IWM_UCODE_TYPE_MAX)
488                 return EINVAL;
489         if (dlen < sizeof(uint32_t))
490                 return EINVAL;
491
492         fws = &sc->sc_fw.fw_sects[type];
493         if (fws->fw_count >= IWM_UCODE_SECT_MAX)
494                 return EINVAL;
495
496         fwone = &fws->fw_sect[fws->fw_count];
497
 498         /* the first 32 bits are the device load offset */
499         memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
500
501         /* rest is data */
502         fwone->fws_data = data + sizeof(uint32_t);
503         fwone->fws_len = dlen - sizeof(uint32_t);
504
505         fws->fw_count++;
506
507         return 0;
508 }
509
510 #define IWM_DEFAULT_SCAN_CHANNELS 40
511
512 /* iwlwifi: iwl-drv.c */
513 struct iwm_tlv_calib_data {
514         uint32_t ucode_type;
515         struct iwm_tlv_calib_ctrl calib;
516 } __packed;
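/*
 * Payload of an IWM_UCODE_TLV_DEF_CALIB record: the ucode type it
 * applies to plus the calibration flow/event trigger masks, which
 * iwm_set_default_calib() below copies into sc_default_calib[].
 */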
517
518 static int
519 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
520 {
521         const struct iwm_tlv_calib_data *def_calib = data;
522         uint32_t ucode_type = le32toh(def_calib->ucode_type);
523
524         if (ucode_type >= IWM_UCODE_TYPE_MAX) {
525                 device_printf(sc->sc_dev,
526                     "Wrong ucode_type %u for default "
527                     "calibration.\n", ucode_type);
528                 return EINVAL;
529         }
530
531         sc->sc_default_calib[ucode_type].flow_trigger =
532             def_calib->calib.flow_trigger;
533         sc->sc_default_calib[ucode_type].event_trigger =
534             def_calib->calib.event_trigger;
535
536         return 0;
537 }
538
539 static void
540 iwm_fw_info_free(struct iwm_fw_info *fw)
541 {
542         firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
543         fw->fw_fp = NULL;
544         /* don't touch fw->fw_status */
545         memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
546 }
547
548 static int
549 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
550 {
551         struct iwm_fw_info *fw = &sc->sc_fw;
552         const struct iwm_tlv_ucode_header *uhdr;
553         struct iwm_ucode_tlv tlv;
554         enum iwm_ucode_tlv_type tlv_type;
555         const struct firmware *fwp;
556         const uint8_t *data;
557         int error = 0;
558         size_t len;
559
560         if (fw->fw_status == IWM_FW_STATUS_DONE &&
561             ucode_type != IWM_UCODE_TYPE_INIT)
562                 return 0;
563
564         while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
565                 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
566         fw->fw_status = IWM_FW_STATUS_INPROGRESS;
567
568         if (fw->fw_fp != NULL)
569                 iwm_fw_info_free(fw);
570
571         /*
572          * Load firmware into driver memory.
573          * fw_fp will be set.
574          */
575         IWM_UNLOCK(sc);
576         fwp = firmware_get(sc->cfg->fw_name);
577         IWM_LOCK(sc);
 578         if (fwp == NULL) {
 579                 device_printf(sc->sc_dev,
 580                     "could not read firmware %s\n", sc->cfg->fw_name);
 581                 error = ENOENT;
 582                 goto out;
 583         }
584         fw->fw_fp = fwp;
585
586         /* (Re-)Initialize default values. */
587         sc->sc_capaflags = 0;
588         sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
589         memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
590         memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
591
592         /*
593          * Parse firmware contents
594          */
595
596         uhdr = (const void *)fw->fw_fp->data;
597         if (*(const uint32_t *)fw->fw_fp->data != 0
598             || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
599                 device_printf(sc->sc_dev, "invalid firmware %s\n",
600                     sc->cfg->fw_name);
601                 error = EINVAL;
602                 goto out;
603         }
604
605         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
606             IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
607             IWM_UCODE_MINOR(le32toh(uhdr->ver)),
608             IWM_UCODE_API(le32toh(uhdr->ver)));
609         data = uhdr->data;
610         len = fw->fw_fp->datasize - sizeof(*uhdr);
611
612         while (len >= sizeof(tlv)) {
613                 size_t tlv_len;
614                 const void *tlv_data;
615
616                 memcpy(&tlv, data, sizeof(tlv));
617                 tlv_len = le32toh(tlv.length);
618                 tlv_type = le32toh(tlv.type);
619
620                 len -= sizeof(tlv);
621                 data += sizeof(tlv);
622                 tlv_data = data;
623
624                 if (len < tlv_len) {
625                         device_printf(sc->sc_dev,
626                             "firmware too short: %zu bytes\n",
627                             len);
628                         error = EINVAL;
629                         goto parse_out;
630                 }
631
632                 switch ((int)tlv_type) {
633                 case IWM_UCODE_TLV_PROBE_MAX_LEN:
634                         if (tlv_len < sizeof(uint32_t)) {
635                                 device_printf(sc->sc_dev,
636                                     "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
637                                     __func__,
638                                     (int) tlv_len);
639                                 error = EINVAL;
640                                 goto parse_out;
641                         }
642                         sc->sc_capa_max_probe_len
643                             = le32toh(*(const uint32_t *)tlv_data);
644                         /* limit it to something sensible */
645                         if (sc->sc_capa_max_probe_len >
646                             IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
647                                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
648                                     "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
649                                     "ridiculous\n", __func__);
650                                 error = EINVAL;
651                                 goto parse_out;
652                         }
653                         break;
654                 case IWM_UCODE_TLV_PAN:
655                         if (tlv_len) {
656                                 device_printf(sc->sc_dev,
657                                     "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
658                                     __func__,
659                                     (int) tlv_len);
660                                 error = EINVAL;
661                                 goto parse_out;
662                         }
663                         sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
664                         break;
665                 case IWM_UCODE_TLV_FLAGS:
666                         if (tlv_len < sizeof(uint32_t)) {
667                                 device_printf(sc->sc_dev,
668                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
669                                     __func__,
670                                     (int) tlv_len);
671                                 error = EINVAL;
672                                 goto parse_out;
673                         }
674                         /*
675                          * Apparently there can be many flags, but Linux driver
676                          * parses only the first one, and so do we.
677                          *
678                          * XXX: why does this override IWM_UCODE_TLV_PAN?
679                          * Intentional or a bug?  Observations from
680                          * current firmware file:
681                          *  1) TLV_PAN is parsed first
682                          *  2) TLV_FLAGS contains TLV_FLAGS_PAN
683                          * ==> this resets TLV_PAN to itself... hnnnk
684                          */
685                         sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
686                         break;
687                 case IWM_UCODE_TLV_CSCHEME:
688                         if ((error = iwm_store_cscheme(sc,
689                             tlv_data, tlv_len)) != 0) {
690                                 device_printf(sc->sc_dev,
691                                     "%s: iwm_store_cscheme(): returned %d\n",
692                                     __func__,
693                                     error);
694                                 goto parse_out;
695                         }
696                         break;
697                 case IWM_UCODE_TLV_NUM_OF_CPU: {
698                         uint32_t num_cpu;
699                         if (tlv_len != sizeof(uint32_t)) {
700                                 device_printf(sc->sc_dev,
 701                                     "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
702                                     __func__,
703                                     (int) tlv_len);
704                                 error = EINVAL;
705                                 goto parse_out;
706                         }
707                         num_cpu = le32toh(*(const uint32_t *)tlv_data);
708                         if (num_cpu < 1 || num_cpu > 2) {
709                                 device_printf(sc->sc_dev,
710                                     "%s: Driver supports only 1 or 2 CPUs\n",
711                                     __func__);
712                                 error = EINVAL;
713                                 goto parse_out;
714                         }
715                         break;
716                 }
717                 case IWM_UCODE_TLV_SEC_RT:
718                         if ((error = iwm_firmware_store_section(sc,
719                             IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0) {
720                                 device_printf(sc->sc_dev,
721                                     "%s: IWM_UCODE_TYPE_REGULAR: iwm_firmware_store_section() failed; %d\n",
722                                     __func__,
723                                     error);
724                                 goto parse_out;
725                         }
726                         break;
727                 case IWM_UCODE_TLV_SEC_INIT:
728                         if ((error = iwm_firmware_store_section(sc,
729                             IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0) {
730                                 device_printf(sc->sc_dev,
731                                     "%s: IWM_UCODE_TYPE_INIT: iwm_firmware_store_section() failed; %d\n",
732                                     __func__,
733                                     error);
734                                 goto parse_out;
735                         }
736                         break;
737                 case IWM_UCODE_TLV_SEC_WOWLAN:
738                         if ((error = iwm_firmware_store_section(sc,
739                             IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0) {
740                                 device_printf(sc->sc_dev,
741                                     "%s: IWM_UCODE_TYPE_WOW: iwm_firmware_store_section() failed; %d\n",
742                                     __func__,
743                                     error);
744                                 goto parse_out;
745                         }
746                         break;
747                 case IWM_UCODE_TLV_DEF_CALIB:
748                         if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
749                                 device_printf(sc->sc_dev,
 750                                     "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%d) != sizeof(iwm_tlv_calib_data) (%d)\n",
751                                     __func__,
752                                     (int) tlv_len,
753                                     (int) sizeof(struct iwm_tlv_calib_data));
754                                 error = EINVAL;
755                                 goto parse_out;
756                         }
757                         if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
758                                 device_printf(sc->sc_dev,
759                                     "%s: iwm_set_default_calib() failed: %d\n",
760                                     __func__,
761                                     error);
762                                 goto parse_out;
763                         }
764                         break;
765                 case IWM_UCODE_TLV_PHY_SKU:
766                         if (tlv_len != sizeof(uint32_t)) {
767                                 error = EINVAL;
768                                 device_printf(sc->sc_dev,
 769                                     "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) != sizeof(uint32_t)\n",
770                                     __func__,
771                                     (int) tlv_len);
772                                 goto parse_out;
773                         }
774                         sc->sc_fw.phy_config =
775                             le32toh(*(const uint32_t *)tlv_data);
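                        /*
                         * The PHY SKU word carries the valid TX/RX chain
                         * masks; shifting by IWM_FW_PHY_CFG_{TX,RX}_CHAIN_POS
                         * below yields one bit per antenna chain.
                         */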
776                         sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
777                                                   IWM_FW_PHY_CFG_TX_CHAIN) >>
778                                                   IWM_FW_PHY_CFG_TX_CHAIN_POS;
779                         sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
780                                                   IWM_FW_PHY_CFG_RX_CHAIN) >>
781                                                   IWM_FW_PHY_CFG_RX_CHAIN_POS;
782                         break;
783
784                 case IWM_UCODE_TLV_API_CHANGES_SET: {
785                         const struct iwm_ucode_api *api;
786                         if (tlv_len != sizeof(*api)) {
787                                 error = EINVAL;
788                                 goto parse_out;
789                         }
790                         api = (const struct iwm_ucode_api *)tlv_data;
791                         /* Flags may exceed 32 bits in future firmware. */
792                         if (le32toh(api->api_index) > 0) {
793                                 device_printf(sc->sc_dev,
794                                     "unsupported API index %d\n",
795                                     le32toh(api->api_index));
796                                 goto parse_out;
797                         }
798                         sc->sc_ucode_api = le32toh(api->api_flags);
799                         break;
800                 }
801
802                 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
803                         const struct iwm_ucode_capa *capa;
804                         int idx, i;
805                         if (tlv_len != sizeof(*capa)) {
806                                 error = EINVAL;
807                                 goto parse_out;
808                         }
809                         capa = (const struct iwm_ucode_capa *)tlv_data;
810                         idx = le32toh(capa->api_index);
811                         if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
812                                 device_printf(sc->sc_dev,
813                                     "unsupported API index %d\n", idx);
814                                 goto parse_out;
815                         }
816                         for (i = 0; i < 32; i++) {
817                                 if ((le32toh(capa->api_capa) & (1U << i)) == 0)
818                                         continue;
819                                 setbit(sc->sc_enabled_capa, i + (32 * idx));
820                         }
821                         break;
822                 }
823
824                 case 48: /* undocumented TLV */
825                 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
826                 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
827                         /* ignore, not used by current driver */
828                         break;
829
830                 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
831                         if ((error = iwm_firmware_store_section(sc,
832                             IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
833                             tlv_len)) != 0)
834                                 goto parse_out;
835                         break;
836
837                 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
838                         if (tlv_len != sizeof(uint32_t)) {
839                                 error = EINVAL;
840                                 goto parse_out;
841                         }
842                         sc->sc_capa_n_scan_channels =
843                           le32toh(*(const uint32_t *)tlv_data);
844                         break;
845
846                 case IWM_UCODE_TLV_FW_VERSION:
847                         if (tlv_len != sizeof(uint32_t) * 3) {
848                                 error = EINVAL;
849                                 goto parse_out;
850                         }
851                         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
852                             "%d.%d.%d",
853                             le32toh(((const uint32_t *)tlv_data)[0]),
854                             le32toh(((const uint32_t *)tlv_data)[1]),
855                             le32toh(((const uint32_t *)tlv_data)[2]));
856                         break;
857
858                 default:
859                         device_printf(sc->sc_dev,
860                             "%s: unknown firmware section %d, abort\n",
861                             __func__, tlv_type);
862                         error = EINVAL;
863                         goto parse_out;
864                 }
865
866                 len -= roundup(tlv_len, 4);
867                 data += roundup(tlv_len, 4);
868         }
869
870         KASSERT(error == 0, ("unhandled error"));
871
872  parse_out:
873         if (error) {
874                 device_printf(sc->sc_dev, "firmware parse error %d, "
875                     "section type %d\n", error, tlv_type);
876         }
877
878         if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
879                 device_printf(sc->sc_dev,
880                     "device uses unsupported power ops\n");
881                 error = ENOTSUP;
882         }
883
884  out:
885         if (error) {
886                 fw->fw_status = IWM_FW_STATUS_NONE;
887                 if (fw->fw_fp != NULL)
888                         iwm_fw_info_free(fw);
889         } else
890                 fw->fw_status = IWM_FW_STATUS_DONE;
891         wakeup(&sc->sc_fw);
892
893         return error;
894 }
895
896 /*
897  * DMA resource routines
898  */
899
900 static void
901 iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
902 {
903         if (error != 0)
904                 return;
905         KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
906         *(bus_addr_t *)arg = segs[0].ds_addr;
907 }
908
909 static int
910 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
911     bus_size_t size, bus_size_t alignment)
912 {
913         int error;
914
915         dma->tag = NULL;
916         dma->map = NULL;
917         dma->size = size;
918         dma->vaddr = NULL;
919
920         error = bus_dma_tag_create(tag, alignment,
921             0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
922             1, size, 0, NULL, NULL, &dma->tag);
923         if (error != 0)
924                 goto fail;
925
926         error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
927             BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
928         if (error != 0)
929                 goto fail;
930
931         error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
932             iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
933         if (error != 0) {
934                 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
935                 dma->vaddr = NULL;
936                 goto fail;
937         }
938
939         bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
940
941         return 0;
942
943 fail:
944         iwm_dma_contig_free(dma);
945
946         return error;
947 }
948
949 static void
950 iwm_dma_contig_free(struct iwm_dma_info *dma)
951 {
952         if (dma->vaddr != NULL) {
953                 bus_dmamap_sync(dma->tag, dma->map,
954                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
955                 bus_dmamap_unload(dma->tag, dma->map);
956                 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
957                 dma->vaddr = NULL;
958         }
959         if (dma->tag != NULL) {
960                 bus_dma_tag_destroy(dma->tag);
961                 dma->tag = NULL;
962         }
963 }
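
/*
 * A minimal usage sketch for the two helpers above ('example_dma' is a
 * hypothetical variable, not something the driver defines):
 *
 *	struct iwm_dma_info example_dma;
 *
 *	if (iwm_dma_contig_alloc(sc->sc_dmat, &example_dma, 4096, 16) == 0) {
 *		// use example_dma.vaddr (KVA) and example_dma.paddr (bus addr)
 *		iwm_dma_contig_free(&example_dma);
 *	}
 *
 * The bus address ends up in dma->paddr via the iwm_dma_map_addr() load
 * callback.
 */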
964
965 /* fwmem is used to load firmware onto the card */
966 static int
967 iwm_alloc_fwmem(struct iwm_softc *sc)
968 {
969         /* Must be aligned on a 16-byte boundary. */
970         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
971             sc->sc_fwdmasegsz, 16);
972 }
973
 974 /* tx scheduler byte-count tables; allocated but never updated by this driver (iwm_update_sched() is compiled out above) */
975 static int
976 iwm_alloc_sched(struct iwm_softc *sc)
977 {
978         /* TX scheduler rings must be aligned on a 1KB boundary. */
979         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
980             nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
981 }
982
983 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
984 static int
985 iwm_alloc_kw(struct iwm_softc *sc)
986 {
987         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
988 }
989
990 /* interrupt cause table */
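/*
 * The ICT must be aligned to (1 << IWM_ICT_PADDR_SHIFT) bytes because
 * the hardware register only holds the physical address shifted right
 * by IWM_ICT_PADDR_SHIFT (see iwm_ict_reset() below).
 */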
991 static int
992 iwm_alloc_ict(struct iwm_softc *sc)
993 {
994         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
995             IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
996 }
997
998 static int
999 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1000 {
1001         bus_size_t size;
1002         int i, error;
1003
1004         ring->cur = 0;
1005
1006         /* Allocate RX descriptors (256-byte aligned). */
1007         size = IWM_RX_RING_COUNT * sizeof(uint32_t);
1008         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1009         if (error != 0) {
1010                 device_printf(sc->sc_dev,
1011                     "could not allocate RX ring DMA memory\n");
1012                 goto fail;
1013         }
1014         ring->desc = ring->desc_dma.vaddr;
1015
1016         /* Allocate RX status area (16-byte aligned). */
1017         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1018             sizeof(*ring->stat), 16);
1019         if (error != 0) {
1020                 device_printf(sc->sc_dev,
1021                     "could not allocate RX status DMA memory\n");
1022                 goto fail;
1023         }
1024         ring->stat = ring->stat_dma.vaddr;
1025
1026         /* Create RX buffer DMA tag. */
1027         error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1028             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1029             IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
1030         if (error != 0) {
1031                 device_printf(sc->sc_dev,
1032                     "%s: could not create RX buf DMA tag, error %d\n",
1033                     __func__, error);
1034                 goto fail;
1035         }
1036
1037         /* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
1038         error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
1039         if (error != 0) {
1040                 device_printf(sc->sc_dev,
1041                     "%s: could not create RX buf DMA map, error %d\n",
1042                     __func__, error);
1043                 goto fail;
1044         }
1045         /*
1046          * Allocate and map RX buffers.
1047          */
1048         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1049                 struct iwm_rx_data *data = &ring->data[i];
1050                 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1051                 if (error != 0) {
1052                         device_printf(sc->sc_dev,
1053                             "%s: could not create RX buf DMA map, error %d\n",
1054                             __func__, error);
1055                         goto fail;
1056                 }
1057                 data->m = NULL;
1058
1059                 if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
1060                         goto fail;
1061                 }
1062         }
1063         return 0;
1064
1065 fail:   iwm_free_rx_ring(sc, ring);
1066         return error;
1067 }
1068
1069 static void
1070 iwm_disable_rx_dma(struct iwm_softc *sc)
1071 {
1072         /* XXX conditional nic locks are stupid */
1073         /* XXX print out if we can't lock the NIC? */
1074         if (iwm_nic_lock(sc)) {
1075                 /* XXX handle if RX stop doesn't finish? */
1076                 (void) iwm_pcie_rx_stop(sc);
1077                 iwm_nic_unlock(sc);
1078         }
1079 }
1080
1081 static void
1082 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1083 {
1084         /* Reset the ring state */
1085         ring->cur = 0;
1086
1087         /*
1088          * The hw rx ring index in shared memory must also be cleared,
1089          * otherwise the discrepancy can cause reprocessing chaos.
1090          */
1091         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1092 }
1093
1094 static void
1095 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1096 {
1097         int i;
1098
1099         iwm_dma_contig_free(&ring->desc_dma);
1100         iwm_dma_contig_free(&ring->stat_dma);
1101
1102         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1103                 struct iwm_rx_data *data = &ring->data[i];
1104
1105                 if (data->m != NULL) {
1106                         bus_dmamap_sync(ring->data_dmat, data->map,
1107                             BUS_DMASYNC_POSTREAD);
1108                         bus_dmamap_unload(ring->data_dmat, data->map);
1109                         m_freem(data->m);
1110                         data->m = NULL;
1111                 }
1112                 if (data->map != NULL) {
1113                         bus_dmamap_destroy(ring->data_dmat, data->map);
1114                         data->map = NULL;
1115                 }
1116         }
1117         if (ring->spare_map != NULL) {
1118                 bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1119                 ring->spare_map = NULL;
1120         }
1121         if (ring->data_dmat != NULL) {
1122                 bus_dma_tag_destroy(ring->data_dmat);
1123                 ring->data_dmat = NULL;
1124         }
1125 }
1126
1127 static int
1128 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1129 {
1130         bus_addr_t paddr;
1131         bus_size_t size;
1132         size_t maxsize;
1133         int nsegments;
1134         int i, error;
1135
1136         ring->qid = qid;
1137         ring->queued = 0;
1138         ring->cur = 0;
1139
1140         /* Allocate TX descriptors (256-byte aligned). */
1141         size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1142         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1143         if (error != 0) {
1144                 device_printf(sc->sc_dev,
1145                     "could not allocate TX ring DMA memory\n");
1146                 goto fail;
1147         }
1148         ring->desc = ring->desc_dma.vaddr;
1149
1150         /*
 1151          * Only rings 0 through IWM_MVM_CMD_QUEUE (the EDCA queues plus the
 1152          * command queue) are used, so don't allocate command space for the rest.
1153          */
1154         if (qid > IWM_MVM_CMD_QUEUE)
1155                 return 0;
1156
1157         size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1158         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1159         if (error != 0) {
1160                 device_printf(sc->sc_dev,
1161                     "could not allocate TX cmd DMA memory\n");
1162                 goto fail;
1163         }
1164         ring->cmd = ring->cmd_dma.vaddr;
1165
1166         /* FW commands may require more mapped space than packets. */
1167         if (qid == IWM_MVM_CMD_QUEUE) {
1168                 maxsize = IWM_RBUF_SIZE;
1169                 nsegments = 1;
1170         } else {
1171                 maxsize = MCLBYTES;
1172                 nsegments = IWM_MAX_SCATTER - 2;
1173         }
1174
1175         error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1176             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
1177             nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
1178         if (error != 0) {
1179                 device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
1180                 goto fail;
1181         }
1182
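        /*
         * Each TX slot gets its own struct iwm_device_cmd inside cmd_dma;
         * scratch_paddr below is the bus address of the 'scratch' field of
         * the TX command stored in that slot.
         */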
1183         paddr = ring->cmd_dma.paddr;
1184         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1185                 struct iwm_tx_data *data = &ring->data[i];
1186
1187                 data->cmd_paddr = paddr;
1188                 data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1189                     + offsetof(struct iwm_tx_cmd, scratch);
1190                 paddr += sizeof(struct iwm_device_cmd);
1191
1192                 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1193                 if (error != 0) {
1194                         device_printf(sc->sc_dev,
1195                             "could not create TX buf DMA map\n");
1196                         goto fail;
1197                 }
1198         }
1199         KASSERT(paddr == ring->cmd_dma.paddr + size,
1200             ("invalid physical address"));
1201         return 0;
1202
1203 fail:   iwm_free_tx_ring(sc, ring);
1204         return error;
1205 }
1206
1207 static void
1208 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1209 {
1210         int i;
1211
1212         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1213                 struct iwm_tx_data *data = &ring->data[i];
1214
1215                 if (data->m != NULL) {
1216                         bus_dmamap_sync(ring->data_dmat, data->map,
1217                             BUS_DMASYNC_POSTWRITE);
1218                         bus_dmamap_unload(ring->data_dmat, data->map);
1219                         m_freem(data->m);
1220                         data->m = NULL;
1221                 }
1222         }
1223         /* Clear TX descriptors. */
1224         memset(ring->desc, 0, ring->desc_dma.size);
1225         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1226             BUS_DMASYNC_PREWRITE);
1227         sc->qfullmsk &= ~(1 << ring->qid);
1228         ring->queued = 0;
1229         ring->cur = 0;
1230 }
1231
1232 static void
1233 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1234 {
1235         int i;
1236
1237         iwm_dma_contig_free(&ring->desc_dma);
1238         iwm_dma_contig_free(&ring->cmd_dma);
1239
1240         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1241                 struct iwm_tx_data *data = &ring->data[i];
1242
1243                 if (data->m != NULL) {
1244                         bus_dmamap_sync(ring->data_dmat, data->map,
1245                             BUS_DMASYNC_POSTWRITE);
1246                         bus_dmamap_unload(ring->data_dmat, data->map);
1247                         m_freem(data->m);
1248                         data->m = NULL;
1249                 }
1250                 if (data->map != NULL) {
1251                         bus_dmamap_destroy(ring->data_dmat, data->map);
1252                         data->map = NULL;
1253                 }
1254         }
1255         if (ring->data_dmat != NULL) {
1256                 bus_dma_tag_destroy(ring->data_dmat);
1257                 ring->data_dmat = NULL;
1258         }
1259 }
1260
1261 /*
1262  * High-level hardware frobbing routines
1263  */
1264
1265 static void
1266 iwm_enable_interrupts(struct iwm_softc *sc)
1267 {
1268         sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1269         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1270 }
1271
1272 static void
1273 iwm_restore_interrupts(struct iwm_softc *sc)
1274 {
1275         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1276 }
1277
1278 static void
1279 iwm_disable_interrupts(struct iwm_softc *sc)
1280 {
1281         /* disable interrupts */
1282         IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1283
1284         /* acknowledge all interrupts */
1285         IWM_WRITE(sc, IWM_CSR_INT, ~0);
1286         IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1287 }
1288
1289 static void
1290 iwm_ict_reset(struct iwm_softc *sc)
1291 {
1292         iwm_disable_interrupts(sc);
1293
1294         /* Reset ICT table. */
1295         memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1296         sc->ict_cur = 0;
1297
1298         /* Set physical address of ICT table (4KB aligned). */
1299         IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1300             IWM_CSR_DRAM_INT_TBL_ENABLE
1301             | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1302             | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1303             | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1304
1305         /* Switch to ICT interrupt mode in driver. */
1306         sc->sc_flags |= IWM_FLAG_USE_ICT;
1307
1308         /* Re-enable interrupts. */
1309         IWM_WRITE(sc, IWM_CSR_INT, ~0);
1310         iwm_enable_interrupts(sc);
1311 }
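
/*
 * The ICT (interrupt cause table) lives in a 4KB-aligned DMA region, so
 * only the address bits above IWM_ICT_PADDR_SHIFT fit in the DRAM_INT_TBL
 * register; the remaining bits carry the enable, write-pointer and
 * wrap-check flags.  A hypothetical helper (illustration only, not an
 * iwlwifi/iwm API) composing the same register value as iwm_ict_reset():
 */
#if 0
static uint32_t
iwm_ict_tbl_reg_val(bus_addr_t paddr)
{
	/* paddr is assumed to be 4KB aligned, as allocated for the ICT. */
	return IWM_CSR_DRAM_INT_TBL_ENABLE |
	    IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER |
	    IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK |
	    (uint32_t)(paddr >> IWM_ICT_PADDR_SHIFT);
}
#endif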
1312
1313 /* iwlwifi pcie/trans.c */
1314
1315 /*
1316  * Since this hard-resets the device, it's time to actually
1317  * mark the first vap (if any) as having no MAC context.
1318  * It's annoying, but since the driver may be stopped and
1319  * restarted while active (thanks, OpenBSD port!) we have
1320  * to track this correctly.
1321  */
1322 static void
1323 iwm_stop_device(struct iwm_softc *sc)
1324 {
1325         struct ieee80211com *ic = &sc->sc_ic;
1326         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1327         int chnl, qid;
1328         uint32_t mask = 0;
1329
1330         /* tell the device to stop sending interrupts */
1331         iwm_disable_interrupts(sc);
1332
1333         /*
1334          * FreeBSD-local: mark the first vap as not-uploaded,
1335          * so the next transition through auth/assoc
1336          * will correctly populate the MAC context.
1337          */
1338         if (vap) {
1339                 struct iwm_vap *iv = IWM_VAP(vap);
1340                 iv->is_uploaded = 0;
1341         }
1342
1343         /* Device going down, stop using the ICT table. */
1344         sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1345
1346         /* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
1347
1348         iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1349
1350         if (iwm_nic_lock(sc)) {
1351                 /* Stop each Tx DMA channel */
1352                 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1353                         IWM_WRITE(sc,
1354                             IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1355                         mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1356                 }
1357
1358                 /* Wait for DMA channels to be idle */
1359                 if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1360                     5000)) {
1361                         device_printf(sc->sc_dev,
1362                             "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1363                             IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1364                 }
1365                 iwm_nic_unlock(sc);
1366         }
1367         iwm_disable_rx_dma(sc);
1368
1369         /* Stop RX ring. */
1370         iwm_reset_rx_ring(sc, &sc->rxq);
1371
1372         /* Reset all TX rings. */
1373         for (qid = 0; qid < nitems(sc->txq); qid++)
1374                 iwm_reset_tx_ring(sc, &sc->txq[qid]);
1375
1376         /*
1377          * Power-down device's busmaster DMA clocks
1378          */
1379         iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1380         DELAY(5);
1381
1382         /* Make sure (redundantly) that we've released our request to stay awake. */
1383         IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1384             IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1385
1386         /* Stop the device, and put it in low power state */
1387         iwm_apm_stop(sc);
1388
1389         /* Upon stop, the APM issues an interrupt if HW RF kill is set.
1390          * Clear the interrupt again here.
1391          */
1392         iwm_disable_interrupts(sc);
1393         /* stop and reset the on-board processor */
1394         IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1395
1396         /*
1397          * Even if we stop the HW, we still want the RF kill
1398          * interrupt
1399          */
1400         iwm_enable_rfkill_int(sc);
1401         iwm_check_rfkill(sc);
1402 }
1403
1404 /* iwlwifi: mvm/ops.c */
1405 static void
1406 iwm_mvm_nic_config(struct iwm_softc *sc)
1407 {
1408         uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1409         uint32_t reg_val = 0;
1410         uint32_t phy_config = iwm_mvm_get_phy_config(sc);
1411
1412         radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1413             IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1414         radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1415             IWM_FW_PHY_CFG_RADIO_STEP_POS;
1416         radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1417             IWM_FW_PHY_CFG_RADIO_DASH_POS;
1418
1419         /* SKU control */
1420         reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1421             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1422         reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1423             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1424
1425         /* radio configuration */
1426         reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1427         reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1428         reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1429
1430         IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1431
1432         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1433             "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1434             radio_cfg_step, radio_cfg_dash);
1435
1436         /*
1437          * W/A: the NIC gets stuck in a reset state after early PCIe power off
1438          * (PCIe power is lost before PERST# is asserted), which causes the ME
1439          * firmware to lose ownership and be unable to reacquire it.
1440          */
1441         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1442                 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1443                     IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1444                     ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1445         }
1446 }
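
/*
 * Each radio configuration field above follows the same mask-then-shift
 * pattern.  A hypothetical helper (illustration only, not driver API)
 * capturing that pattern:
 */
#if 0
static inline uint8_t
iwm_phy_cfg_field(uint32_t phy_config, uint32_t msk, int pos)
{
	/* Extract a packed field from the firmware PHY configuration. */
	return (phy_config & msk) >> pos;
}
#endif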
1447
1448 static int
1449 iwm_nic_rx_init(struct iwm_softc *sc)
1450 {
1451         if (!iwm_nic_lock(sc))
1452                 return EBUSY;
1453
1454         /*
1455          * Initialize RX ring.  This is from the iwn driver.
1456          */
1457         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1458
1459         /* stop DMA */
1460         iwm_disable_rx_dma(sc);
1461         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1462         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1463         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1464         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1465
1466         /* Set physical address of RX ring (256-byte aligned). */
1467         IWM_WRITE(sc,
1468             IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1469
1470         /* Set physical address of RX status (16-byte aligned). */
1471         IWM_WRITE(sc,
1472             IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1473
1474         /* Enable RX. */
1475         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1476             IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
1477             IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |  /* HW bug */
1478             IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
1479             IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK        |
1480             (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1481             IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
1482             IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1483
1484         IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1485
1486         /* W/A for interrupt coalescing bug in 7260 and 3160 */
1487         if (sc->cfg->host_interrupt_operation_mode)
1488                 IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1489
1490         /*
1491          * Thus sayeth el jefe (iwlwifi) via a comment:
1492          *
1493          * This value should initially be 0 (before preparing any
1494          * RBs), should be 8 after preparing the first 8 RBs (for example)
1495          */
1496         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1497
1498         iwm_nic_unlock(sc);
1499
1500         return 0;
1501 }
1502
1503 static int
1504 iwm_nic_tx_init(struct iwm_softc *sc)
1505 {
1506         int qid;
1507
1508         if (!iwm_nic_lock(sc))
1509                 return EBUSY;
1510
1511         /* Deactivate TX scheduler. */
1512         iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1513
1514         /* Set physical address of "keep warm" page (16-byte aligned). */
1515         IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1516
1517         /* Initialize TX rings. */
1518         for (qid = 0; qid < nitems(sc->txq); qid++) {
1519                 struct iwm_tx_ring *txq = &sc->txq[qid];
1520
1521                 /* Set physical address of TX ring (256-byte aligned). */
1522                 IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1523                     txq->desc_dma.paddr >> 8);
1524                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1525                     "%s: loading ring %d descriptors (%p) at %lx\n",
1526                     __func__,
1527                     qid, txq->desc,
1528                     (unsigned long) (txq->desc_dma.paddr >> 8));
1529         }
1530
1531         iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1532
1533         iwm_nic_unlock(sc);
1534
1535         return 0;
1536 }
1537
1538 static int
1539 iwm_nic_init(struct iwm_softc *sc)
1540 {
1541         int error;
1542
1543         iwm_apm_init(sc);
1544         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1545                 iwm_set_pwr(sc);
1546
1547         iwm_mvm_nic_config(sc);
1548
1549         if ((error = iwm_nic_rx_init(sc)) != 0)
1550                 return error;
1551
1552         /*
1553          * Ditto for TX, from iwn
1554          */
1555         if ((error = iwm_nic_tx_init(sc)) != 0)
1556                 return error;
1557
1558         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1559             "%s: shadow registers enabled\n", __func__);
1560         IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1561
1562         return 0;
1563 }
1564
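/*
 * Access category to hardware TX FIFO mapping, in the same order as
 * iwlwifi's iwl_mvm_ac_to_tx_fifo[] (VO, VI, BE, BK).
 */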
1565 const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
1566         IWM_MVM_TX_FIFO_VO,
1567         IWM_MVM_TX_FIFO_VI,
1568         IWM_MVM_TX_FIFO_BE,
1569         IWM_MVM_TX_FIFO_BK,
1570 };
1571
1572 static int
1573 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1574 {
1575         if (!iwm_nic_lock(sc)) {
1576                 device_printf(sc->sc_dev,
1577                     "%s: cannot enable txq %d\n",
1578                     __func__,
1579                     qid);
1580                 return EBUSY;
1581         }
1582
1583         IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1584
1585         if (qid == IWM_MVM_CMD_QUEUE) {
1586                 /* Deactivate the queue before configuration. */
1587                 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1588                     (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1589                     | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1590
1591                 iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1592
1593                 iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1594
1595                 iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1596                 /* Set scheduler window size and frame limit. */
1597                 iwm_write_mem32(sc,
1598                     sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1599                     sizeof(uint32_t),
1600                     ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1601                     IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1602                     ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1603                     IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1604
1605                 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1606                     (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1607                     (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1608                     (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1609                     IWM_SCD_QUEUE_STTS_REG_MSK);
1610         } else {
1611                 struct iwm_scd_txq_cfg_cmd cmd;
1612                 int error;
1613
1614                 iwm_nic_unlock(sc);
1615
1616                 memset(&cmd, 0, sizeof(cmd));
1617                 cmd.scd_queue = qid;
1618                 cmd.enable = 1;
1619                 cmd.sta_id = sta_id;
1620                 cmd.tx_fifo = fifo;
1621                 cmd.aggregate = 0;
1622                 cmd.window = IWM_FRAME_LIMIT;
1623
1624                 error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1625                     sizeof(cmd), &cmd);
1626                 if (error) {
1627                         device_printf(sc->sc_dev,
1628                             "cannot enable txq %d\n", qid);
1629                         return error;
1630                 }
1631
1632                 if (!iwm_nic_lock(sc))
1633                         return EBUSY;
1634         }
1635
1636         iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1637             iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1638
1639         iwm_nic_unlock(sc);
1640
1641         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1642             __func__, qid, fifo);
1643
1644         return 0;
1645 }
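
/*
 * The second scheduler context word written for the command queue above
 * packs the window size and frame limit into a single 32-bit value.  A
 * hypothetical helper (illustration only) with the same shift/mask logic:
 */
#if 0
static uint32_t
iwm_scd_queue_ctx_reg2(uint32_t win, uint32_t frame_limit)
{
	return ((win << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
	    ((frame_limit << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
}
#endif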
1646
1647 static int
1648 iwm_post_alive(struct iwm_softc *sc)
1649 {
1650         int nwords;
1651         int error, chnl;
1652         uint32_t base;
1653
1654         if (!iwm_nic_lock(sc))
1655                 return EBUSY;
1656
1657         base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1658         if (sc->sched_base != base) {
1659                 device_printf(sc->sc_dev,
1660                     "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1661                     __func__, sc->sched_base, base);
1662         }
1663
1664         iwm_ict_reset(sc);
1665
1666         /* Clear TX scheduler state in SRAM. */
1667         nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1668             IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
1669             / sizeof(uint32_t);
1670         error = iwm_write_mem(sc,
1671             sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1672             NULL, nwords);
1673         if (error)
1674                 goto out;
1675
1676         /* Set physical address of TX scheduler rings (1KB aligned). */
1677         iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1678
1679         iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1680
1681         iwm_nic_unlock(sc);
1682
1683         /* enable command channel */
1684         error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
1685         if (error)
1686                 return error;
1687
1688         if (!iwm_nic_lock(sc))
1689                 return EBUSY;
1690
1691         iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1692
1693         /* Enable DMA channels. */
1694         for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1695                 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1696                     IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1697                     IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1698         }
1699
1700         IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1701             IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1702
1703         /* Enable L1-Active */
1704         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
1705                 iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1706                     IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1707         }
1708
1709  out:
1710         iwm_nic_unlock(sc);
1711         return error;
1712 }
1713
1714 /*
1715  * NVM read access and content parsing.  We do not support
1716  * external NVM or writing NVM.
1717  * iwlwifi/mvm/nvm.c
1718  */
1719
1720 /* Default NVM size to read */
1721 #define IWM_NVM_DEFAULT_CHUNK_SIZE      (2*1024)
1722
1723 #define IWM_NVM_WRITE_OPCODE 1
1724 #define IWM_NVM_READ_OPCODE 0
1725
1726 /* load nvm chunk response */
1727 enum {
1728         IWM_READ_NVM_CHUNK_SUCCEED = 0,
1729         IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1730 };
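
/*
 * NVM contents are fetched with IWM_NVM_ACCESS_CMD in chunks of at most
 * IWM_NVM_DEFAULT_CHUNK_SIZE (2KB); iwm_nvm_read_section() below keeps
 * issuing chunk reads until a response returns fewer bytes than
 * requested, which marks the end of the section.
 */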
1731
1732 static int
1733 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1734         uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1735 {
1736         struct iwm_nvm_access_cmd nvm_access_cmd = {
1737                 .offset = htole16(offset),
1738                 .length = htole16(length),
1739                 .type = htole16(section),
1740                 .op_code = IWM_NVM_READ_OPCODE,
1741         };
1742         struct iwm_nvm_access_resp *nvm_resp;
1743         struct iwm_rx_packet *pkt;
1744         struct iwm_host_cmd cmd = {
1745                 .id = IWM_NVM_ACCESS_CMD,
1746                 .flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1747                 .data = { &nvm_access_cmd, },
1748         };
1749         int ret, bytes_read, offset_read;
1750         uint8_t *resp_data;
1751
1752         cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1753
1754         ret = iwm_send_cmd(sc, &cmd);
1755         if (ret) {
1756                 device_printf(sc->sc_dev,
1757                     "Could not send NVM_ACCESS command (error=%d)\n", ret);
1758                 return ret;
1759         }
1760
1761         pkt = cmd.resp_pkt;
1762
1763         /* Extract NVM response */
1764         nvm_resp = (void *)pkt->data;
1765         ret = le16toh(nvm_resp->status);
1766         bytes_read = le16toh(nvm_resp->length);
1767         offset_read = le16toh(nvm_resp->offset);
1768         resp_data = nvm_resp->data;
1769         if (ret) {
1770                 if ((offset != 0) &&
1771                     (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1772                         /*
1773                          * Meaning of NOT_VALID_ADDRESS: the driver tried to
1774                          * read a chunk from an address that is a multiple of
1775                          * 2K and got an error because that address is empty.
1776                          * Meaning of (offset != 0): the driver has already
1777                          * read valid data from another chunk, so this case
1778                          * is not an error.
1779                          */
1780                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1781                                     "NVM access command failed on offset 0x%x since the section size is a multiple of 2K\n",
1782                                     offset);
1783                         *len = 0;
1784                         ret = 0;
1785                 } else {
1786                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1787                                     "NVM access command failed with status %d\n", ret);
1788                         ret = EIO;
1789                 }
1790                 goto exit;
1791         }
1792
1793         if (offset_read != offset) {
1794                 device_printf(sc->sc_dev,
1795                     "NVM ACCESS response with invalid offset %d\n",
1796                     offset_read);
1797                 ret = EINVAL;
1798                 goto exit;
1799         }
1800
1801         if (bytes_read > length) {
1802                 device_printf(sc->sc_dev,
1803                     "NVM ACCESS response with too much data "
1804                     "(%d bytes requested, %d bytes received)\n",
1805                     length, bytes_read);
1806                 ret = EINVAL;
1807                 goto exit;
1808         }
1809
1810         /* Copy the chunk into the caller's NVM image buffer. */
1811         memcpy(data + offset, resp_data, bytes_read);
1812         *len = bytes_read;
1813
1814  exit:
1815         iwm_free_resp(sc, &cmd);
1816         return ret;
1817 }
1818
1819 /*
1820  * Reads an NVM section completely.
1821  * NICs prior to the 7000 family don't have a real NVM, but just read
1822  * section 0, which is the EEPROM.  Because EEPROM reads are not bounded
1823  * by the uCode, we must check manually in this case that we don't
1824  * overflow by trying to read more than the EEPROM size.
1825  * For 7000 family NICs, we supply the maximal size we can read, and
1826  * the uCode fills the response with as much data as it can,
1827  * without overflowing, so no check is needed.
1828  */
1829 static int
1830 iwm_nvm_read_section(struct iwm_softc *sc,
1831         uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1832 {
1833         uint16_t seglen, length, offset = 0;
1834         int ret;
1835
1836         /* Set nvm section read length */
1837         length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1838
1839         seglen = length;
1840
1841         /* Read the NVM until exhausted (reading less than requested) */
1842         while (seglen == length) {
1843                 /* Make sure this read won't overflow the destination buffer. */
1844                 if ((size_read + offset + length) >
1845                     sc->cfg->eeprom_size) {
1846                         device_printf(sc->sc_dev,
1847                             "EEPROM size is too small for NVM\n");
1848                         return ENOBUFS;
1849                 }
1850
1851                 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1852                 if (ret) {
1853                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1854                                     "Cannot read NVM from section %d offset %d, length %d\n",
1855                                     section, offset, length);
1856                         return ret;
1857                 }
1858                 offset += seglen;
1859         }
1860
1861         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1862                     "NVM section %d read completed\n", section);
1863         *len = offset;
1864         return 0;
1865 }
1866
1867 /*
1868  * BEGIN IWM_NVM_PARSE
1869  */
1870
1871 /* iwlwifi/iwl-nvm-parse.c */
1872
1873 /* NVM offsets (in words) definitions */
1874 enum iwm_nvm_offsets {
1875         /* NVM HW-Section offset (in words) definitions */
1876         IWM_HW_ADDR = 0x15,
1877
1878 /* NVM SW-Section offset (in words) definitions */
1879         IWM_NVM_SW_SECTION = 0x1C0,
1880         IWM_NVM_VERSION = 0,
1881         IWM_RADIO_CFG = 1,
1882         IWM_SKU = 2,
1883         IWM_N_HW_ADDRS = 3,
1884         IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1885
1886 /* NVM calibration section offset (in words) definitions */
1887         IWM_NVM_CALIB_SECTION = 0x2B8,
1888         IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1889 };
1890
1891 enum iwm_8000_nvm_offsets {
1892         /* NVM HW-Section offset (in words) definitions */
1893         IWM_HW_ADDR0_WFPM_8000 = 0x12,
1894         IWM_HW_ADDR1_WFPM_8000 = 0x16,
1895         IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1896         IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1897         IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1898
1899         /* NVM SW-Section offset (in words) definitions */
1900         IWM_NVM_SW_SECTION_8000 = 0x1C0,
1901         IWM_NVM_VERSION_8000 = 0,
1902         IWM_RADIO_CFG_8000 = 0,
1903         IWM_SKU_8000 = 2,
1904         IWM_N_HW_ADDRS_8000 = 3,
1905
1906         /* NVM REGULATORY -Section offset (in words) definitions */
1907         IWM_NVM_CHANNELS_8000 = 0,
1908         IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1909         IWM_NVM_LAR_OFFSET_8000 = 0x507,
1910         IWM_NVM_LAR_ENABLED_8000 = 0x7,
1911
1912         /* NVM calibration section offset (in words) definitions */
1913         IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1914         IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1915 };
1916
1917 /* SKU Capabilities (actual values from NVM definition) */
1918 enum nvm_sku_bits {
1919         IWM_NVM_SKU_CAP_BAND_24GHZ      = (1 << 0),
1920         IWM_NVM_SKU_CAP_BAND_52GHZ      = (1 << 1),
1921         IWM_NVM_SKU_CAP_11N_ENABLE      = (1 << 2),
1922         IWM_NVM_SKU_CAP_11AC_ENABLE     = (1 << 3),
1923 };
1924
1925 /* radio config bits (actual values from NVM definition) */
1926 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1927 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1928 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1929 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1930 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1931 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1932
1933 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)       (x & 0xF)
1934 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x)         ((x >> 4) & 0xF)
1935 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x)         ((x >> 8) & 0xF)
1936 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)         ((x >> 12) & 0xFFF)
1937 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)       ((x >> 24) & 0xF)
1938 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)       ((x >> 28) & 0xF)
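
/*
 * Worked example with the pre-8000 masks above (illustrative value only):
 * for radio_cfg = 0x3654,
 *	DASH   = 0x3654 & 0x3           = 0
 *	STEP   = (0x3654 >> 2)  & 0x3   = 1
 *	TYPE   = (0x3654 >> 4)  & 0x3   = 1
 *	PNUM   = (0x3654 >> 6)  & 0x3   = 1
 *	TX_ANT = (0x3654 >> 8)  & 0xF   = 0x6
 *	RX_ANT = (0x3654 >> 12) & 0xF   = 0x3
 */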
1939
1940 #define DEFAULT_MAX_TX_POWER 16
1941
1942 /**
1943  * enum iwm_nvm_channel_flags - channel flags in NVM
1944  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1945  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1946  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1947  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1948  * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1949  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1950  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1951  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1952  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1953  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1954  */
1955 enum iwm_nvm_channel_flags {
1956         IWM_NVM_CHANNEL_VALID = (1 << 0),
1957         IWM_NVM_CHANNEL_IBSS = (1 << 1),
1958         IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1959         IWM_NVM_CHANNEL_RADAR = (1 << 4),
1960         IWM_NVM_CHANNEL_DFS = (1 << 7),
1961         IWM_NVM_CHANNEL_WIDE = (1 << 8),
1962         IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1963         IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1964         IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1965 };
1966
1967 /*
1968  * Translate EEPROM flags to net80211.
1969  */
1970 static uint32_t
1971 iwm_eeprom_channel_flags(uint16_t ch_flags)
1972 {
1973         uint32_t nflags;
1974
1975         nflags = 0;
1976         if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1977                 nflags |= IEEE80211_CHAN_PASSIVE;
1978         if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1979                 nflags |= IEEE80211_CHAN_NOADHOC;
1980         if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1981                 nflags |= IEEE80211_CHAN_DFS;
1982                 /* Just in case. */
1983                 nflags |= IEEE80211_CHAN_NOADHOC;
1984         }
1985
1986         return (nflags);
1987 }
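
/*
 * For example, an NVM entry with IWM_NVM_CHANNEL_IBSS set but
 * IWM_NVM_CHANNEL_ACTIVE clear maps to IEEE80211_CHAN_PASSIVE only,
 * while a RADAR-flagged entry additionally gets IEEE80211_CHAN_DFS
 * and IEEE80211_CHAN_NOADHOC.
 */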
1988
1989 static void
1990 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1991     int maxchans, int *nchans, int ch_idx, size_t ch_num,
1992     const uint8_t bands[])
1993 {
1994         const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
1995         uint32_t nflags;
1996         uint16_t ch_flags;
1997         uint8_t ieee;
1998         int error;
1999
2000         for (; ch_idx < ch_num; ch_idx++) {
2001                 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2002                 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2003                         ieee = iwm_nvm_channels[ch_idx];
2004                 else
2005                         ieee = iwm_nvm_channels_8000[ch_idx];
2006
2007                 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2008                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2009                             "Ch. %d Flags %x [%sGHz] - No traffic\n",
2010                             ieee, ch_flags,
2011                             (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2012                             "5.2" : "2.4");
2013                         continue;
2014                 }
2015
2016                 nflags = iwm_eeprom_channel_flags(ch_flags);
2017                 error = ieee80211_add_channel(chans, maxchans, nchans,
2018                     ieee, 0, 0, nflags, bands);
2019                 if (error != 0)
2020                         break;
2021
2022                 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2023                     "Ch. %d Flags %x [%sGHz] - Added\n",
2024                     ieee, ch_flags,
2025                     (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2026                     "5.2" : "2.4");
2027         }
2028 }
2029
2030 static void
2031 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2032     struct ieee80211_channel chans[])
2033 {
2034         struct iwm_softc *sc = ic->ic_softc;
2035         struct iwm_nvm_data *data = sc->nvm_data;
2036         uint8_t bands[IEEE80211_MODE_BYTES];
2037         size_t ch_num;
2038
2039         memset(bands, 0, sizeof(bands));
2040         /* 1-13: 11b/g channels. */
2041         setbit(bands, IEEE80211_MODE_11B);
2042         setbit(bands, IEEE80211_MODE_11G);
2043         iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2044             IWM_NUM_2GHZ_CHANNELS - 1, bands);
2045
2046         /* 14: 11b channel only. */
2047         clrbit(bands, IEEE80211_MODE_11G);
2048         iwm_add_channel_band(sc, chans, maxchans, nchans,
2049             IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2050
2051         if (data->sku_cap_band_52GHz_enable) {
2052                 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2053                         ch_num = nitems(iwm_nvm_channels);
2054                 else
2055                         ch_num = nitems(iwm_nvm_channels_8000);
2056                 memset(bands, 0, sizeof(bands));
2057                 setbit(bands, IEEE80211_MODE_11A);
2058                 iwm_add_channel_band(sc, chans, maxchans, nchans,
2059                     IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2060         }
2061 }
2062
2063 static void
2064 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2065         const uint16_t *mac_override, const uint16_t *nvm_hw)
2066 {
2067         const uint8_t *hw_addr;
2068
2069         if (mac_override) {
2070                 static const uint8_t reserved_mac[] = {
2071                         0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2072                 };
2073
2074                 hw_addr = (const uint8_t *)(mac_override +
2075                                  IWM_MAC_ADDRESS_OVERRIDE_8000);
2076
2077                 /*
2078                  * Store the MAC address from MAO section.
2079                  * No byte swapping is required in MAO section
2080                  */
2081                 IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2082
2083                 /*
2084                  * Force the use of the OTP MAC address in case of reserved MAC
2085                  * address in the NVM, or if address is given but invalid.
2086                  */
2087                 if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2088                     !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2089                     iwm_is_valid_ether_addr(data->hw_addr) &&
2090                     !IEEE80211_IS_MULTICAST(data->hw_addr))
2091                         return;
2092
2093                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2094                     "%s: mac address from nvm override section invalid\n",
2095                     __func__);
2096         }
2097
2098         if (nvm_hw) {
2099                 /* read the mac address from WFMP registers */
2100                 uint32_t mac_addr0 =
2101                     htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2102                 uint32_t mac_addr1 =
2103                     htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2104
2105                 hw_addr = (const uint8_t *)&mac_addr0;
2106                 data->hw_addr[0] = hw_addr[3];
2107                 data->hw_addr[1] = hw_addr[2];
2108                 data->hw_addr[2] = hw_addr[1];
2109                 data->hw_addr[3] = hw_addr[0];
2110
2111                 hw_addr = (const uint8_t *)&mac_addr1;
2112                 data->hw_addr[4] = hw_addr[1];
2113                 data->hw_addr[5] = hw_addr[0];
2114
2115                 return;
2116         }
2117
2118         device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2119         memset(data->hw_addr, 0, sizeof(data->hw_addr));
2120 }
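
/*
 * For illustration: with IWM_WFMP_MAC_ADDR_0 reading 0xAABBCCDD and the
 * low 16 bits of IWM_WFMP_MAC_ADDR_1 reading 0xEEFF, the code above
 * reassembles the address as aa:bb:cc:dd:ee:ff; the htole32() calls make
 * the byte indexing independent of host endianness.
 */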
2121
2122 static int
2123 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2124             const uint16_t *phy_sku)
2125 {
2126         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2127                 return le16_to_cpup(nvm_sw + IWM_SKU);
2128
2129         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2130 }
2131
2132 static int
2133 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2134 {
2135         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2136                 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2137         else
2138                 return le32_to_cpup((const uint32_t *)(nvm_sw +
2139                                                 IWM_NVM_VERSION_8000));
2140 }
2141
2142 static int
2143 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2144                   const uint16_t *phy_sku)
2145 {
2146         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2147                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2148
2149         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2150 }
2151
2152 static int
2153 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2154 {
2155         int n_hw_addr;
2156
2157         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2158                 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2159
2160         n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2161
2162         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2163 }
2164
2165 static void
2166 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2167                   uint32_t radio_cfg)
2168 {
2169         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2170                 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2171                 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2172                 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2173                 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2174                 return;
2175         }
2176
2177         /* set the radio configuration for family 8000 */
2178         data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2179         data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2180         data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2181         data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2182         data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2183         data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2184 }
2185
2186 static int
2187 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2188                    const uint16_t *nvm_hw, const uint16_t *mac_override)
2189 {
2190 #ifdef notyet /* for FAMILY 9000 */
2191         if (cfg->mac_addr_from_csr) {
2192                 iwm_set_hw_address_from_csr(sc, data);
2193         } else
2194 #endif
2195         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2196                 const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2197
2198                 /* The byte order is little endian 16 bit, meaning 214365 */
2199                 data->hw_addr[0] = hw_addr[1];
2200                 data->hw_addr[1] = hw_addr[0];
2201                 data->hw_addr[2] = hw_addr[3];
2202                 data->hw_addr[3] = hw_addr[2];
2203                 data->hw_addr[4] = hw_addr[5];
2204                 data->hw_addr[5] = hw_addr[4];
2205         } else {
2206                 iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2207         }
2208
2209         if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2210                 device_printf(sc->sc_dev, "no valid mac address was found\n");
2211                 return EINVAL;
2212         }
2213
2214         return 0;
2215 }
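
/*
 * For illustration of the pre-8000 "214365" ordering: HW section bytes
 * stored as 22 11 44 33 66 55 yield the MAC address 11:22:33:44:55:66
 * after the pairwise swap above.
 */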
2216
2217 static struct iwm_nvm_data *
2218 iwm_parse_nvm_data(struct iwm_softc *sc,
2219                    const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2220                    const uint16_t *nvm_calib, const uint16_t *mac_override,
2221                    const uint16_t *phy_sku, const uint16_t *regulatory)
2222 {
2223         struct iwm_nvm_data *data;
2224         uint32_t sku, radio_cfg;
2225
2226         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2227                 data = malloc(sizeof(*data) +
2228                     IWM_NUM_CHANNELS * sizeof(uint16_t),
2229                     M_DEVBUF, M_NOWAIT | M_ZERO);
2230         } else {
2231                 data = malloc(sizeof(*data) +
2232                     IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2233                     M_DEVBUF, M_NOWAIT | M_ZERO);
2234         }
2235         if (!data)
2236                 return NULL;
2237
2238         data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2239
2240         radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2241         iwm_set_radio_cfg(sc, data, radio_cfg);
2242
2243         sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2244         data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2245         data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2246         data->sku_cap_11n_enable = 0;
2247
2248         data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2249
2250         /* If no valid mac address was found - bail out */
2251         if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2252                 free(data, M_DEVBUF);
2253                 return NULL;
2254         }
2255
2256         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2257                 memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2258                     IWM_NUM_CHANNELS * sizeof(uint16_t));
2259         } else {
2260                 memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2261                     IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2262         }
2263
2264         return data;
2265 }
2266
2267 static void
2268 iwm_free_nvm_data(struct iwm_nvm_data *data)
2269 {
2270         if (data != NULL)
2271                 free(data, M_DEVBUF);
2272 }
2273
2274 static struct iwm_nvm_data *
2275 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2276 {
2277         const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2278
2279         /* Checking for required sections */
2280         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2281                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2282                     !sections[sc->cfg->nvm_hw_section_num].data) {
2283                         device_printf(sc->sc_dev,
2284                             "Can't parse empty OTP/NVM sections\n");
2285                         return NULL;
2286                 }
2287         } else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2288                 /* SW and REGULATORY sections are mandatory */
2289                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2290                     !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2291                         device_printf(sc->sc_dev,
2292                             "Can't parse empty OTP/NVM sections\n");
2293                         return NULL;
2294                 }
2295                 /* MAC_OVERRIDE or at least HW section must exist */
2296                 if (!sections[sc->cfg->nvm_hw_section_num].data &&
2297                     !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2298                         device_printf(sc->sc_dev,
2299                             "Can't parse mac_address, empty sections\n");
2300                         return NULL;
2301                 }
2302
2303                 /* PHY_SKU section is mandatory in B0 */
2304                 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2305                         device_printf(sc->sc_dev,
2306                             "Can't parse phy_sku in B0, empty sections\n");
2307                         return NULL;
2308                 }
2309         } else {
2310                 panic("unknown device family %d\n", sc->cfg->device_family);
2311         }
2312
2313         hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2314         sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2315         calib = (const uint16_t *)
2316             sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2317         regulatory = (const uint16_t *)
2318             sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2319         mac_override = (const uint16_t *)
2320             sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2321         phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2322
2323         return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2324             phy_sku, regulatory);
2325 }
2326
2327 static int
2328 iwm_nvm_init(struct iwm_softc *sc)
2329 {
2330         struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2331         int i, ret, section;
2332         uint32_t size_read = 0;
2333         uint8_t *nvm_buffer, *temp;
2334         uint16_t len;
2335
2336         memset(nvm_sections, 0, sizeof(nvm_sections));
2337
2338         if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2339                 return EINVAL;
2340
2341         /* Load NVM values from the NIC (read from the firmware NVM). */
2343         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2344
2345         nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2346         if (!nvm_buffer)
2347                 return ENOMEM;
2348         for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2349                 /* we override the constness for initial read */
2350                 ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2351                                            &len, size_read);
2352                 if (ret)
2353                         continue;
2354                 size_read += len;
2355                 temp = malloc(len, M_DEVBUF, M_NOWAIT);
2356                 if (!temp) {
2357                         ret = ENOMEM;
2358                         break;
2359                 }
2360                 memcpy(temp, nvm_buffer, len);
2361
2362                 nvm_sections[section].data = temp;
2363                 nvm_sections[section].length = len;
2364         }
2365         if (!size_read)
2366                 device_printf(sc->sc_dev, "OTP is blank\n");
2367         free(nvm_buffer, M_DEVBUF);
2368
2369         sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2370         if (!sc->nvm_data)
2371                 return EINVAL;
2372         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2373                     "nvm version = %x\n", sc->nvm_data->nvm_version);
2374
2375         for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2376                 if (nvm_sections[i].data != NULL)
2377                         free(nvm_sections[i].data, M_DEVBUF);
2378         }
2379
2380         return 0;
2381 }
2382
2383 /*
2384  * Firmware loading gunk.  This is kind of a weird hybrid between the
2385  * iwn driver and the Linux iwlwifi driver.
2386  */
2387
2388 static int
2389 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
2390         const uint8_t *section, uint32_t byte_cnt)
2391 {
2392         int error = EINVAL;
2393         uint32_t chunk_sz, offset;
2394
2395         chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
2396
2397         for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
2398                 uint32_t addr, len;
2399                 const uint8_t *data;
2400
2401                 addr = dst_addr + offset;
2402                 len = MIN(chunk_sz, byte_cnt - offset);
2403                 data = section + offset;
2404
2405                 error = iwm_firmware_load_chunk(sc, addr, data, len);
2406                 if (error)
2407                         break;
2408         }
2409
2410         return error;
2411 }
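
/*
 * A section larger than IWM_FH_MEM_TB_MAX_LENGTH is split into
 * consecutive chunks: e.g. a three-chunk section is loaded at dst_addr,
 * dst_addr + chunk_sz and dst_addr + 2 * chunk_sz, with the final chunk
 * trimmed to the remaining byte count.
 */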
2412
2413 static int
2414 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2415         const uint8_t *chunk, uint32_t byte_cnt)
2416 {
2417         struct iwm_dma_info *dma = &sc->fw_dma;
2418         int error;
2419
2420         /* Copy firmware chunk into pre-allocated DMA-safe memory. */
2421         memcpy(dma->vaddr, chunk, byte_cnt);
2422         bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2423
2424         if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2425             dst_addr <= IWM_FW_MEM_EXTENDED_END) {
2426                 iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2427                     IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2428         }
2429
2430         sc->sc_fw_chunk_done = 0;
2431
2432         if (!iwm_nic_lock(sc))
2433                 return EBUSY;
2434
2435         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2436             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2437         IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2438             dst_addr);
2439         IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2440             dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2441         IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2442             (iwm_get_dma_hi_addr(dma->paddr)
2443               << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2444         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2445             1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2446             1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2447             IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2448         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2449             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2450             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2451             IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2452
2453         iwm_nic_unlock(sc);
2454
2455         /* wait 1s for this segment to load */
2456         while (!sc->sc_fw_chunk_done)
2457                 if ((error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz)) != 0)
2458                         break;
2459
2460         if (!sc->sc_fw_chunk_done) {
2461                 device_printf(sc->sc_dev,
2462                     "fw chunk addr 0x%x len %d failed to load\n",
2463                     dst_addr, byte_cnt);
2464         }
2465
2466         if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2467             dst_addr <= IWM_FW_MEM_EXTENDED_END && iwm_nic_lock(sc)) {
2468                 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2469                     IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2470                 iwm_nic_unlock(sc);
2471         }
2472
2473         return error;
2474 }
2475
2476 int
2477 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
2478     int cpu, int *first_ucode_section)
2479 {
2480         int shift_param;
2481         int i, error = 0, sec_num = 0x1;
2482         uint32_t val, last_read_idx = 0;
2483         const void *data;
2484         uint32_t dlen;
2485         uint32_t offset;
2486
2487         if (cpu == 1) {
2488                 shift_param = 0;
2489                 *first_ucode_section = 0;
2490         } else {
2491                 shift_param = 16;
2492                 (*first_ucode_section)++;
2493         }
2494
2495         for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
2496                 last_read_idx = i;
2497                 data = fws->fw_sect[i].fws_data;
2498                 dlen = fws->fw_sect[i].fws_len;
2499                 offset = fws->fw_sect[i].fws_devoff;
2500
2501                 /*
2502                  * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the
2503                  * CPU1 sections from the CPU2 sections.
2504                  * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2
2505                  * non-paged sections from the CPU2 paging sections.
2506                  */
2507                 if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2508                     offset == IWM_PAGING_SEPARATOR_SECTION)
2509                         break;
2510
2511                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2512                     "LOAD FIRMWARE chunk %d offset 0x%x len %d for cpu %d\n",
2513                     i, offset, dlen, cpu);
2514
2515                 if (dlen > sc->sc_fwdmasegsz) {
2516                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2517                             "chunk %d too large (%d bytes)\n", i, dlen);
2518                         error = EFBIG;
2519                 } else {
2520                         error = iwm_firmware_load_sect(sc, offset, data, dlen);
2521                 }
2522                 if (error) {
2523                         device_printf(sc->sc_dev,
2524                             "could not load firmware chunk %d (error %d)\n",
2525                             i, error);
2526                         return error;
2527                 }
2528
2529                 /* Notify the ucode of the loaded section number and status */
2530                 if (iwm_nic_lock(sc)) {
2531                         val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2532                         val = val | (sec_num << shift_param);
2533                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2534                         sec_num = (sec_num << 1) | 0x1;
2535                         iwm_nic_unlock(sc);
2536
2537                         /*
2538                          * The firmware won't load correctly without this delay.
2539                          */
2540                         DELAY(8000);
2541                 }
2542         }
2543
2544         *first_ucode_section = last_read_idx;
2545
2546         if (iwm_nic_lock(sc)) {
2547                 if (cpu == 1)
2548                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2549                 else
2550                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2551                 iwm_nic_unlock(sc);
2552         }
2553
2554         return 0;
2555 }
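
/*
 * sec_num accumulates a ones-mask in the load-status register (0x1, 0x3,
 * 0x7, ... after each section), shifted left by 16 for CPU2; writing
 * 0xFFFF (CPU1) or 0xFFFFFFFF (CPU2) afterwards marks the whole image as
 * loaded.
 */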
2556
2557 int
2558 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2559 {
2560         struct iwm_fw_sects *fws;
2561         int error = 0;
2562         int first_ucode_section;
2563
2564         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "loading ucode type %d\n",
2565             ucode_type);
2566
2567         fws = &sc->sc_fw.fw_sects[ucode_type];
2568
2569         /* configure the ucode to be ready to get the secured image */
2570         /* release CPU reset */
2571         iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
2572
2573         /* load to FW the binary Secured sections of CPU1 */
2574         error = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
2575         if (error)
2576                 return error;
2577
2578         /* load to FW the binary sections of CPU2 */
2579         return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
2580 }
2581
2582 static int
2583 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2584 {
2585         struct iwm_fw_sects *fws;
2586         int error, i;
2587         const void *data;
2588         uint32_t dlen;
2589         uint32_t offset;
2590
2591         sc->sc_uc.uc_intr = 0;
2592
2593         fws = &sc->sc_fw.fw_sects[ucode_type];
2594         for (i = 0; i < fws->fw_count; i++) {
2595                 data = fws->fw_sect[i].fws_data;
2596                 dlen = fws->fw_sect[i].fws_len;
2597                 offset = fws->fw_sect[i].fws_devoff;
2598                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2599                     "LOAD FIRMWARE type %d offset %u len %d\n",
2600                     ucode_type, offset, dlen);
2601                 if (dlen > sc->sc_fwdmasegsz) {
2602                         IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2603                             "chunk %d too large (%d bytes)\n", i, dlen);
2604                         error = EFBIG;
2605                 } else {
2606                         error = iwm_firmware_load_sect(sc, offset, data, dlen);
2607                 }
2608                 if (error) {
2609                         device_printf(sc->sc_dev,
2610                             "could not load firmware chunk %u of %u "
2611                             "(error=%d)\n", i, fws->fw_count, error);
2612                         return error;
2613                 }
2614         }
2615
2616         IWM_WRITE(sc, IWM_CSR_RESET, 0);
2617
2618         return 0;
2619 }
2620
2621 static int
2622 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2623 {
2624         int error, w;
2625
2626         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2627                 error = iwm_load_firmware_8000(sc, ucode_type);
2628         else
2629                 error = iwm_load_firmware_7000(sc, ucode_type);
2630         if (error)
2631                 return error;
2632
2633         /* wait for the firmware to load */
2634         for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
2635                 error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
2636         }
2637         if (error || !sc->sc_uc.uc_ok) {
2638                 device_printf(sc->sc_dev, "could not load firmware\n");
2639                 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2640                         device_printf(sc->sc_dev, "cpu1 status: 0x%x\n",
2641                             iwm_read_prph(sc, IWM_SB_CPU_1_STATUS));
2642                         device_printf(sc->sc_dev, "cpu2 status: 0x%x\n",
2643                             iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
2644                 }
2645         }
2646
2647         /*
2648          * Give the firmware some time to initialize.
2649          * Accessing it too early causes errors.
2650          */
2651         msleep(&w, &sc->sc_mtx, 0, "iwmfwinit", hz);
2652
2653         return error;
2654 }
2655
2656 /* iwlwifi: pcie/trans.c */
2657 static int
2658 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2659 {
2660         int error;
2661
2662         IWM_WRITE(sc, IWM_CSR_INT, ~0);
2663
2664         if ((error = iwm_nic_init(sc)) != 0) {
2665                 device_printf(sc->sc_dev, "unable to init nic\n");
2666                 return error;
2667         }
2668
2669         /* make sure rfkill handshake bits are cleared */
2670         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2671         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2672             IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2673
2674         /* clear (again), then enable host interrupts */
2675         IWM_WRITE(sc, IWM_CSR_INT, ~0);
2676         iwm_enable_interrupts(sc);
2677
2678         /* really make sure rfkill handshake bits are cleared */
2679         /* maybe we should write a few times more?  just to make sure */
2680         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2681         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2682
2683         /* Load the given image to the HW */
2684         return iwm_load_firmware(sc, ucode_type);
2685 }
2686
2687 static int
2688 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2689 {
2690         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2691                 .valid = htole32(valid_tx_ant),
2692         };
2693
2694         return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2695             IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2696 }
2697
2698 /* iwlwifi: mvm/fw.c */
2699 static int
2700 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2701 {
2702         struct iwm_phy_cfg_cmd phy_cfg_cmd;
2703         enum iwm_ucode_type ucode_type = sc->sc_uc_current;
2704
2705         /* Set parameters */
2706         phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
2707         phy_cfg_cmd.calib_control.event_trigger =
2708             sc->sc_default_calib[ucode_type].event_trigger;
2709         phy_cfg_cmd.calib_control.flow_trigger =
2710             sc->sc_default_calib[ucode_type].flow_trigger;
2711
2712         IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2713             "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2714         return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2715             sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2716 }
2717
2718 static int
2719 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2720         enum iwm_ucode_type ucode_type)
2721 {
2722         enum iwm_ucode_type old_type = sc->sc_uc_current;
2723         int error;
2724
2725         if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2726                 device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
2727                         error);
2728                 return error;
2729         }
2730
2731         sc->sc_uc_current = ucode_type;
2732         error = iwm_start_fw(sc, ucode_type);
2733         if (error) {
2734                 device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2735                 sc->sc_uc_current = old_type;
2736                 return error;
2737         }
2738
2739         error = iwm_post_alive(sc);
2740         if (error) {
2741                 device_printf(sc->sc_dev, "iwm_fw_alive: failed %d\n", error);
2742         }
2743         return error;
2744 }
2745
2746 /*
2747  * mvm misc bits
2748  */
2749
2750 /*
2751  * follows iwlwifi/fw.c
2752  */
2753 static int
2754 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2755 {
2756         int error;
2757
2758         /* do not operate with rfkill switch turned on */
2759         if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2760                 device_printf(sc->sc_dev,
2761                     "radio is disabled by hardware switch\n");
2762                 return EPERM;
2763         }
2764
2765         sc->sc_init_complete = 0;
2766         if ((error = iwm_mvm_load_ucode_wait_alive(sc,
2767             IWM_UCODE_TYPE_INIT)) != 0) {
2768                 device_printf(sc->sc_dev, "failed to load init firmware\n");
2769                 return error;
2770         }
2771
2772         if (justnvm) {
2773                 if ((error = iwm_nvm_init(sc)) != 0) {
2774                         device_printf(sc->sc_dev, "failed to read nvm\n");
2775                         return error;
2776                 }
2777                 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
2778
2779                 return 0;
2780         }
2781
2782         if ((error = iwm_send_bt_init_conf(sc)) != 0) {
2783                 device_printf(sc->sc_dev,
2784                     "failed to send bt coex configuration: %d\n", error);
2785                 return error;
2786         }
2787
2788         /* Init Smart FIFO. */
2789         error = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
2790         if (error != 0)
2791                 return error;
2792
2793 #if 0
2794         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2795             "%s: phy_txant=0x%08x, nvm_valid_tx_ant=0x%02x, valid=0x%02x\n",
2796             __func__,
2797             ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
2798               >> IWM_FW_PHY_CFG_TX_CHAIN_POS),
2799             sc->nvm_data->valid_tx_ant,
2800             iwm_fw_valid_tx_ant(sc));
2801 #endif
2802
2803         /* Send TX valid antennas before triggering calibrations */
2804         error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
2805         if (error != 0) {
2806                 device_printf(sc->sc_dev,
2807                     "failed to send antennas before calibration: %d\n", error);
2808                 return error;
2809         }
2810
2811         /*
2812          * Send phy configurations command to init uCode
2813          * to start the 16.0 uCode init image internal calibrations.
2814          */
2815         if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
2816                 device_printf(sc->sc_dev,
2817                     "%s: failed to run internal calibration: %d\n",
2818                     __func__, error);
2819                 return error;
2820         }
2821
2822         /*
2823          * Nothing to do but wait for the init complete notification
2824          * from the firmware
2825          */
2826         while (!sc->sc_init_complete) {
2827                 error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
2828                                  0, "iwminit", 2*hz);
2829                 if (error) {
2830                         device_printf(sc->sc_dev, "init complete failed: %d\n",
2831                                 error);
2832                         break;
2833                 }
2834         }
2835
2836         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "init %scomplete\n",
2837             sc->sc_init_complete ? "" : "not ");
2838
2839         return error;
2840 }
2841
2842 /*
2843  * receive side
2844  */
2845
2846 /* (re)stock rx ring, called at init-time and at runtime */
2847 static int
2848 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
2849 {
2850         struct iwm_rx_ring *ring = &sc->rxq;
2851         struct iwm_rx_data *data = &ring->data[idx];
2852         struct mbuf *m;
2853         bus_dmamap_t dmamap = NULL;
2854         bus_dma_segment_t seg;
2855         int nsegs, error;
2856
2857         m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
2858         if (m == NULL)
2859                 return ENOBUFS;
2860
2861         m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2862         error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
2863             &seg, &nsegs, BUS_DMA_NOWAIT);
2864         if (error != 0) {
2865                 device_printf(sc->sc_dev,
2866                     "%s: can't map mbuf, error %d\n", __func__, error);
2867                 goto fail;
2868         }
2869
2870         if (data->m != NULL)
2871                 bus_dmamap_unload(ring->data_dmat, data->map);
2872
2873         /* Swap ring->spare_map with data->map */
2874         dmamap = data->map;
2875         data->map = ring->spare_map;
2876         ring->spare_map = dmamap;
2877
2878         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
2879         data->m = m;
2880
2881         /* Update RX descriptor. */
2882         KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
2883         ring->desc[idx] = htole32(seg.ds_addr >> 8);
2884         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2885             BUS_DMASYNC_PREWRITE);
2886
2887         return 0;
2888 fail:
2889         m_freem(m);
2890         return error;
2891 }
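
/*
 * Illustrative example, not from the sources: RX buffers are 256-byte
 * aligned (see the KASSERT above), so the descriptor holds the DMA
 * address shifted right by 8 bits.  A buffer at physical address
 * 0x12345600 would be stored in ring->desc[idx] as htole32(0x00123456).
 */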
2892
2893 /* iwlwifi: mvm/rx.c */
2894 #define IWM_RSSI_OFFSET 50
2895 static int
2896 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2897 {
2898         int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
2899         uint32_t agc_a, agc_b;
2900         uint32_t val;
2901
2902         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
2903         agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
2904         agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
2905
2906         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
2907         rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
2908         rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
2909
2910         /*
2911          * dBm = rssi dB - agc dB - constant.
2912          * Higher AGC (higher radio gain) means lower signal.
2913          */
2914         rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
2915         rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
2916         max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
2917
2918         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2919             "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
2920             rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
2921
2922         return max_rssi_dbm;
2923 }
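
/*
 * Worked example for the formula above (illustrative only): with
 * rssi_a = 60 and agc_a = 40 we get 60 - IWM_RSSI_OFFSET - 40 =
 * 60 - 50 - 40 = -30 dBm; the larger (less negative) of the two
 * per-antenna values is the one returned.
 */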
2924
2925 /* iwlwifi: mvm/rx.c */
2926 /*
2927  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
2928  * values are reported by the fw as positive values - need to negate
2929  * to obtain their dBm.  Account for missing antennas by replacing 0
2930  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
2931  */
2932 static int
2933 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2934 {
2935         int energy_a, energy_b, energy_c, max_energy;
2936         uint32_t val;
2937
2938         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
2939         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
2940             IWM_RX_INFO_ENERGY_ANT_A_POS;
2941         energy_a = energy_a ? -energy_a : -256;
2942         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
2943             IWM_RX_INFO_ENERGY_ANT_B_POS;
2944         energy_b = energy_b ? -energy_b : -256;
2945         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
2946             IWM_RX_INFO_ENERGY_ANT_C_POS;
2947         energy_c = energy_c ? -energy_c : -256;
2948         max_energy = MAX(energy_a, energy_b);
2949         max_energy = MAX(max_energy, energy_c);
2950
2951         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2952             "energy In A %d B %d C %d, and max %d\n",
2953             energy_a, energy_b, energy_c, max_energy);
2954
2955         return max_energy;
2956 }
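
/*
 * Worked example (illustrative only): if the firmware reports energy
 * values of 45, 0 and 60 for antennas A, B and C, the code above yields
 * -45, -256 (missing antenna) and -60 dBm, and returns -45 as the
 * maximum.
 */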
2957
2958 static void
2959 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
2960         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2961 {
2962         struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
2963
2964         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
2965         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2966
2967         memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
2968 }
2969
2970 /*
2971  * Retrieve the average noise (in dBm) among receivers.
2972  */
2973 static int
2974 iwm_get_noise(struct iwm_softc *sc,
2975     const struct iwm_mvm_statistics_rx_non_phy *stats)
2976 {
2977         int i, total, nbant, noise;
2978
2979         total = nbant = noise = 0;
2980         for (i = 0; i < 3; i++) {
2981                 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
2982                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
2983                     __func__,
2984                     i,
2985                     noise);
2986
2987                 if (noise) {
2988                         total += noise;
2989                         nbant++;
2990                 }
2991         }
2992
2993         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
2994             __func__, nbant, total);
2995 #if 0
2996         /* There should be at least one antenna but check anyway. */
2997         return (nbant == 0) ? -127 : (total / nbant) - 107;
2998 #else
2999         /* For now, just hard-code it to -96 to be safe */
3000         return (-96);
3001 #endif
3002 }
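
/*
 * Worked example for the currently disabled formula above (illustrative
 * only): beacon silence RSSI values of 40, 45 and 0 give total = 85 and
 * nbant = 2, so (total / nbant) - 107 = 42 - 107 = -65 dBm would be
 * returned instead of the hard-coded -96.
 */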
3003
3004 /*
3005  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3006  *
3007  * Handles the actual data of the Rx packet from the fw
3008  */
3009 static void
3010 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
3011         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3012 {
3013         struct ieee80211com *ic = &sc->sc_ic;
3014         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3015         struct ieee80211_frame *wh;
3016         struct ieee80211_node *ni;
3017         struct ieee80211_rx_stats rxs;
3018         struct mbuf *m;
3019         struct iwm_rx_phy_info *phy_info;
3020         struct iwm_rx_mpdu_res_start *rx_res;
3021         uint32_t len;
3022         uint32_t rx_pkt_status;
3023         int rssi;
3024
3025         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3026
3027         phy_info = &sc->sc_last_phy_info;
3028         rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3029         wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3030         len = le16toh(rx_res->byte_count);
3031         rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3032
3033         m = data->m;
3034         m->m_data = pkt->data + sizeof(*rx_res);
3035         m->m_pkthdr.len = m->m_len = len;
3036
3037         if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3038                 device_printf(sc->sc_dev,
3039                     "dsp size out of range [0,20]: %d\n",
3040                     phy_info->cfg_phy_cnt);
3041                 goto fail;
3042         }
3043
3044         if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3045             !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3046                 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3047                     "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3048                 goto fail;
3049         }
3050
3051         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
3052                 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3053         } else {
3054                 rssi = iwm_mvm_calc_rssi(sc, phy_info);
3055         }
3056
3057         /* Note: RSSI is absolute (i.e. a negative value). */
3058         if (rssi < IWM_MIN_DBM)
3059                 rssi = IWM_MIN_DBM;
3060         else if (rssi > IWM_MAX_DBM)
3061                 rssi = IWM_MAX_DBM;
3062
3063         /* Map it to relative value */
3064         rssi = rssi - sc->sc_noise;
3065
3066         /* replenish ring for the buffer we're going to feed to the sharks */
3067         if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3068                 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3069                     __func__);
3070                 goto fail;
3071         }
3072
3073         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3074             "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3075
3076         ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3077
3078         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3079             "%s: phy_info: channel=%d, flags=0x%08x\n",
3080             __func__,
3081             le16toh(phy_info->channel),
3082             le16toh(phy_info->phy_flags));
3083
3084         /*
3085          * Populate an RX state struct with the provided information.
3086          */
3087         bzero(&rxs, sizeof(rxs));
3088         rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3089         rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3090         rxs.c_ieee = le16toh(phy_info->channel);
3091         if (le16toh(phy_info->phy_flags) & IWM_RX_RES_PHY_FLAGS_BAND_24) {
3092                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3093         } else {
3094                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3095         }
3096
3097         /* rssi is in 1/2 dB units */
3098         rxs.c_rssi = rssi * 2;
3099         rxs.c_nf = sc->sc_noise;
3100         if (ieee80211_add_rx_params(m, &rxs) == 0) {
3101                 if (ni)
3102                         ieee80211_free_node(ni);
3103                 goto fail;
3104         }
3105
3106         if (ieee80211_radiotap_active_vap(vap)) {
3107                 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3108
3109                 tap->wr_flags = 0;
3110                 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3111                         tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3112                 tap->wr_chan_freq = htole16(rxs.c_freq);
3113                 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3114                 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3115                 tap->wr_dbm_antsignal = (int8_t)rssi;
3116                 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3117                 tap->wr_tsft = phy_info->system_timestamp;
3118                 switch (phy_info->rate) {
3119                 /* CCK rates. */
3120                 case  10: tap->wr_rate =   2; break;
3121                 case  20: tap->wr_rate =   4; break;
3122                 case  55: tap->wr_rate =  11; break;
3123                 case 110: tap->wr_rate =  22; break;
3124                 /* OFDM rates. */
3125                 case 0xd: tap->wr_rate =  12; break;
3126                 case 0xf: tap->wr_rate =  18; break;
3127                 case 0x5: tap->wr_rate =  24; break;
3128                 case 0x7: tap->wr_rate =  36; break;
3129                 case 0x9: tap->wr_rate =  48; break;
3130                 case 0xb: tap->wr_rate =  72; break;
3131                 case 0x1: tap->wr_rate =  96; break;
3132                 case 0x3: tap->wr_rate = 108; break;
3133                 /* Unknown rate: should not happen. */
3134                 default:  tap->wr_rate =   0;
3135                 }
3136         }
3137
3138         IWM_UNLOCK(sc);
3139         if (ni != NULL) {
3140                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3141                 ieee80211_input_mimo(ni, m);
3142                 ieee80211_free_node(ni);
3143         } else {
3144                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3145                 ieee80211_input_mimo_all(ic, m);
3146         }
3147         IWM_LOCK(sc);
3148
3149         return;
3150
3151 fail:
3152         counter_u64_add(ic->ic_ierrors, 1);
3153 }
3154
3155 static int
3156 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3157         struct iwm_node *in)
3158 {
3159         struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3160         struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
3161         struct ieee80211_node *ni = &in->in_ni;
3162         int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3163
3164         KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3165
3166         /* Update rate control statistics. */
3167         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3168             __func__,
3169             (int) le16toh(tx_resp->status.status),
3170             (int) le16toh(tx_resp->status.sequence),
3171             tx_resp->frame_count,
3172             tx_resp->bt_kill_count,
3173             tx_resp->failure_rts,
3174             tx_resp->failure_frame,
3175             le32toh(tx_resp->initial_rate),
3176             (int) le16toh(tx_resp->wireless_media_time));
3177
3178         txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
3179                      IEEE80211_RATECTL_STATUS_LONG_RETRY;
3180         txs->short_retries = tx_resp->failure_rts;
3181         txs->long_retries = tx_resp->failure_frame;
3182         if (status != IWM_TX_STATUS_SUCCESS &&
3183             status != IWM_TX_STATUS_DIRECT_DONE) {
3184                 switch (status) {
3185                 case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
3186                         txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
3187                         break;
3188                 case IWM_TX_STATUS_FAIL_LONG_LIMIT:
3189                         txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
3190                         break;
3191                 case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
3192                         txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
3193                         break;
3194                 default:
3195                         txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
3196                         break;
3197                 }
3198         } else {
3199                 txs->status = IEEE80211_RATECTL_TX_SUCCESS;
3200         }
3201         ieee80211_ratectl_tx_complete(ni, txs);
3202
3203         return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
3204 }
3205
3206 static void
3207 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
3208         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3209 {
3210         struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3211         int idx = cmd_hdr->idx;
3212         int qid = cmd_hdr->qid;
3213         struct iwm_tx_ring *ring = &sc->txq[qid];
3214         struct iwm_tx_data *txd = &ring->data[idx];
3215         struct iwm_node *in = txd->in;
3216         struct mbuf *m = txd->m;
3217         int status;
3218
3219         KASSERT(txd->done == 0, ("txd not done"));
3220         KASSERT(txd->in != NULL, ("txd without node"));
3221         KASSERT(txd->m != NULL, ("txd without mbuf"));
3222
3223         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3224
3225         sc->sc_tx_timer = 0;
3226
3227         status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3228
3229         /* Unmap and free mbuf. */
3230         bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3231         bus_dmamap_unload(ring->data_dmat, txd->map);
3232
3233         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3234             "free txd %p, in %p\n", txd, txd->in);
3235         txd->done = 1;
3236         txd->m = NULL;
3237         txd->in = NULL;
3238
3239         ieee80211_tx_complete(&in->in_ni, m, status);
3240
3241         if (--ring->queued < IWM_TX_RING_LOMARK) {
3242                 sc->qfullmsk &= ~(1 << ring->qid);
3243                 if (sc->qfullmsk == 0) {
3244                         /*
3245                          * Well, we're in interrupt context, but then again
3246                          * I guess net80211 does all sorts of stunts in
3247                          * interrupt context, so maybe this is no biggie.
3248                          */
3249                         iwm_start(sc);
3250                 }
3251         }
3252 }
3253
3254 /*
3255  * transmit side
3256  */
3257
3258 /*
3259  * Process a "command done" firmware notification.  This is where we wake up
3260  * processes waiting for a synchronous command completion.
3261  * Adapted from if_iwn.
3262  */
3263 static void
3264 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3265 {
3266         struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3267         struct iwm_tx_data *data;
3268
3269         if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3270                 return; /* Not a command ack. */
3271         }
3272
3273         /* XXX wide commands? */
3274         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3275             "cmd notification type 0x%x qid %d idx %d\n",
3276             pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3277
3278         data = &ring->data[pkt->hdr.idx];
3279
3280         /* If the command was mapped in an mbuf, free it. */
3281         if (data->m != NULL) {
3282                 bus_dmamap_sync(ring->data_dmat, data->map,
3283                     BUS_DMASYNC_POSTWRITE);
3284                 bus_dmamap_unload(ring->data_dmat, data->map);
3285                 m_freem(data->m);
3286                 data->m = NULL;
3287         }
3288         wakeup(&ring->desc[pkt->hdr.idx]);
3289 }
3290
3291 #if 0
3292 /*
3293  * necessary only for block ack mode
3294  */
3295 void
3296 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3297         uint16_t len)
3298 {
3299         struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3300         uint16_t w_val;
3301
3302         scd_bc_tbl = sc->sched_dma.vaddr;
3303
3304         len += 8; /* magic numbers came naturally from paris */
3305         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
3306                 len = roundup(len, 4) / 4;
3307
3308         w_val = htole16(sta_id << 12 | len);
3309
3310         /* Update TX scheduler. */
3311         scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3312         bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3313             BUS_DMASYNC_PREWRITE);
3314
3315         /* I really wonder what this is ?!? */
3316         if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3317                 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3318                 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3319                     BUS_DMASYNC_PREWRITE);
3320         }
3321 }
3322 #endif
3323
3324 /*
3325  * Take an 802.11 (non-n) rate and find the relevant rate
3326  * table entry.  Return the index into in_ridx[].
3327  *
3328  * The caller then uses that index back into in_ridx
3329  * to figure out the rate index programmed /into/
3330  * the firmware for this given node.
3331  */
3332 static int
3333 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3334     uint8_t rate)
3335 {
3336         int i;
3337         uint8_t r;
3338
3339         for (i = 0; i < nitems(in->in_ridx); i++) {
3340                 r = iwm_rates[in->in_ridx[i]].rate;
3341                 if (rate == r)
3342                         return (i);
3343         }
3344
3345         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3346             "%s: couldn't find an entry for rate=%d\n",
3347             __func__,
3348             rate);
3349
3350         /* XXX Return the first */
3351         /* XXX TODO: have it return the /lowest/ */
3352         return (0);
3353 }
3354
3355 static int
3356 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3357 {
3358         int i;
3359
3360         for (i = 0; i < nitems(iwm_rates); i++) {
3361                 if (iwm_rates[i].rate == rate)
3362                         return (i);
3363         }
3364         /* XXX error? */
3365         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3366             "%s: couldn't find an entry for rate=%d\n",
3367             __func__,
3368             rate);
3369         return (0);
3370 }
3371
3372 /*
3373  * Fill in the rate related information for a transmit command.
3374  */
3375 static const struct iwm_rate *
3376 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3377         struct mbuf *m, struct iwm_tx_cmd *tx)
3378 {
3379         struct ieee80211_node *ni = &in->in_ni;
3380         struct ieee80211_frame *wh;
3381         const struct ieee80211_txparam *tp = ni->ni_txparms;
3382         const struct iwm_rate *rinfo;
3383         int type;
3384         int ridx, rate_flags;
3385
3386         wh = mtod(m, struct ieee80211_frame *);
3387         type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3388
3389         tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3390         tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3391
3392         if (type == IEEE80211_FC0_TYPE_MGT) {
3393                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3394                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3395                     "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3396         } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3397                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3398                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3399                     "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3400         } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3401                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3402                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3403                     "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3404         } else if (m->m_flags & M_EAPOL) {
3405                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3406                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3407                     "%s: EAPOL\n", __func__);
3408         } else if (type == IEEE80211_FC0_TYPE_DATA) {
3409                 int i;
3410
3411                 /* for data frames, use RS table */
3412                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3413                 /* XXX pass pktlen */
3414                 (void) ieee80211_ratectl_rate(ni, NULL, 0);
3415                 i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3416                 ridx = in->in_ridx[i];
3417
3418                 /* This is the index into the programmed table */
3419                 tx->initial_rate_index = i;
3420                 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3421
3422                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3423                     "%s: start with i=%d, txrate %d\n",
3424                     __func__, i, iwm_rates[ridx].rate);
3425         } else {
3426                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3427                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DEFAULT (%d)\n",
3428                     __func__, tp->mgmtrate);
3429         }
3430
3431         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3432             "%s: frame type=%d txrate %d\n",
3433                 __func__, type, iwm_rates[ridx].rate);
3434
3435         rinfo = &iwm_rates[ridx];
3436
3437         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3438             __func__, ridx,
3439             rinfo->rate,
3440             !! (IWM_RIDX_IS_CCK(ridx))
3441             );
3442
3443         /* XXX TODO: hard-coded TX antenna? */
3444         rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3445         if (IWM_RIDX_IS_CCK(ridx))
3446                 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3447         tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3448
3449         return rinfo;
3450 }
3451
3452 #define TB0_SIZE 16
3453 static int
3454 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3455 {
3456         struct ieee80211com *ic = &sc->sc_ic;
3457         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3458         struct iwm_node *in = IWM_NODE(ni);
3459         struct iwm_tx_ring *ring;
3460         struct iwm_tx_data *data;
3461         struct iwm_tfd *desc;
3462         struct iwm_device_cmd *cmd;
3463         struct iwm_tx_cmd *tx;
3464         struct ieee80211_frame *wh;
3465         struct ieee80211_key *k = NULL;
3466         struct mbuf *m1;
3467         const struct iwm_rate *rinfo;
3468         uint32_t flags;
3469         u_int hdrlen;
3470         bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3471         int nsegs;
3472         uint8_t tid, type;
3473         int i, totlen, error, pad;
3474
3475         wh = mtod(m, struct ieee80211_frame *);
3476         hdrlen = ieee80211_anyhdrsize(wh);
3477         type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3478         tid = 0;
3479         ring = &sc->txq[ac];
3480         desc = &ring->desc[ring->cur];
3481         memset(desc, 0, sizeof(*desc));
3482         data = &ring->data[ring->cur];
3483
3484         /* Fill out iwm_tx_cmd to send to the firmware */
3485         cmd = &ring->cmd[ring->cur];
3486         cmd->hdr.code = IWM_TX_CMD;
3487         cmd->hdr.flags = 0;
3488         cmd->hdr.qid = ring->qid;
3489         cmd->hdr.idx = ring->cur;
3490
3491         tx = (void *)cmd->data;
3492         memset(tx, 0, sizeof(*tx));
3493
3494         rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
3495
3496         /* Encrypt the frame if need be. */
3497         if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3498                 /* Retrieve key for TX && do software encryption. */
3499                 k = ieee80211_crypto_encap(ni, m);
3500                 if (k == NULL) {
3501                         m_freem(m);
3502                         return (ENOBUFS);
3503                 }
3504                 /* 802.11 header may have moved. */
3505                 wh = mtod(m, struct ieee80211_frame *);
3506         }
3507
3508         if (ieee80211_radiotap_active_vap(vap)) {
3509                 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3510
3511                 tap->wt_flags = 0;
3512                 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3513                 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3514                 tap->wt_rate = rinfo->rate;
3515                 if (k != NULL)
3516                         tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3517                 ieee80211_radiotap_tx(vap, m);
3518         }
3519
3520
3521         totlen = m->m_pkthdr.len;
3522
3523         flags = 0;
3524         if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3525                 flags |= IWM_TX_CMD_FLG_ACK;
3526         }
3527
3528         if (type == IEEE80211_FC0_TYPE_DATA
3529             && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3530             && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3531                 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3532         }
3533
3534         if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3535             type != IEEE80211_FC0_TYPE_DATA)
3536                 tx->sta_id = sc->sc_aux_sta.sta_id;
3537         else
3538                 tx->sta_id = IWM_STATION_ID;
3539
3540         if (type == IEEE80211_FC0_TYPE_MGT) {
3541                 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3542
3543                 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3544                     subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3545                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3546                 } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3547                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3548                 } else {
3549                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3550                 }
3551         } else {
3552                 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3553         }
3554
3555         if (hdrlen & 3) {
3556                 /* First segment length must be a multiple of 4. */
3557                 flags |= IWM_TX_CMD_FLG_MH_PAD;
3558                 pad = 4 - (hdrlen & 3);
3559         } else
3560                 pad = 0;
3561
3562         tx->driver_txop = 0;
3563         tx->next_frame_len = 0;
3564
3565         tx->len = htole16(totlen);
3566         tx->tid_tspec = tid;
3567         tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3568
3569         /* Set physical address of "scratch area". */
3570         tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3571         tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3572
3573         /* Copy 802.11 header in TX command. */
3574         memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3575
3576         flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3577
3578         tx->sec_ctl = 0;
3579         tx->tx_flags |= htole32(flags);
3580
3581         /* Trim 802.11 header. */
3582         m_adj(m, hdrlen);
3583         error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3584             segs, &nsegs, BUS_DMA_NOWAIT);
3585         if (error != 0) {
3586                 if (error != EFBIG) {
3587                         device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3588                             error);
3589                         m_freem(m);
3590                         return error;
3591                 }
3592                 /* Too many DMA segments, linearize mbuf. */
3593                 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3594                 if (m1 == NULL) {
3595                         device_printf(sc->sc_dev,
3596                             "%s: could not defrag mbuf\n", __func__);
3597                         m_freem(m);
3598                         return (ENOBUFS);
3599                 }
3600                 m = m1;
3601
3602                 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3603                     segs, &nsegs, BUS_DMA_NOWAIT);
3604                 if (error != 0) {
3605                         device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3606                             error);
3607                         m_freem(m);
3608                         return error;
3609                 }
3610         }
3611         data->m = m;
3612         data->in = in;
3613         data->done = 0;
3614
3615         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3616             "sending txd %p, in %p\n", data, data->in);
3617         KASSERT(data->in != NULL, ("node is NULL"));
3618
3619         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3620             "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3621             ring->qid, ring->cur, totlen, nsegs,
3622             le32toh(tx->tx_flags),
3623             le32toh(tx->rate_n_flags),
3624             tx->initial_rate_index
3625             );
3626
3627         /* Fill TX descriptor. */
3628         desc->num_tbs = 2 + nsegs;
3629
3630         desc->tbs[0].lo = htole32(data->cmd_paddr);
3631         desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3632             (TB0_SIZE << 4);
3633         desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3634         desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3635             ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3636               + hdrlen + pad - TB0_SIZE) << 4);
3637
3638         /* Other DMA segments are for data payload. */
3639         for (i = 0; i < nsegs; i++) {
3640                 seg = &segs[i];
3641                 desc->tbs[i+2].lo = htole32(seg->ds_addr);
3642                 desc->tbs[i+2].hi_n_len =
3643                     htole16(iwm_get_dma_hi_addr(seg->ds_addr))
3644                     | ((seg->ds_len) << 4);
3645         }
3646
3647         bus_dmamap_sync(ring->data_dmat, data->map,
3648             BUS_DMASYNC_PREWRITE);
3649         bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3650             BUS_DMASYNC_PREWRITE);
3651         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3652             BUS_DMASYNC_PREWRITE);
3653
3654 #if 0
3655         iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3656 #endif
3657
3658         /* Kick TX ring. */
3659         ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3660         IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3661
3662         /* Mark TX ring as full if we reach a certain threshold. */
3663         if (++ring->queued > IWM_TX_RING_HIMARK) {
3664                 sc->qfullmsk |= 1 << ring->qid;
3665         }
3666
3667         return 0;
3668 }
3669
3670 static int
3671 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3672     const struct ieee80211_bpf_params *params)
3673 {
3674         struct ieee80211com *ic = ni->ni_ic;
3675         struct iwm_softc *sc = ic->ic_softc;
3676         int error = 0;
3677
3678         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3679             "->%s begin\n", __func__);
3680
3681         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3682                 m_freem(m);
3683                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3684                     "<-%s not RUNNING\n", __func__);
3685                 return (ENETDOWN);
3686         }
3687
3688         IWM_LOCK(sc);
3689         /* XXX fix this */
3690         if (params == NULL) {
3691                 error = iwm_tx(sc, m, ni, 0);
3692         } else {
3693                 error = iwm_tx(sc, m, ni, 0);
3694         }
3695         sc->sc_tx_timer = 5;
3696         IWM_UNLOCK(sc);
3697
3698         return (error);
3699 }
3700
3701 /*
3702  * mvm/tx.c
3703  */
3704
3705 /*
3706  * Note that there are transports that buffer frames before they reach
3707  * the firmware. This means that after flush_tx_path is called, the
3708  * queue might not be empty. The race-free way to handle this is to:
3709  * 1) set the station as draining
3710  * 2) flush the Tx path
3711  * 3) wait for the transport queues to be empty
3712  */
3713 int
3714 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3715 {
3716         int ret;
3717         struct iwm_tx_path_flush_cmd flush_cmd = {
3718                 .queues_ctl = htole32(tfd_msk),
3719                 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3720         };
3721
3722         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3723             sizeof(flush_cmd), &flush_cmd);
3724         if (ret)
3725                 device_printf(sc->sc_dev,
3726                     "Flushing tx queue failed: %d\n", ret);
3727         return ret;
3728 }
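
/*
 * Illustrative sketch only, not driver code: the race-free sequence
 * described above would look roughly like this.  iwm_mvm_drain_sta() is
 * hypothetical and iwm_trans_wait_tx_queue_empty() is not implemented
 * here; this driver currently just issues a synchronous flush (see
 * iwm_release()).
 *
 *	iwm_mvm_drain_sta(sc, in, TRUE);                  // 1) mark station as draining
 *	iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC); // 2) flush the Tx path
 *	iwm_trans_wait_tx_queue_empty(sc, tfd_msk);       // 3) wait for queues to empty
 */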
3729
3730 /*
3731  * BEGIN mvm/sta.c
3732  */
3733
3734 static int
3735 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3736         struct iwm_mvm_add_sta_cmd_v7 *cmd, int *status)
3737 {
3738         return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
3739             cmd, status);
3740 }
3741
3742 /* send station add/update command to firmware */
3743 static int
3744 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3745 {
3746         struct iwm_mvm_add_sta_cmd_v7 add_sta_cmd;
3747         int ret;
3748         uint32_t status;
3749
3750         memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3751
3752         add_sta_cmd.sta_id = IWM_STATION_ID;
3753         add_sta_cmd.mac_id_n_color
3754             = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3755                 IWM_DEFAULT_COLOR));
3756         if (!update) {
3757                 int ac;
3758                 for (ac = 0; ac < WME_NUM_AC; ac++) {
3759                         add_sta_cmd.tfd_queue_msk |=
3760                             htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
3761                 }
3762                 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3763         }
3764         add_sta_cmd.add_modify = update ? 1 : 0;
3765         add_sta_cmd.station_flags_msk
3766             |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3767         add_sta_cmd.tid_disable_tx = htole16(0xffff);
3768         if (update)
3769                 add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
3770
3771         status = IWM_ADD_STA_SUCCESS;
3772         ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3773         if (ret)
3774                 return ret;
3775
3776         switch (status) {
3777         case IWM_ADD_STA_SUCCESS:
3778                 break;
3779         default:
3780                 ret = EIO;
3781                 device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3782                 break;
3783         }
3784
3785         return ret;
3786 }
3787
3788 static int
3789 iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
3790 {
3791         return iwm_mvm_sta_send_to_fw(sc, in, 0);
3792 }
3793
3794 static int
3795 iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
3796 {
3797         return iwm_mvm_sta_send_to_fw(sc, in, 1);
3798 }
3799
3800 static int
3801 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3802         const uint8_t *addr, uint16_t mac_id, uint16_t color)
3803 {
3804         struct iwm_mvm_add_sta_cmd_v7 cmd;
3805         int ret;
3806         uint32_t status;
3807
3808         memset(&cmd, 0, sizeof(cmd));
3809         cmd.sta_id = sta->sta_id;
3810         cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3811
3812         cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3813         cmd.tid_disable_tx = htole16(0xffff);
3814
3815         if (addr)
3816                 IEEE80211_ADDR_COPY(cmd.addr, addr);
3817
3818         ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3819         if (ret)
3820                 return ret;
3821
3822         switch (status) {
3823         case IWM_ADD_STA_SUCCESS:
3824                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3825                     "%s: Internal station added.\n", __func__);
3826                 return 0;
3827         default:
3828                 device_printf(sc->sc_dev,
3829                     "%s: Add internal station failed, status=0x%x\n",
3830                     __func__, status);
3831                 ret = EIO;
3832                 break;
3833         }
3834         return ret;
3835 }
3836
3837 static int
3838 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3839 {
3840         int ret;
3841
3842         sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
3843         sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
3844
3845         ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
3846         if (ret)
3847                 return ret;
3848
3849         ret = iwm_mvm_add_int_sta_common(sc,
3850             &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
3851
3852         if (ret)
3853                 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
3854         return ret;
3855 }
3856
3857 /*
3858  * END mvm/sta.c
3859  */
3860
3861 /*
3862  * BEGIN mvm/quota.c
3863  */
3864
3865 static int
3866 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
3867 {
3868         struct iwm_time_quota_cmd cmd;
3869         int i, idx, ret, num_active_macs, quota, quota_rem;
3870         int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3871         int n_ifs[IWM_MAX_BINDINGS] = {0, };
3872         uint16_t id;
3873
3874         memset(&cmd, 0, sizeof(cmd));
3875
3876         /* currently, PHY ID == binding ID */
3877         if (in) {
3878                 id = in->in_phyctxt->id;
3879                 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3880                 colors[id] = in->in_phyctxt->color;
3881
3882                 if (1)
3883                         n_ifs[id] = 1;
3884         }
3885
3886         /*
3887          * The FW's scheduling session consists of
3888          * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3889          * equally between all the bindings that require quota
3890          */
3891         num_active_macs = 0;
3892         for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3893                 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3894                 num_active_macs += n_ifs[i];
3895         }
3896
3897         quota = 0;
3898         quota_rem = 0;
3899         if (num_active_macs) {
3900                 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3901                 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3902         }
3903
3904         for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3905                 if (colors[i] < 0)
3906                         continue;
3907
3908                 cmd.quotas[idx].id_and_color =
3909                         htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3910
3911                 if (n_ifs[i] <= 0) {
3912                         cmd.quotas[idx].quota = htole32(0);
3913                         cmd.quotas[idx].max_duration = htole32(0);
3914                 } else {
3915                         cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3916                         cmd.quotas[idx].max_duration = htole32(0);
3917                 }
3918                 idx++;
3919         }
3920
3921         /* Give the remainder of the session to the first binding */
3922         cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3923
3924         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3925             sizeof(cmd), &cmd);
3926         if (ret)
3927                 device_printf(sc->sc_dev,
3928                     "%s: Failed to send quota: %d\n", __func__, ret);
3929         return ret;
3930 }
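
/*
 * Worked example (illustrative; assumes IWM_MVM_MAX_QUOTA is 128): with
 * three active MACs the loop above would give each binding
 * quota = 128 / 3 = 42 fragments and add the remainder (128 % 3 = 2) to
 * the first binding's quota.
 */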
3931
3932 /*
3933  * END mvm/quota.c
3934  */
3935
3936 /*
3937  * ieee80211 routines
3938  */
3939
3940 /*
3941  * Change to AUTH state in 80211 state machine.  Roughly matches what
3942  * Linux does in bss_info_changed().
3943  */
3944 static int
3945 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3946 {
3947         struct ieee80211_node *ni;
3948         struct iwm_node *in;
3949         struct iwm_vap *iv = IWM_VAP(vap);
3950         uint32_t duration;
3951         int error;
3952
3953         /*
3954          * XXX I have a feeling that the vap node is being
3955          * freed from underneath us. Grr.
3956          */
3957         ni = ieee80211_ref_node(vap->iv_bss);
3958         in = IWM_NODE(ni);
3959         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3960             "%s: called; vap=%p, bss ni=%p\n",
3961             __func__,
3962             vap,
3963             ni);
3964
3965         in->in_assoc = 0;
3966
3967         error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
3968         if (error != 0)
3969                 return error;
3970
3971         error = iwm_allow_mcast(vap, sc);
3972         if (error) {
3973                 device_printf(sc->sc_dev,
3974                     "%s: failed to set multicast\n", __func__);
3975                 goto out;
3976         }
3977
3978         /*
3979          * This is where it deviates from what Linux does.
3980          *
3981          * Linux iwlwifi doesn't reset the nic each time, nor does it
3982          * call ctxt_add() here.  Instead, it adds it during vap creation,
3983          * and always does a mac_ctx_changed().
3984          *
3985          * The OpenBSD port doesn't attempt to do that - it resets things
3986          * at odd states and does the add here.
3987          *
3988          * So, until the state handling is fixed (ie, we never reset
3989          * the NIC except for a firmware failure, which should drag
3990          * the NIC back to IDLE, re-setup and re-add all the mac/phy
3991          * contexts that are required), let's do a dirty hack here.
3992          */
3993         if (iv->is_uploaded) {
3994                 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3995                         device_printf(sc->sc_dev,
3996                             "%s: failed to update MAC\n", __func__);
3997                         goto out;
3998                 }
3999                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4000                     in->in_ni.ni_chan, 1, 1)) != 0) {
4001                         device_printf(sc->sc_dev,
4002                             "%s: failed update phy ctxt\n", __func__);
4003                         goto out;
4004                 }
4005                 in->in_phyctxt = &sc->sc_phyctxt[0];
4006
4007                 if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
4008                         device_printf(sc->sc_dev,
4009                             "%s: binding update cmd\n", __func__);
4010                         goto out;
4011                 }
4012                 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4013                         device_printf(sc->sc_dev,
4014                             "%s: failed to update sta\n", __func__);
4015                         goto out;
4016                 }
4017         } else {
4018                 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4019                         device_printf(sc->sc_dev,
4020                             "%s: failed to add MAC\n", __func__);
4021                         goto out;
4022                 }
4023                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4024                     in->in_ni.ni_chan, 1, 1)) != 0) {
4025                         device_printf(sc->sc_dev,
4026                             "%s: failed add phy ctxt!\n", __func__);
4027                         error = ETIMEDOUT;
4028                         goto out;
4029                 }
4030                 in->in_phyctxt = &sc->sc_phyctxt[0];
4031
4032                 if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
4033                         device_printf(sc->sc_dev,
4034                             "%s: binding add cmd\n", __func__);
4035                         goto out;
4036                 }
4037                 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4038                         device_printf(sc->sc_dev,
4039                             "%s: failed to add sta\n", __func__);
4040                         goto out;
4041                 }
4042         }
4043
4044         /*
4045          * Prevent the FW from wandering off channel during association
4046          * by "protecting" the session with a time event.
4047          */
4048         /* XXX duration is in units of TU, not MS */
4049         duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4050         iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
4051         DELAY(100);
4052
4053         error = 0;
4054 out:
4055         ieee80211_free_node(ni);
4056         return (error);
4057 }
4058
4059 static int
4060 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
4061 {
4062         struct iwm_node *in = IWM_NODE(vap->iv_bss);
4063         int error;
4064
4065         if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4066                 device_printf(sc->sc_dev,
4067                     "%s: failed to update STA\n", __func__);
4068                 return error;
4069         }
4070
4071         in->in_assoc = 1;
4072         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4073                 device_printf(sc->sc_dev,
4074                     "%s: failed to update MAC\n", __func__);
4075                 return error;
4076         }
4077
4078         return 0;
4079 }
4080
4081 static int
4082 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
4083 {
4084         uint32_t tfd_msk;
4085
4086         /*
4087          * Ok, so *technically* the proper set of calls for going
4088          * from RUN back to SCAN is:
4089          *
4090          * iwm_mvm_power_mac_disable(sc, in);
4091          * iwm_mvm_mac_ctxt_changed(sc, in);
4092          * iwm_mvm_rm_sta(sc, in);
4093          * iwm_mvm_update_quotas(sc, NULL);
4094          * iwm_mvm_mac_ctxt_changed(sc, in);
4095          * iwm_mvm_binding_remove_vif(sc, in);
4096          * iwm_mvm_mac_ctxt_remove(sc, in);
4097          *
4098          * However, that freezes the device no matter which permutations
4099          * and modifications are attempted.  Obviously, this driver is missing
4100          * something since it works in the Linux driver, but figuring out what
4101          * is missing is a little more complicated.  Now, since we're going
4102          * back to nothing anyway, we'll just do a complete device reset.
4103          * Up yours, device!
4104          */
4105         /*
4106          * Just using 0xf for the queues mask is fine as long as we only
4107          * get here from RUN state.
4108          */
4109         tfd_msk = 0xf;
4110         mbufq_drain(&sc->sc_snd);
4111         iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
4112         /*
4113          * We seem to get away with just synchronously sending the
4114          * IWM_TXPATH_FLUSH command.
4115          */
4116 //      iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
4117         iwm_stop_device(sc);
4118         iwm_init_hw(sc);
4119         if (in)
4120                 in->in_assoc = 0;
4121         return 0;
4122
4123 #if 0
4124         int error;
4125
4126         iwm_mvm_power_mac_disable(sc, in);
4127
4128         if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4129                 device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
4130                 return error;
4131         }
4132
4133         if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
4134                 device_printf(sc->sc_dev, "sta remove fail %d\n", error);
4135                 return error;
4136         }
4137         error = iwm_mvm_rm_sta(sc, in);
4138         in->in_assoc = 0;
4139         iwm_mvm_update_quotas(sc, NULL);
4140         if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4141                 device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
4142                 return error;
4143         }
4144         iwm_mvm_binding_remove_vif(sc, in);
4145
4146         iwm_mvm_mac_ctxt_remove(sc, in);
4147
4148         return error;
4149 #endif
4150 }
4151
4152 static struct ieee80211_node *
4153 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4154 {
4155         return malloc(sizeof (struct iwm_node), M_80211_NODE,
4156             M_NOWAIT | M_ZERO);
4157 }
4158
4159 static void
4160 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
4161 {
4162         struct ieee80211_node *ni = &in->in_ni;
4163         struct iwm_lq_cmd *lq = &in->in_lq;
4164         int nrates = ni->ni_rates.rs_nrates;
4165         int i, ridx, tab = 0;
4166 //      int txant = 0;
4167
4168         if (nrates > nitems(lq->rs_table)) {
4169                 device_printf(sc->sc_dev,
4170                     "%s: node supports %d rates, driver handles "
4171                     "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4172                 return;
4173         }
4174         if (nrates == 0) {
4175                 device_printf(sc->sc_dev,
4176                     "%s: node supports 0 rates, odd!\n", __func__);
4177                 return;
4178         }
4179
4180         /*
4181          * XXX .. and most of iwm_node is not initialised explicitly;
4182          * it's all just 0x0 passed to the firmware.
4183          */
4184
4185         /* first figure out which rates we should support */
4186         /* XXX TODO: this isn't 11n aware /at all/ */
4187         memset(&in->in_ridx, -1, sizeof(in->in_ridx));
4188         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4189             "%s: nrates=%d\n", __func__, nrates);
4190
4191         /*
4192          * Loop over nrates and populate in_ridx from the highest
4193          * rate to the lowest rate.  Remember, in_ridx[] has
4194          * IEEE80211_RATE_MAXSIZE entries!
4195          */
4196         for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
4197                 int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;
4198
4199                 /* Map 802.11 rate to HW rate index. */
4200                 for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
4201                         if (iwm_rates[ridx].rate == rate)
4202                                 break;
4203                 if (ridx > IWM_RIDX_MAX) {
4204                         device_printf(sc->sc_dev,
4205                             "%s: WARNING: device rate for %d not found!\n",
4206                             __func__, rate);
4207                 } else {
4208                         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4209                             "%s: rate: i: %d, rate=%d, ridx=%d\n",
4210                             __func__,
4211                             i,
4212                             rate,
4213                             ridx);
4214                         in->in_ridx[i] = ridx;
4215                 }
4216         }
4217
4218         /* then construct a lq_cmd based on those */
4219         memset(lq, 0, sizeof(*lq));
4220         lq->sta_id = IWM_STATION_ID;
4221
4222         /* For HT, always enable RTS/CTS to avoid excessive retries. */
4223         if (ni->ni_flags & IEEE80211_NODE_HT)
4224                 lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4225
4226         /*
4227          * Are these used?  (We don't do SISO or MIMO.)
4228          * They need to be set to non-zero, though, or we get an error.
4229          */
4230         lq->single_stream_ant_msk = 1;
4231         lq->dual_stream_ant_msk = 1;
4232
4233         /*
4234          * Build the actual rate selection table.
4235          * The lowest bits are the rates.  Additionally,
4236          * CCK needs bit 9 to be set.  The rest of the bits we add
4237          * to the table entry select the TX antenna.
4238          * Note that the rates are added highest rate first
4239          * (the opposite order of ni_rates).
4240          */
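             /*
              * Purely as an illustration (the PLCP constant comes from
              * the iwm_rates[] table; the antenna bit is assumed to be
              * an IWM_ANT_A-style mask): an OFDM 54 Mbit/s entry sent
              * on antenna A would be built roughly as
              *
              *   tab = IWM_RATE_54M_PLCP | (IWM_ANT_A << IWM_RATE_MCS_ANT_POS);
              *
              * and a CCK entry would additionally have
              * IWM_RATE_MCS_CCK_MSK or'd in.
              */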
4241         /*
4242          * XXX TODO: this should be looping over the min of nrates
4243          * and LQ_MAX_RETRY_NUM.  Sigh.
4244          */
4245         for (i = 0; i < nrates; i++) {
4246                 int nextant;
4247
4248 #if 0
4249                 if (txant == 0)
4250                         txant = iwm_mvm_get_valid_tx_ant(sc);
4251                 nextant = 1<<(ffs(txant)-1);
4252                 txant &= ~nextant;
4253 #else
4254                 nextant = iwm_mvm_get_valid_tx_ant(sc);
4255 #endif
4256                 /*
4257                  * Map the rate id to an index into our
4258                  * hardware table, which holds the
4259                  * configuration to use for this rate.
4260                  */
4261                 ridx = in->in_ridx[i];
4262                 tab = iwm_rates[ridx].plcp;
4263                 tab |= nextant << IWM_RATE_MCS_ANT_POS;
4264                 if (IWM_RIDX_IS_CCK(ridx))
4265                         tab |= IWM_RATE_MCS_CCK_MSK;
4266                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4267                     "station rate i=%d, rate=%d, hw=%x\n",
4268                     i, iwm_rates[ridx].rate, tab);
4269                 lq->rs_table[i] = htole32(tab);
4270         }
4271         /* then fill the rest with the lowest possible rate */
4272         for (i = nrates; i < nitems(lq->rs_table); i++) {
4273                 KASSERT(tab != 0, ("invalid tab"));
4274                 lq->rs_table[i] = htole32(tab);
4275         }
4276 }
4277
4278 static int
4279 iwm_media_change(struct ifnet *ifp)
4280 {
4281         struct ieee80211vap *vap = ifp->if_softc;
4282         struct ieee80211com *ic = vap->iv_ic;
4283         struct iwm_softc *sc = ic->ic_softc;
4284         int error;
4285
4286         error = ieee80211_media_change(ifp);
4287         if (error != ENETRESET)
4288                 return error;
4289
4290         IWM_LOCK(sc);
4291         if (ic->ic_nrunning > 0) {
4292                 iwm_stop(sc);
4293                 iwm_init(sc);
4294         }
4295         IWM_UNLOCK(sc);
4296         return error;
4297 }
4298
4299
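     /*
      * net80211 state change handler.  Note the locking dance: the
      * net80211 com lock is dropped and the driver lock taken while
      * talking to the firmware, and the locks are restored before
      * chaining to the previous iv_newstate handler.
      */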
4300 static int
4301 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4302 {
4303         struct iwm_vap *ivp = IWM_VAP(vap);
4304         struct ieee80211com *ic = vap->iv_ic;
4305         struct iwm_softc *sc = ic->ic_softc;
4306         struct iwm_node *in;
4307         int error;
4308
4309         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4310             "switching state %s -> %s\n",
4311             ieee80211_state_name[vap->iv_state],
4312             ieee80211_state_name[nstate]);
4313         IEEE80211_UNLOCK(ic);
4314         IWM_LOCK(sc);
4315
4316         if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
4317                 iwm_led_blink_stop(sc);
4318
4319         /* disable beacon filtering if we're hopping out of RUN */
4320         if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
4321                 iwm_mvm_disable_beacon_filter(sc);
4322
4323                 if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4324                         in->in_assoc = 0;
4325
4326                 if (nstate == IEEE80211_S_INIT) {
4327                         IWM_UNLOCK(sc);
4328                         IEEE80211_LOCK(ic);
4329                         error = ivp->iv_newstate(vap, nstate, arg);
4330                         IEEE80211_UNLOCK(ic);
4331                         IWM_LOCK(sc);
4332                         iwm_release(sc, NULL);
4333                         IWM_UNLOCK(sc);
4334                         IEEE80211_LOCK(ic);
4335                         return error;
4336                 }
4337
4338                 /*
4339                  * It's impossible to directly go RUN->SCAN. If we iwm_release()
4340                  * above then the card will be completely reinitialized,
4341                  * so the driver must do everything necessary to bring the card
4342                  * from INIT to SCAN.
4343                  *
4344                  * Additionally, upon receiving deauth frame from AP,
4345                  * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
4346                  * state. This will also fail with this driver, so bring the FSM
4347                  * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
4348                  *
4349                  * XXX TODO: fix this for FreeBSD!
4350                  */
4351                 if (nstate == IEEE80211_S_SCAN ||
4352                     nstate == IEEE80211_S_AUTH ||
4353                     nstate == IEEE80211_S_ASSOC) {
4354                         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4355                             "Force transition to INIT; MGT=%d\n", arg);
4356                         IWM_UNLOCK(sc);
4357                         IEEE80211_LOCK(ic);
4358                         /* Always pass arg as -1 since we can't Tx right now. */
4359                         /*
4360                          * XXX arg is just ignored anyway when transitioning
4361                          *     to IEEE80211_S_INIT.
4362                          */
4363                         vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
4364                         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4365                             "Going INIT->SCAN\n");
4366                         nstate = IEEE80211_S_SCAN;
4367                         IEEE80211_UNLOCK(ic);
4368                         IWM_LOCK(sc);
4369                 }
4370         }
4371
4372         switch (nstate) {
4373         case IEEE80211_S_INIT:
4374                 break;
4375
4376         case IEEE80211_S_AUTH:
4377                 if ((error = iwm_auth(vap, sc)) != 0) {
4378                         device_printf(sc->sc_dev,
4379                             "%s: could not move to auth state: %d\n",
4380                             __func__, error);
4381                         break;
4382                 }
4383                 break;
4384
4385         case IEEE80211_S_ASSOC:
4386                 if ((error = iwm_assoc(vap, sc)) != 0) {
4387                         device_printf(sc->sc_dev,
4388                             "%s: failed to associate: %d\n", __func__,
4389                             error);
4390                         break;
4391                 }
4392                 break;
4393
4394         case IEEE80211_S_RUN:
4395         {
4396                 struct iwm_host_cmd cmd = {
4397                         .id = IWM_LQ_CMD,
4398                         .len = { sizeof(in->in_lq), },
4399                         .flags = IWM_CMD_SYNC,
4400                 };
4401
4402                 /* Update the association state, now that we have it all */
4403                 /* (e.g. the associd comes in at this point). */
4404                 error = iwm_assoc(vap, sc);
4405                 if (error != 0) {
4406                         device_printf(sc->sc_dev,
4407                             "%s: failed to update association state: %d\n",
4408                             __func__,
4409                             error);
4410                         break;
4411                 }
4412
4413                 in = IWM_NODE(vap->iv_bss);
4414                 iwm_mvm_power_mac_update_mode(sc, in);
4415                 iwm_mvm_enable_beacon_filter(sc, in);
4416                 iwm_mvm_update_quotas(sc, in);
4417                 iwm_setrates(sc, in);
4418
4419                 cmd.data[0] = &in->in_lq;
4420                 if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
4421                         device_printf(sc->sc_dev,
4422                             "%s: IWM_LQ_CMD failed\n", __func__);
4423                 }
4424
4425                 iwm_mvm_led_enable(sc);
4426                 break;
4427         }
4428
4429         default:
4430                 break;
4431         }
4432         IWM_UNLOCK(sc);
4433         IEEE80211_LOCK(ic);
4434
4435         return (ivp->iv_newstate(vap, nstate, arg));
4436 }
4437
4438 void
4439 iwm_endscan_cb(void *arg, int pending)
4440 {
4441         struct iwm_softc *sc = arg;
4442         struct ieee80211com *ic = &sc->sc_ic;
4443
4444         IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4445             "%s: scan ended\n",
4446             __func__);
4447
4448         ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4449 }
4450
4451 /*
4452  * Aging and idle timeouts for the different possible scenarios
4453  * in the default configuration.
4454  */
4455 static const uint32_t
4456 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4457         {
4458                 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
4459                 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
4460         },
4461         {
4462                 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
4463                 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
4464         },
4465         {
4466                 htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
4467                 htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
4468         },
4469         {
4470                 htole32(IWM_SF_BA_AGING_TIMER_DEF),
4471                 htole32(IWM_SF_BA_IDLE_TIMER_DEF)
4472         },
4473         {
4474                 htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
4475                 htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
4476         },
4477 };
4478
4479 /*
4480  * Aging and idle timeouts for the different possible scenarios
4481  * in single BSS MAC configuration.
4482  */
4483 static const uint32_t
4484 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4485         {
4486                 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
4487                 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
4488         },
4489         {
4490                 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
4491                 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
4492         },
4493         {
4494                 htole32(IWM_SF_MCAST_AGING_TIMER),
4495                 htole32(IWM_SF_MCAST_IDLE_TIMER)
4496         },
4497         {
4498                 htole32(IWM_SF_BA_AGING_TIMER),
4499                 htole32(IWM_SF_BA_IDLE_TIMER)
4500         },
4501         {
4502                 htole32(IWM_SF_TX_RE_AGING_TIMER),
4503                 htole32(IWM_SF_TX_RE_IDLE_TIMER)
4504         },
4505 };
4506
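     /*
      * Fill a Smart FIFO configuration command: choose the FULL_ON
      * watermark from the AP's HT/antenna capabilities (or a default
      * when unassociated) and pick the matching timeout table from
      * the arrays above.
      */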
4507 static void
4508 iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
4509     struct ieee80211_node *ni)
4510 {
4511         int i, j, watermark;
4512
4513         sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
4514
4515         /*
4516          * If we are in the association flow, check the antenna configuration
4517          * capabilities of the AP station and choose the watermark accordingly.
4518          */
4519         if (ni) {
4520                 if (ni->ni_flags & IEEE80211_NODE_HT) {
4521 #ifdef notyet
4522                         if (ni->ni_rxmcs[2] != 0)
4523                                 watermark = IWM_SF_W_MARK_MIMO3;
4524                         else if (ni->ni_rxmcs[1] != 0)
4525                                 watermark = IWM_SF_W_MARK_MIMO2;
4526                         else
4527 #endif
4528                                 watermark = IWM_SF_W_MARK_SISO;
4529                 } else {
4530                         watermark = IWM_SF_W_MARK_LEGACY;
4531                 }
4532         /* default watermark value for unassociated mode. */
4533         } else {
4534                 watermark = IWM_SF_W_MARK_MIMO2;
4535         }
4536         sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
4537
4538         for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
4539                 for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
4540                         sf_cmd->long_delay_timeouts[i][j] =
4541                                         htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
4542                 }
4543         }
4544
4545         if (ni) {
4546                 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
4547                        sizeof(iwm_sf_full_timeout));
4548         } else {
4549                 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
4550                        sizeof(iwm_sf_full_timeout_def));
4551         }
4552 }
4553
4554 static int
4555 iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4556 {
4557         struct ieee80211com *ic = &sc->sc_ic;
4558         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4559         struct iwm_sf_cfg_cmd sf_cmd = {
4560                 .state = htole32(IWM_SF_FULL_ON),
4561         };
4562         int ret = 0;
4563
4564         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4565                 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4566
4567         switch (new_state) {
4568         case IWM_SF_UNINIT:
4569         case IWM_SF_INIT_OFF:
4570                 iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4571                 break;
4572         case IWM_SF_FULL_ON:
4573                 iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4574                 break;
4575         default:
4576                 IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4577                     "Invalid state: %d. not sending Smart Fifo cmd\n",
4578                           new_state);
4579                 return EINVAL;
4580         }
4581
4582         ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4583                                    sizeof(sf_cmd), &sf_cmd);
4584         return ret;
4585 }
4586
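     /* Send the initial Bluetooth coexistence configuration. */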
4587 static int
4588 iwm_send_bt_init_conf(struct iwm_softc *sc)
4589 {
4590         struct iwm_bt_coex_cmd bt_cmd;
4591
4592         bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4593         bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4594
4595         return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4596             &bt_cmd);
4597 }
4598
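     /*
      * Send an MCC (mobile country code) update to the firmware; with
      * IWM_DEBUG the reported regulatory domain is printed as well.
      * The "ZZ" code passed in from iwm_init_hw() below apparently
      * requests the world-wide domain.
      */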
4599 static int
4600 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4601 {
4602         struct iwm_mcc_update_cmd mcc_cmd;
4603         struct iwm_host_cmd hcmd = {
4604                 .id = IWM_MCC_UPDATE_CMD,
4605                 .flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4606                 .data = { &mcc_cmd },
4607         };
4608         int ret;
4609 #ifdef IWM_DEBUG
4610         struct iwm_rx_packet *pkt;
4611         struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4612         struct iwm_mcc_update_resp *mcc_resp;
4613         int n_channels;
4614         uint16_t mcc;
4615 #endif
4616         int resp_v2 = isset(sc->sc_enabled_capa,
4617             IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4618
4619         memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4620         mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4621         if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4622             isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
4623                 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4624         else
4625                 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4626
4627         if (resp_v2)
4628                 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4629         else
4630                 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4631
4632         IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4633             "send MCC update to FW with '%c%c' src = %d\n",
4634             alpha2[0], alpha2[1], mcc_cmd.source_id);
4635
4636         ret = iwm_send_cmd(sc, &hcmd);
4637         if (ret)
4638                 return ret;
4639
4640 #ifdef IWM_DEBUG
4641         pkt = hcmd.resp_pkt;
4642
4643         /* Extract MCC response */
4644         if (resp_v2) {
4645                 mcc_resp = (void *)pkt->data;
4646                 mcc = mcc_resp->mcc;
4647                 n_channels =  le32toh(mcc_resp->n_channels);
4648         } else {
4649                 mcc_resp_v1 = (void *)pkt->data;
4650                 mcc = mcc_resp_v1->mcc;
4651                 n_channels =  le32toh(mcc_resp_v1->n_channels);
4652         }
4653
4654         /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4655         if (mcc == 0)
4656                 mcc = 0x3030;  /* "00" - world */
4657
4658         IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4659             "regulatory domain '%c%c' (%d channels available)\n",
4660             mcc >> 8, mcc & 0xff, n_channels);
4661 #endif
4662         iwm_free_resp(sc, &hcmd);
4663
4664         return 0;
4665 }
4666
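     /* Set the thermal throttling TX backoff value in the firmware. */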
4667 static void
4668 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4669 {
4670         struct iwm_host_cmd cmd = {
4671                 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4672                 .len = { sizeof(uint32_t), },
4673                 .data = { &backoff, },
4674         };
4675
4676         if (iwm_send_cmd(sc, &cmd) != 0) {
4677                 device_printf(sc->sc_dev,
4678                     "failed to change thermal tx backoff\n");
4679         }
4680 }
4681
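     /*
      * Bring the hardware up: run the INIT firmware for calibration,
      * reset, load the regular firmware, and then issue the
      * configuration commands (BT coex, antennas, PHY contexts, power,
      * regulatory, scan setup) before enabling the TX queues.
      */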
4682 static int
4683 iwm_init_hw(struct iwm_softc *sc)
4684 {
4685         struct ieee80211com *ic = &sc->sc_ic;
4686         int error, i, ac;
4687
4688         if ((error = iwm_start_hw(sc)) != 0) {
4689                 printf("iwm_start_hw: failed %d\n", error);
4690                 return error;
4691         }
4692
4693         if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4694                 printf("iwm_run_init_mvm_ucode: failed %d\n", error);
4695                 return error;
4696         }
4697
4698         /*
4699          * We should stop and restart the HW since the INIT
4700          * image has just been loaded.
4701          */
4702         iwm_stop_device(sc);
4703         if ((error = iwm_start_hw(sc)) != 0) {
4704                 device_printf(sc->sc_dev, "could not initialize hardware\n");
4705                 return error;
4706         }
4707
4708         /* Restart, this time with the regular firmware */
4709         error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
4710         if (error) {
4711                 device_printf(sc->sc_dev, "could not load firmware\n");
4712                 goto error;
4713         }
4714
4715         if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4716                 device_printf(sc->sc_dev, "bt init conf failed\n");
4717                 goto error;
4718         }
4719
4720         error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
4721         if (error != 0) {
4722                 device_printf(sc->sc_dev, "antenna config failed\n");
4723                 goto error;
4724         }
4725
4726         /* Send phy db control command and then phy db calibration */
4727         if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4728                 goto error;
4729
4730         if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4731                 device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4732                 goto error;
4733         }
4734
4735         /* Add auxiliary station for scanning */
4736         if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4737                 device_printf(sc->sc_dev, "add_aux_sta failed\n");
4738                 goto error;
4739         }
4740
4741         for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4742                 /*
4743                  * The channel used here isn't relevant as it's
4744                  * going to be overwritten in the other flows.
4745                  * For now use the first channel we have.
4746                  */
4747                 if ((error = iwm_mvm_phy_ctxt_add(sc,
4748                     &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4749                         goto error;
4750         }
4751
4752         /* Initialize tx backoffs to the minimum. */
4753         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4754                 iwm_mvm_tt_tx_backoff(sc, 0);
4755
4756         error = iwm_mvm_power_update_device(sc);
4757         if (error)
4758                 goto error;
4759
4760         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
4761                 if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4762                         goto error;
4763         }
4764
4765         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4766                 if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4767                         goto error;
4768         }
4769
4770         /* Enable Tx queues. */
4771         for (ac = 0; ac < WME_NUM_AC; ac++) {
4772                 error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4773                     iwm_mvm_ac_to_tx_fifo[ac]);
4774                 if (error)
4775                         goto error;
4776         }
4777
4778         if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4779                 device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4780                 goto error;
4781         }
4782
4783         return 0;
4784
4785  error:
4786         iwm_stop_device(sc);
4787         return error;
4788 }
4789
4790 /* Allow multicast from our BSSID. */
4791 static int
4792 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4793 {
4794         struct ieee80211_node *ni = vap->iv_bss;
4795         struct iwm_mcast_filter_cmd *cmd;
4796         size_t size;
4797         int error;
4798
4799         size = roundup(sizeof(*cmd), 4);
4800         cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4801         if (cmd == NULL)
4802                 return ENOMEM;
4803         cmd->filter_own = 1;
4804         cmd->port_id = 0;
4805         cmd->count = 0;
4806         cmd->pass_all = 1;
4807         IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4808
4809         error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4810             IWM_CMD_SYNC, size, cmd);
4811         free(cmd, M_DEVBUF);
4812
4813         return (error);
4814 }
4815
4816 /*
4817  * ifnet interfaces
4818  */
4819
4820 static void
4821 iwm_init(struct iwm_softc *sc)
4822 {
4823         int error;
4824
4825         if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4826                 return;
4827         }
4828         sc->sc_generation++;
4829         sc->sc_flags &= ~IWM_FLAG_STOPPED;
4830
4831         if ((error = iwm_init_hw(sc)) != 0) {
4832                 printf("iwm_init_hw failed %d\n", error);
4833                 iwm_stop(sc);
4834                 return;
4835         }
4836
4837         /*
4838          * Ok, the firmware is loaded and we are running.
4839          */
4840         sc->sc_flags |= IWM_FLAG_HW_INITED;
4841         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4842 }
4843
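     /*
      * net80211 transmit entry point: enqueue the frame on the driver
      * send queue and kick iwm_start() to drain it.
      */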
4844 static int
4845 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4846 {
4847         struct iwm_softc *sc;
4848         int error;
4849
4850         sc = ic->ic_softc;
4851
4852         IWM_LOCK(sc);
4853         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4854                 IWM_UNLOCK(sc);
4855                 return (ENXIO);
4856         }
4857         error = mbufq_enqueue(&sc->sc_snd, m);
4858         if (error) {
4859                 IWM_UNLOCK(sc);
4860                 return (error);
4861         }
4862         iwm_start(sc);
4863         IWM_UNLOCK(sc);
4864         return (0);
4865 }
4866
4867 /*
4868  * Dequeue packets from sendq and call send.
4869  */
4870 static void
4871 iwm_start(struct iwm_softc *sc)
4872 {
4873         struct ieee80211_node *ni;
4874         struct mbuf *m;
4875         int ac = 0;
4876
4877         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4878         while (sc->qfullmsk == 0 &&
4879                 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4880                 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4881                 if (iwm_tx(sc, m, ni, ac) != 0) {
4882                         if_inc_counter(ni->ni_vap->iv_ifp,
4883                             IFCOUNTER_OERRORS, 1);
4884                         ieee80211_free_node(ni);
4885                         continue;
4886                 }
4887                 sc->sc_tx_timer = 15;
4888         }
4889         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4890 }
4891
4892 static void
4893 iwm_stop(struct iwm_softc *sc)
4894 {
4895
4896         sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4897         sc->sc_flags |= IWM_FLAG_STOPPED;
4898         sc->sc_generation++;
4899         iwm_led_blink_stop(sc);
4900         sc->sc_tx_timer = 0;
4901         iwm_stop_device(sc);
4902 }
4903
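     /*
      * Per-second watchdog: if a TX has been pending for too long,
      * dump the firmware error log (with IWM_DEBUG) and ask net80211
      * to restart the device.
      */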
4904 static void
4905 iwm_watchdog(void *arg)
4906 {
4907         struct iwm_softc *sc = arg;
4908         struct ieee80211com *ic = &sc->sc_ic;
4909
4910         if (sc->sc_tx_timer > 0) {
4911                 if (--sc->sc_tx_timer == 0) {
4912                         device_printf(sc->sc_dev, "device timeout\n");
4913 #ifdef IWM_DEBUG
4914                         iwm_nic_error(sc);
4915 #endif
4916                         ieee80211_restart_all(ic);
4917                         counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4918                         return;
4919                 }
4920         }
4921         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4922 }
4923
4924 static void
4925 iwm_parent(struct ieee80211com *ic)
4926 {
4927         struct iwm_softc *sc = ic->ic_softc;
4928         int startall = 0;
4929
4930         IWM_LOCK(sc);
4931         if (ic->ic_nrunning > 0) {
4932                 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4933                         iwm_init(sc);
4934                         startall = 1;
4935                 }
4936         } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4937                 iwm_stop(sc);
4938         IWM_UNLOCK(sc);
4939         if (startall)
4940                 ieee80211_start_all(ic);
4941 }
4942
4943 /*
4944  * The interrupt side of things
4945  */
4946
4947 /*
4948  * error dumping routines are from iwlwifi/mvm/utils.c
4949  */
4950
4951 /*
4952  * Note: This structure is read from the device with IO accesses,
4953  * and the reading already does the endian conversion. As it is
4954  * read with uint32_t-sized accesses, any members with a different size
4955  * need to be ordered correctly though!
4956  */
4957 struct iwm_error_event_table {
4958         uint32_t valid;         /* (nonzero) valid, (0) log is empty */
4959         uint32_t error_id;              /* type of error */
4960         uint32_t trm_hw_status0;        /* TRM HW status */
4961         uint32_t trm_hw_status1;        /* TRM HW status */
4962         uint32_t blink2;                /* branch link */
4963         uint32_t ilink1;                /* interrupt link */
4964         uint32_t ilink2;                /* interrupt link */
4965         uint32_t data1;         /* error-specific data */
4966         uint32_t data2;         /* error-specific data */
4967         uint32_t data3;         /* error-specific data */
4968         uint32_t bcon_time;             /* beacon timer */
4969         uint32_t tsf_low;               /* network timestamp function timer */
4970         uint32_t tsf_hi;                /* network timestamp function timer */
4971         uint32_t gp1;           /* GP1 timer register */
4972         uint32_t gp2;           /* GP2 timer register */
4973         uint32_t fw_rev_type;   /* firmware revision type */
4974         uint32_t major;         /* uCode version major */
4975         uint32_t minor;         /* uCode version minor */
4976         uint32_t hw_ver;                /* HW Silicon version */
4977         uint32_t brd_ver;               /* HW board version */
4978         uint32_t log_pc;                /* log program counter */
4979         uint32_t frame_ptr;             /* frame pointer */
4980         uint32_t stack_ptr;             /* stack pointer */
4981         uint32_t hcmd;          /* last host command header */
4982         uint32_t isr0;          /* isr status register LMPM_NIC_ISR0:
4983                                  * rxtx_flag */
4984         uint32_t isr1;          /* isr status register LMPM_NIC_ISR1:
4985                                  * host_flag */
4986         uint32_t isr2;          /* isr status register LMPM_NIC_ISR2:
4987                                  * enc_flag */
4988         uint32_t isr3;          /* isr status register LMPM_NIC_ISR3:
4989                                  * time_flag */
4990         uint32_t isr4;          /* isr status register LMPM_NIC_ISR4:
4991                                  * wico interrupt */
4992         uint32_t last_cmd_id;   /* last HCMD id handled by the firmware */
4993         uint32_t wait_event;            /* wait event() caller address */
4994         uint32_t l2p_control;   /* L2pControlField */
4995         uint32_t l2p_duration;  /* L2pDurationField */
4996         uint32_t l2p_mhvalid;   /* L2pMhValidBits */
4997         uint32_t l2p_addr_match;        /* L2pAddrMatchStat */
4998         uint32_t lmpm_pmg_sel;  /* indicates which clocks are turned on
4999                                  * (LMPM_PMG_SEL) */
5000         uint32_t u_timestamp;   /* date and time of the uCode
5001                                  * compilation */
5002         uint32_t flow_handler;  /* FH read/write pointers, RX credit */
5003 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5004
5005 /*
5006  * UMAC error struct - relevant starting from family 8000 chip.
5007  * Note: This structure is read from the device with IO accesses,
5008  * and the reading already does the endian conversion. As it is
5009  * read with u32-sized accesses, any members with a different size
5010  * need to be ordered correctly though!
5011  */
5012 struct iwm_umac_error_event_table {
5013         uint32_t valid;         /* (nonzero) valid, (0) log is empty */
5014         uint32_t error_id;      /* type of error */
5015         uint32_t blink1;        /* branch link */
5016         uint32_t blink2;        /* branch link */
5017         uint32_t ilink1;        /* interrupt link */
5018         uint32_t ilink2;        /* interrupt link */
5019         uint32_t data1;         /* error-specific data */
5020         uint32_t data2;         /* error-specific data */
5021         uint32_t data3;         /* error-specific data */
5022         uint32_t umac_major;
5023         uint32_t umac_minor;
5024         uint32_t frame_pointer; /* core register 27 */
5025         uint32_t stack_pointer; /* core register 28 */
5026         uint32_t cmd_header;    /* latest host cmd sent to UMAC */
5027         uint32_t nic_isr_pref;  /* ISR status register */
5028 } __packed;
5029
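     /* Error log layout constants, carried over from iwlwifi. */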
5030 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
5031 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5032
5033 #ifdef IWM_DEBUG
5034 struct {
5035         const char *name;
5036         uint8_t num;
5037 } advanced_lookup[] = {
5038         { "NMI_INTERRUPT_WDG", 0x34 },
5039         { "SYSASSERT", 0x35 },
5040         { "UCODE_VERSION_MISMATCH", 0x37 },
5041         { "BAD_COMMAND", 0x38 },
5042         { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
5043         { "FATAL_ERROR", 0x3D },
5044         { "NMI_TRM_HW_ERR", 0x46 },
5045         { "NMI_INTERRUPT_TRM", 0x4C },
5046         { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
5047         { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
5048         { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
5049         { "NMI_INTERRUPT_HOST", 0x66 },
5050         { "NMI_INTERRUPT_ACTION_PT", 0x7C },
5051         { "NMI_INTERRUPT_UNKNOWN", 0x84 },
5052         { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
5053         { "ADVANCED_SYSASSERT", 0 },
5054 };
5055
5056 static const char *
5057 iwm_desc_lookup(uint32_t num)
5058 {
5059         int i;
5060
5061         for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5062                 if (advanced_lookup[i].num == num)
5063                         return advanced_lookup[i].name;
5064
5065         /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5066         return advanced_lookup[i].name;
5067 }
5068
5069 static void
5070 iwm_nic_umac_error(struct iwm_softc *sc)
5071 {
5072         struct iwm_umac_error_event_table table;
5073         uint32_t base;
5074
5075         base = sc->sc_uc.uc_umac_error_event_table;
5076
5077         if (base < 0x800000) {
5078                 device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5079                     base);
5080                 return;
5081         }
5082
5083         if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5084                 device_printf(sc->sc_dev, "reading errlog failed\n");
5085                 return;
5086         }
5087
5088         if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5089                 device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5090                 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5091                     sc->sc_flags, table.valid);
5092         }
5093
5094         device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5095                 iwm_desc_lookup(table.error_id));
5096         device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5097         device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5098         device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5099             table.ilink1);
5100         device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5101             table.ilink2);
5102         device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5103         device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5104         device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5105         device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5106         device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5107         device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5108             table.frame_pointer);
5109         device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5110             table.stack_pointer);
5111         device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5112         device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5113             table.nic_isr_pref);
5114 }
5115
5116 /*
5117  * Support for dumping the error log seemed like a good idea ...
5118  * but it's mostly hex junk and the only sensible thing is the
5119  * hw/ucode revision (which we know anyway).  Since it's here,
5120  * I'll just leave it in, just in case e.g. the Intel guys want to
5121  * help us decipher some "ADVANCED_SYSASSERT" later.
5122  */
5123 static void
5124 iwm_nic_error(struct iwm_softc *sc)
5125 {
5126         struct iwm_error_event_table table;
5127         uint32_t base;
5128
5129         device_printf(sc->sc_dev, "dumping device error log\n");
5130         base = sc->sc_uc.uc_error_event_table;
5131         if (base < 0x800000) {
5132                 device_printf(sc->sc_dev,
5133                     "Invalid error log pointer 0x%08x\n", base);
5134                 return;
5135         }
5136
5137         if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5138                 device_printf(sc->sc_dev, "reading errlog failed\n");
5139                 return;
5140         }
5141
5142         if (!table.valid) {
5143                 device_printf(sc->sc_dev, "errlog not found, skipping\n");
5144                 return;
5145         }
5146
5147         if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5148                 device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5149                 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5150                     sc->sc_flags, table.valid);
5151         }
5152
5153         device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5154             iwm_desc_lookup(table.error_id));
5155         device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5156             table.trm_hw_status0);
5157         device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5158             table.trm_hw_status1);
5159         device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5160         device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5161         device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5162         device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5163         device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5164         device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5165         device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5166         device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5167         device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5168         device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5169         device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5170         device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5171             table.fw_rev_type);
5172         device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5173         device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5174         device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5175         device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5176         device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5177         device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5178         device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5179         device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5180         device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5181         device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5182         device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5183         device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5184         device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5185         device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5186         device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5187         device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5188         device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5189         device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5190         device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5191
5192         if (sc->sc_uc.uc_umac_error_event_table)
5193                 iwm_nic_umac_error(sc);
5194 }
5195 #endif
5196
5197 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
5198
5199 /*
5200  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5201  * Basic structure from if_iwn
5202  */
5203 static void
5204 iwm_notif_intr(struct iwm_softc *sc)
5205 {
5206         struct ieee80211com *ic = &sc->sc_ic;
5207         uint16_t hw;
5208
5209         bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5210             BUS_DMASYNC_POSTREAD);
5211
5212         hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5213
5214         /*
5215          * Process responses
5216          */
5217         while (sc->rxq.cur != hw) {
5218                 struct iwm_rx_ring *ring = &sc->rxq;
5219                 struct iwm_rx_data *data = &ring->data[ring->cur];
5220                 struct iwm_rx_packet *pkt;
5221                 struct iwm_cmd_response *cresp;
5222                 int qid, idx, code;
5223
5224                 bus_dmamap_sync(ring->data_dmat, data->map,
5225                     BUS_DMASYNC_POSTREAD);
5226                 pkt = mtod(data->m, struct iwm_rx_packet *);
5227
5228                 qid = pkt->hdr.qid & ~0x80;
5229                 idx = pkt->hdr.idx;
5230
5231                 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5232                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5233                     "rx packet qid=%d idx=%d type=%x %d %d\n",
5234                     pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, ring->cur, hw);
5235
5236                 /*
5237                  * We randomly get these from the firmware, no idea why.
5238                  * They at least seem harmless, so just ignore them for now.
5239                  */
5240                 if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
5241                     || pkt->len_n_flags == htole32(0x55550000))) {
5242                         ADVANCE_RXQ(sc);
5243                         continue;
5244                 }
5245
5246                 switch (code) {
5247                 case IWM_REPLY_RX_PHY_CMD:
5248                         iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
5249                         break;
5250
5251                 case IWM_REPLY_RX_MPDU_CMD:
5252                         iwm_mvm_rx_rx_mpdu(sc, pkt, data);
5253                         break;
5254
5255                 case IWM_TX_CMD:
5256                         iwm_mvm_rx_tx_cmd(sc, pkt, data);
5257                         break;
5258
5259                 case IWM_MISSED_BEACONS_NOTIFICATION: {
5260                         struct iwm_missed_beacons_notif *resp;
5261                         int missed;
5262
5263                         /* XXX look at mac_id to determine interface ID */
5264                         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5265
5266                         resp = (void *)pkt->data;
5267                         missed = le32toh(resp->consec_missed_beacons);
5268
5269                         IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5270                             "%s: MISSED_BEACON: mac_id=%d, "
5271                             "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5272                             "num_rx=%d\n",
5273                             __func__,
5274                             le32toh(resp->mac_id),
5275                             le32toh(resp->consec_missed_beacons_since_last_rx),
5276                             le32toh(resp->consec_missed_beacons),
5277                             le32toh(resp->num_expected_beacons),
5278                             le32toh(resp->num_recvd_beacons));
5279
5280                         /* Be paranoid */
5281                         if (vap == NULL)
5282                                 break;
5283
5284                         /* XXX no net80211 locking? */
5285                         if (vap->iv_state == IEEE80211_S_RUN &&
5286                             (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5287                                 if (missed > vap->iv_bmissthreshold) {
5288                                         /* XXX bad locking; turn into task */
5289                                         IWM_UNLOCK(sc);
5290                                         ieee80211_beacon_miss(ic);
5291                                         IWM_LOCK(sc);
5292                                 }
5293                         }
5294
5295                         break; }
5296
5297                 case IWM_MFUART_LOAD_NOTIFICATION:
5298                         break;
5299
5300                 case IWM_MVM_ALIVE: {
5301                         struct iwm_mvm_alive_resp_v1 *resp1;
5302                         struct iwm_mvm_alive_resp_v2 *resp2;
5303                         struct iwm_mvm_alive_resp_v3 *resp3;
5304
5305                         if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
5306                                 resp1 = (void *)pkt->data;
5307                                 sc->sc_uc.uc_error_event_table
5308                                     = le32toh(resp1->error_event_table_ptr);
5309                                 sc->sc_uc.uc_log_event_table
5310                                     = le32toh(resp1->log_event_table_ptr);
5311                                 sc->sched_base = le32toh(resp1->scd_base_ptr);
5312                                 if (resp1->status == IWM_ALIVE_STATUS_OK)
5313                                         sc->sc_uc.uc_ok = 1;
5314                                 else
5315                                         sc->sc_uc.uc_ok = 0;
5316                         }
5317
5318                         if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
5319                                 resp2 = (void *)pkt->data;
5320                                 sc->sc_uc.uc_error_event_table
5321                                     = le32toh(resp2->error_event_table_ptr);
5322                                 sc->sc_uc.uc_log_event_table
5323                                     = le32toh(resp2->log_event_table_ptr);
5324                                 sc->sched_base = le32toh(resp2->scd_base_ptr);
5325                                 sc->sc_uc.uc_umac_error_event_table
5326                                     = le32toh(resp2->error_info_addr);
5327                                 if (resp2->status == IWM_ALIVE_STATUS_OK)
5328                                         sc->sc_uc.uc_ok = 1;
5329                                 else
5330                                         sc->sc_uc.uc_ok = 0;
5331                         }
5332
5333                         if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
5334                                 resp3 = (void *)pkt->data;
5335                                 sc->sc_uc.uc_error_event_table
5336                                     = le32toh(resp3->error_event_table_ptr);
5337                                 sc->sc_uc.uc_log_event_table
5338                                     = le32toh(resp3->log_event_table_ptr);
5339                                 sc->sched_base = le32toh(resp3->scd_base_ptr);
5340                                 sc->sc_uc.uc_umac_error_event_table
5341                                     = le32toh(resp3->error_info_addr);
5342                                 if (resp3->status == IWM_ALIVE_STATUS_OK)
5343                                         sc->sc_uc.uc_ok = 1;
5344                                 else
5345                                         sc->sc_uc.uc_ok = 0;
5346                         }
5347
5348                         sc->sc_uc.uc_intr = 1;
5349                         wakeup(&sc->sc_uc);
5350                         break; }
5351
5352                 case IWM_CALIB_RES_NOTIF_PHY_DB: {
5353                         struct iwm_calib_res_notif_phy_db *phy_db_notif;
5354                         phy_db_notif = (void *)pkt->data;
5355
5356                         iwm_phy_db_set_section(sc->sc_phy_db, phy_db_notif);
5357
5358                         break; }
5359
5360                 case IWM_STATISTICS_NOTIFICATION: {
5361                         struct iwm_notif_statistics *stats;
5362                         stats = (void *)pkt->data;
5363                         memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5364                         sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
5365                         break; }
5366
5367                 case IWM_NVM_ACCESS_CMD:
5368                 case IWM_MCC_UPDATE_CMD:
5369                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
5370                                 memcpy(sc->sc_cmd_resp,
5371                                     pkt, sizeof(sc->sc_cmd_resp));
5372                         }
5373                         break;
5374
5375                 case IWM_MCC_CHUB_UPDATE_CMD: {
5376                         struct iwm_mcc_chub_notif *notif;
5377                         notif = (void *)pkt->data;
5378
5379                         sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5380                         sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5381                         sc->sc_fw_mcc[2] = '\0';
5382                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
5383                             "fw source %d sent CC '%s'\n",
5384                             notif->source_id, sc->sc_fw_mcc);
5385                         break; }
5386
5387                 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5388                         break;
5389
5390                 case IWM_PHY_CONFIGURATION_CMD:
5391                 case IWM_TX_ANT_CONFIGURATION_CMD:
5392                 case IWM_ADD_STA:
5393                 case IWM_MAC_CONTEXT_CMD:
5394                 case IWM_REPLY_SF_CFG_CMD:
5395                 case IWM_POWER_TABLE_CMD:
5396                 case IWM_PHY_CONTEXT_CMD:
5397                 case IWM_BINDING_CONTEXT_CMD:
5398                 case IWM_TIME_EVENT_CMD:
5399                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5400                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5401                 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5402                 case IWM_REPLY_BEACON_FILTERING_CMD:
5403                 case IWM_MAC_PM_POWER_TABLE:
5404                 case IWM_TIME_QUOTA_CMD:
5405                 case IWM_REMOVE_STA:
5406                 case IWM_TXPATH_FLUSH:
5407                 case IWM_LQ_CMD:
5408                 case IWM_BT_CONFIG:
5409                 case IWM_REPLY_THERMAL_MNG_BACKOFF:
5410                         cresp = (void *)pkt->data;
5411                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
5412                                 memcpy(sc->sc_cmd_resp,
5413                                     pkt, sizeof(*pkt)+sizeof(*cresp));
5414                         }
5415                         break;
5416
5417                 /* ignore */
5418                 case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
5419                         break;
5420
5421                 case IWM_INIT_COMPLETE_NOTIF:
5422                         sc->sc_init_complete = 1;
5423                         wakeup(&sc->sc_init_complete);
5424                         break;
5425
5426                 case IWM_SCAN_OFFLOAD_COMPLETE: {
5427                         struct iwm_periodic_scan_complete *notif;
5428                         notif = (void *)pkt->data;
5429                         break;
5430                 }
5431
5432                 case IWM_SCAN_ITERATION_COMPLETE: {
5433                         struct iwm_lmac_scan_complete_notif *notif;
5434                         notif = (void *)pkt->data;
5435                         ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5436                         break;
5437                 }
5438
5439                 case IWM_SCAN_COMPLETE_UMAC: {
5440                         struct iwm_umac_scan_complete *notif;
5441                         notif = (void *)pkt->data;
5442
5443                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
5444                             "UMAC scan complete, status=0x%x\n",
5445                             notif->status);
5446 #if 0   /* XXX This would be a duplicate scan end call */
5447                         taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
5448 #endif
5449                         break;
5450                 }
5451
5452                 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5453                         struct iwm_umac_scan_iter_complete_notif *notif;
5454                         notif = (void *)pkt->data;
5455
5456                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5457                             "complete, status=0x%x, %d channels scanned\n",
5458                             notif->status, notif->scanned_channels);
5459                         ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5460                         break;
5461                 }
5462
5463                 case IWM_REPLY_ERROR: {
5464                         struct iwm_error_resp *resp;
5465                         resp = (void *)pkt->data;
5466
5467                         device_printf(sc->sc_dev,
5468                             "firmware error 0x%x, cmd 0x%x\n",
5469                             le32toh(resp->error_type),
5470                             resp->cmd_id);
5471                         break;
5472                 }
5473
5474                 case IWM_TIME_EVENT_NOTIFICATION: {
5475                         struct iwm_time_event_notif *notif;
5476                         notif = (void *)pkt->data;
5477
5478                         IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5479                             "TE notif status = 0x%x action = 0x%x\n",
5480                             notif->status, notif->action);
5481                         break;
5482                 }
5483
5484                 case IWM_MCAST_FILTER_CMD:
5485                         break;
5486
5487                 case IWM_SCD_QUEUE_CFG: {
5488                         struct iwm_scd_txq_cfg_rsp *rsp;
5489                         rsp = (void *)pkt->data;
5490
5491                         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5492                             "queue cfg token=0x%x sta_id=%d "
5493                             "tid=%d scd_queue=%d\n",
5494                             rsp->token, rsp->sta_id, rsp->tid,
5495                             rsp->scd_queue);
5496                         break;
5497                 }
5498
5499                 default:
5500                         device_printf(sc->sc_dev,
5501                             "frame %d/%d %x UNHANDLED (this should "
5502                             "not happen)\n", qid, idx,
5503                             pkt->len_n_flags);
5504                         break;
5505                 }
5506
5507                 /*
5508                  * Why test bit 0x80?  The Linux driver:
5509                  *
5510                  * There is one exception:  uCode sets bit 15 when it
5511                  * originates the response/notification, i.e. when the
5512                  * response/notification is not a direct response to a
5513                  * command sent by the driver.  For example, uCode issues
5514                  * IWM_REPLY_RX when it sends a received frame to the driver;
5515                  * it is not a direct response to any driver command.
5516                  *
5517                  * Ok, so since when is 7 == 15?  Well, the Linux driver
5518                  * uses a slightly different format for pkt->hdr, and "qid"
5519                  * is actually the upper byte of a two-byte field.
5520                  */
5521                 if (!(pkt->hdr.qid & (1 << 7))) {
5522                         iwm_cmd_done(sc, pkt);
5523                 }
5524
5525                 ADVANCE_RXQ(sc);
5526         }
5527
5528         IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
5529             IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
5530
5531         /*
5532          * Tell the firmware what we have processed.
5533          * Seems like the hardware gets upset unless we align
5534          * the write by 8??
5535          */
5536         hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5537         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
5538 }
5539
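/*
 * Interrupt handler.  Reads the interrupt cause either from the ICT table
 * (when enabled) or directly from CSR_INT, then handles firmware/hardware
 * errors, rfkill, firmware-chunk-loaded wakeups and RX notifications, and
 * finally re-enables interrupts on the way out.
 */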
5540 static void
5541 iwm_intr(void *arg)
5542 {
5543         struct iwm_softc *sc = arg;
5544         int handled = 0;
5545         int r1, r2, rv = 0;
5546         int isperiodic = 0;
5547
5548         IWM_LOCK(sc);
5549         IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5550
5551         if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5552                 uint32_t *ict = sc->ict_dma.vaddr;
5553                 int tmp;
5554
5555                 tmp = htole32(ict[sc->ict_cur]);
5556                 if (!tmp)
5557                         goto out_ena;
5558
5559                 /*
5560                  * ok, there was something.  keep plowing until we have all.
5561                  */
5562                 r1 = r2 = 0;
5563                 while (tmp) {
5564                         r1 |= tmp;
5565                         ict[sc->ict_cur] = 0;
5566                         sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5567                         tmp = htole32(ict[sc->ict_cur]);
5568                 }
5569
5570                 /* An all-ones value is not a valid cause; discard it. */
5571                 if (r1 == 0xffffffff)
5572                         r1 = 0;
5573
5574                 /*
                      * H/W bug workaround (from the Linux driver): with
                      * interrupt coalescing the RX bit (15 here, 31 after the
                      * remap below) may clear while bits 18/19 stay set, so
                      * infer it from those.  The remap puts the low byte in
                      * CSR_INT bits 0-7 and the next byte in bits 24-31.
                      */
5575                 if (r1 & 0xc0000)
5576                         r1 |= 0x8000;
5577                 r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5578         } else {
5579                 r1 = IWM_READ(sc, IWM_CSR_INT);
5580                 /* All-ones or 0xa5a5a5a* means the hardware has gone away. */
5581                 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5582                         goto out;
5583                 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5584         }
5585         if (r1 == 0 && r2 == 0) {
5586                 goto out_ena;
5587         }
5588
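        /*
         * Ack/clear the interrupt causes we just read, including any that
         * are currently masked off, so stale causes do not linger.
         */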
5589         IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5590
5591         /* ignored */
5592         handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
5593
5594         if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5595                 int i;
5596                 struct ieee80211com *ic = &sc->sc_ic;
5597                 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5598
5599 #ifdef IWM_DEBUG
5600                 iwm_nic_error(sc);
5601 #endif
5602                 /* Dump driver status (TX and RX rings) while we're here. */
5603                 device_printf(sc->sc_dev, "driver status:\n");
5604                 for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5605                         struct iwm_tx_ring *ring = &sc->txq[i];
5606                         device_printf(sc->sc_dev,
5607                             "  tx ring %2d: qid=%-2d cur=%-3d "
5608                             "queued=%-3d\n",
5609                             i, ring->qid, ring->cur, ring->queued);
5610                 }
5611                 device_printf(sc->sc_dev,
5612                     "  rx ring: cur=%d\n", sc->rxq.cur);
5613                 device_printf(sc->sc_dev,
5614                     "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5615
5616                 /* Don't stop the device; just do a VAP restart */
5617                 IWM_UNLOCK(sc);
5618
5619                 if (vap == NULL) {
5620                         printf("%s: null vap\n", __func__);
5621                         return;
5622                 }
5623
5624                 device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5625                     "restarting\n", __func__, vap->iv_state);
5626
5627                 /* XXX TODO: turn this into a callout/taskqueue */
5628                 ieee80211_restart_all(ic);
5629                 return;
5630         }
5631
5632         if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5633                 handled |= IWM_CSR_INT_BIT_HW_ERR;
5634                 device_printf(sc->sc_dev, "hardware error, stopping device\n");
5635                 iwm_stop(sc);
5636                 rv = 1;
5637                 goto out;
5638         }
5639
5640         /* firmware chunk loaded */
5641         if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5642                 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5643                 handled |= IWM_CSR_INT_BIT_FH_TX;
5644                 sc->sc_fw_chunk_done = 1;
5645                 wakeup(&sc->sc_fw);
5646         }
5647
5648         if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5649                 handled |= IWM_CSR_INT_BIT_RF_KILL;
5650                 if (iwm_check_rfkill(sc)) {
5651                         device_printf(sc->sc_dev,
5652                             "%s: rfkill switch, disabling interface\n",
5653                             __func__);
5654                         iwm_stop(sc);
5655                 }
5656         }
5657
5658         /*
5659          * The Linux driver uses periodic interrupts to avoid RX
5660          * processing races; do the same here.
5661          */
5662         if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5663                 handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5664                 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5665                 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5666                         IWM_WRITE_1(sc,
5667                             IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5668                 isperiodic = 1;
5669         }
5670
5671         if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5672                 handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5673                 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5674
5675                 iwm_notif_intr(sc);
5676
5677                 /* enable periodic interrupt, see above */
5678                 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5679                         IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5680                             IWM_CSR_INT_PERIODIC_ENA);
5681         }
5682
5683         if (__predict_false(r1 & ~handled))
5684                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5685                     "%s: unhandled interrupts: %x\n", __func__, r1);
5686         rv = 1;
5687
5688  out_ena:
5689         iwm_restore_interrupts(sc);
5690  out:
5691         IWM_UNLOCK(sc);
5692         return;
5693 }
5694
5695 /*
5696  * Autoconf glue-sniffing
5697  */
5698 #define PCI_VENDOR_INTEL                0x8086
5699 #define PCI_PRODUCT_INTEL_WL_3160_1     0x08b3
5700 #define PCI_PRODUCT_INTEL_WL_3160_2     0x08b4
5701 #define PCI_PRODUCT_INTEL_WL_3165_1     0x3165
5702 #define PCI_PRODUCT_INTEL_WL_3165_2     0x3166
5703 #define PCI_PRODUCT_INTEL_WL_7260_1     0x08b1
5704 #define PCI_PRODUCT_INTEL_WL_7260_2     0x08b2
5705 #define PCI_PRODUCT_INTEL_WL_7265_1     0x095a
5706 #define PCI_PRODUCT_INTEL_WL_7265_2     0x095b
5707 #define PCI_PRODUCT_INTEL_WL_8260_1     0x24f3
5708 #define PCI_PRODUCT_INTEL_WL_8260_2     0x24f4
5709
5710 static const struct iwm_devices {
5711         uint16_t        device;
5712         const char      *name;
5713 } iwm_devices[] = {
5714         { PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
5715         { PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
5716         { PCI_PRODUCT_INTEL_WL_3165_1, "Intel Dual Band Wireless AC 3165" },
5717         { PCI_PRODUCT_INTEL_WL_3165_2, "Intel Dual Band Wireless AC 3165" },
5718         { PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
5719         { PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
5720         { PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
5721         { PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
5722         { PCI_PRODUCT_INTEL_WL_8260_1, "Intel Dual Band Wireless AC 8260" },
5723         { PCI_PRODUCT_INTEL_WL_8260_2, "Intel Dual Band Wireless AC 8260" },
5724 };
5725
5726 static int
5727 iwm_probe(device_t dev)
5728 {
5729         int i;
5730
5731         for (i = 0; i < nitems(iwm_devices); i++) {
5732                 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5733                     pci_get_device(dev) == iwm_devices[i].device) {
5734                         device_set_desc(dev, iwm_devices[i].name);
5735                         return (BUS_PROBE_DEFAULT);
5736                 }
5737         }
5738
5739         return (ENXIO);
5740 }
5741
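/*
 * Pick the per-chip configuration and firmware DMA segment size based on
 * the PCI device ID.
 */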
5742 static int
5743 iwm_dev_check(device_t dev)
5744 {
5745         struct iwm_softc *sc;
5746
5747         sc = device_get_softc(dev);
5748
5749         switch (pci_get_device(dev)) {
5750         case PCI_PRODUCT_INTEL_WL_3160_1:
5751         case PCI_PRODUCT_INTEL_WL_3160_2:
5752                 sc->cfg = &iwm3160_cfg;
5753                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5754                 return (0);
5755         case PCI_PRODUCT_INTEL_WL_3165_1:
5756         case PCI_PRODUCT_INTEL_WL_3165_2:
5757                 sc->cfg = &iwm3165_cfg;
5758                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5759                 return (0);
5760         case PCI_PRODUCT_INTEL_WL_7260_1:
5761         case PCI_PRODUCT_INTEL_WL_7260_2:
5762                 sc->cfg = &iwm7260_cfg;
5763                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5764                 return (0);
5765         case PCI_PRODUCT_INTEL_WL_7265_1:
5766         case PCI_PRODUCT_INTEL_WL_7265_2:
5767                 sc->cfg = &iwm7265_cfg;
5768                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5769                 return (0);
5770         case PCI_PRODUCT_INTEL_WL_8260_1:
5771         case PCI_PRODUCT_INTEL_WL_8260_2:
5772                 sc->cfg = &iwm8260_cfg;
5773                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
5774                 return (0);
5775         default:
5776                 device_printf(dev, "unknown adapter type\n");
5777                 return ENXIO;
5778         }
5779 }
5780
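/*
 * Bus-level attach: map the BAR 0 register space, hook up the interrupt
 * handler (MSI if available, otherwise a shared INTx line) and fetch the
 * parent DMA tag.
 */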
5781 static int
5782 iwm_pci_attach(device_t dev)
5783 {
5784         struct iwm_softc *sc;
5785         int count, error, rid;
5786         uint16_t reg;
5787
5788         sc = device_get_softc(dev);
5789
5790         /* Clear device-specific "PCI retry timeout" register (41h). */
5791         reg = pci_read_config(dev, 0x40, sizeof(reg));
5792         pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
5793
5794         /* Enable bus-mastering and hardware bug workaround. */
5795         pci_enable_busmaster(dev);
5796         reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5797         /* if !MSI */
5798         if (reg & PCIM_STATUS_INTxSTATE) {
5799                 reg &= ~PCIM_STATUS_INTxSTATE;
5800         }
5801         pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5802
5803         rid = PCIR_BAR(0);
5804         sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5805             RF_ACTIVE);
5806         if (sc->sc_mem == NULL) {
5807                 device_printf(sc->sc_dev, "can't map mem space\n");
5808                 return (ENXIO);
5809         }
5810         sc->sc_st = rman_get_bustag(sc->sc_mem);
5811         sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5812
5813         /* Install interrupt handler. */
5814         count = 1;
5815         rid = 0;
5816         if (pci_alloc_msi(dev, &count) == 0)
5817                 rid = 1;
5818         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5819             (rid != 0 ? 0 : RF_SHAREABLE));
5820         if (sc->sc_irq == NULL) {
5821                 device_printf(dev, "can't map interrupt\n");
5822                 return (ENXIO);
5823         }
5824         error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5825             NULL, iwm_intr, sc, &sc->sc_ih);
5826         if (error != 0 || sc->sc_ih == NULL) {
5827                 device_printf(dev, "can't establish interrupt\n");
5828                 return (ENXIO);
5829         }
5830         sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5831
5832         return (0);
5833 }
5834
5835 static void
5836 iwm_pci_detach(device_t dev)
5837 {
5838         struct iwm_softc *sc = device_get_softc(dev);
5839
5840         if (sc->sc_irq != NULL) {
5841                 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5842                 bus_release_resource(dev, SYS_RES_IRQ,
5843                     rman_get_rid(sc->sc_irq), sc->sc_irq);
5844                 pci_release_msi(dev);
5845         }
5846         if (sc->sc_mem != NULL)
5847                 bus_release_resource(dev, SYS_RES_MEMORY,
5848                     rman_get_rid(sc->sc_mem), sc->sc_mem);
5849 }
5850
5851
5852
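/*
 * Device attach: initialize locks, callouts and tasks, attach to the PCI
 * bus, identify the exact chip revision, allocate firmware/ICT/scheduler
 * DMA memory and the TX/RX rings, and schedule iwm_preinit() via a
 * config_intrhook to finish firmware loading and net80211 attach.
 */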
5853 static int
5854 iwm_attach(device_t dev)
5855 {
5856         struct iwm_softc *sc = device_get_softc(dev);
5857         struct ieee80211com *ic = &sc->sc_ic;
5858         int error;
5859         int txq_i, i;
5860
5861         sc->sc_dev = dev;
5862         sc->sc_attached = 1;
5863         IWM_LOCK_INIT(sc);
5864         mbufq_init(&sc->sc_snd, ifqmaxlen);
5865         callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5866         callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
5867         TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5868
5869         /* Init phy db */
5870         sc->sc_phy_db = iwm_phy_db_init(sc);
5871         if (!sc->sc_phy_db) {
5872                 device_printf(dev, "Cannot init phy_db\n");
5873                 goto fail;
5874         }
5875
5876         /* PCI attach */
5877         error = iwm_pci_attach(dev);
5878         if (error != 0)
5879                 goto fail;
5880
5881         sc->sc_wantresp = -1;
5882
5883         /* Check device type */
5884         error = iwm_dev_check(dev);
5885         if (error != 0)
5886                 goto fail;
5887
5888         sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
5889         /*
5890          * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
5891          * changed, and now the revision step also includes bit 0-1 (no more
5892          * "dash" value). To keep hw_rev backwards compatible - we'll store it
5893          * in the old format.
5894          */
5895         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
5896                 sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
5897                                 (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
5898
5899         if (iwm_prepare_card_hw(sc) != 0) {
5900                 device_printf(dev, "could not initialize hardware\n");
5901                 goto fail;
5902         }
5903
5904         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
5905                 int ret;
5906                 uint32_t hw_step;
5907
5908                 /*
5909                  * In order to recognize C step the driver should read the
5910                  * chip version id located at the AUX bus MISC address.
5911                  */
5912                 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
5913                             IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
5914                 DELAY(2);
5915
5916                 ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
5917                                    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5918                                    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5919                                    25000);
5920                 if (!ret) {
5921                         device_printf(sc->sc_dev,
5922                             "Failed to wake up the nic\n");
5923                         goto fail;
5924                 }
5925
5926                 if (iwm_nic_lock(sc)) {
5927                         hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
5928                         hw_step |= IWM_ENABLE_WFPM;
5929                         iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
5930                         hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
5931                         hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
5932                         if (hw_step == 0x3)
5933                                 sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
5934                                                 (IWM_SILICON_C_STEP << 2);
5935                         iwm_nic_unlock(sc);
5936                 } else {
5937                         device_printf(sc->sc_dev, "Failed to lock the nic\n");
5938                         goto fail;
5939                 }
5940         }
5941
5942         /* special-case 7265D, it has the same PCI IDs. */
5943         if (sc->cfg == &iwm7265_cfg &&
5944             (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
5945                 sc->cfg = &iwm7265d_cfg;
5946         }
5947
5948         /* Allocate DMA memory for firmware transfers. */
5949         if ((error = iwm_alloc_fwmem(sc)) != 0) {
5950                 device_printf(dev, "could not allocate memory for firmware\n");
5951                 goto fail;
5952         }
5953
5954         /* Allocate "Keep Warm" page. */
5955         if ((error = iwm_alloc_kw(sc)) != 0) {
5956                 device_printf(dev, "could not allocate keep warm page\n");
5957                 goto fail;
5958         }
5959
5960         /* We use ICT interrupts */
5961         if ((error = iwm_alloc_ict(sc)) != 0) {
5962                 device_printf(dev, "could not allocate ICT table\n");
5963                 goto fail;
5964         }
5965
5966         /* Allocate TX scheduler "rings". */
5967         if ((error = iwm_alloc_sched(sc)) != 0) {
5968                 device_printf(dev, "could not allocate TX scheduler rings\n");
5969                 goto fail;
5970         }
5971
5972         /* Allocate TX rings */
5973         for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
5974                 if ((error = iwm_alloc_tx_ring(sc,
5975                     &sc->txq[txq_i], txq_i)) != 0) {
5976                         device_printf(dev,
5977                             "could not allocate TX ring %d\n",
5978                             txq_i);
5979                         goto fail;
5980                 }
5981         }
5982
5983         /* Allocate RX ring. */
5984         if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
5985                 device_printf(dev, "could not allocate RX ring\n");
5986                 goto fail;
5987         }
5988
5989         /* Clear pending interrupts. */
5990         IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
5991
5992         ic->ic_softc = sc;
5993         ic->ic_name = device_get_nameunit(sc->sc_dev);
5994         ic->ic_phytype = IEEE80211_T_OFDM;      /* not only, but not used */
5995         ic->ic_opmode = IEEE80211_M_STA;        /* default to BSS mode */
5996
5997         /* Set device capabilities. */
5998         ic->ic_caps =
5999             IEEE80211_C_STA |
6000             IEEE80211_C_WPA |           /* WPA/RSN */
6001             IEEE80211_C_WME |
6002             IEEE80211_C_SHSLOT |        /* short slot time supported */
6003             IEEE80211_C_SHPREAMBLE      /* short preamble supported */
6004 //          IEEE80211_C_BGSCAN          /* capable of bg scanning */
6005             ;
6006         /* Advertise full-offload scanning */
6007         ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
6008         for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
6009                 sc->sc_phyctxt[i].id = i;
6010                 sc->sc_phyctxt[i].color = 0;
6011                 sc->sc_phyctxt[i].ref = 0;
6012                 sc->sc_phyctxt[i].channel = NULL;
6013         }
6014
6015         /* Default noise floor */
6016         sc->sc_noise = -96;
6017
6018         /* Max RSSI */
6019         sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
6020
6021         sc->sc_preinit_hook.ich_func = iwm_preinit;
6022         sc->sc_preinit_hook.ich_arg = sc;
6023         if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6024                 device_printf(dev, "config_intrhook_establish failed\n");
6025                 goto fail;
6026         }
6027
6028 #ifdef IWM_DEBUG
6029         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6030             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6031             CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6032 #endif
6033
6034         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6035             "<-%s\n", __func__);
6036
6037         return 0;
6038
6039         /* Free allocated memory if something failed during attachment. */
6040 fail:
6041         iwm_detach_local(sc, 0);
6042
6043         return ENXIO;
6044 }
6045
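/*
 * Reject group (multicast/broadcast) addresses and the all-zero address.
 */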
6046 static int
6047 iwm_is_valid_ether_addr(uint8_t *addr)
6048 {
6049         char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6050
6051         if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6052                 return (FALSE);
6053
6054         return (TRUE);
6055 }
6056
6057 static int
6058 iwm_update_edca(struct ieee80211com *ic)
6059 {
6060         struct iwm_softc *sc = ic->ic_softc;
6061
6062         device_printf(sc->sc_dev, "%s: called\n", __func__);
6063         return (0);
6064 }
6065
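/*
 * Deferred attach, run from a config_intrhook: load and run the "init"
 * firmware once to read the NVM (MAC address, supported bands), build the
 * channel map, and only then attach net80211 and radiotap.
 */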
6066 static void
6067 iwm_preinit(void *arg)
6068 {
6069         struct iwm_softc *sc = arg;
6070         device_t dev = sc->sc_dev;
6071         struct ieee80211com *ic = &sc->sc_ic;
6072         int error;
6073
6074         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6075             "->%s\n", __func__);
6076
6077         IWM_LOCK(sc);
6078         if ((error = iwm_start_hw(sc)) != 0) {
6079                 device_printf(dev, "could not initialize hardware\n");
6080                 IWM_UNLOCK(sc);
6081                 goto fail;
6082         }
6083
6084         error = iwm_run_init_mvm_ucode(sc, 1);
6085         iwm_stop_device(sc);
6086         if (error) {
6087                 IWM_UNLOCK(sc);
6088                 goto fail;
6089         }
6090         device_printf(dev,
6091             "hw rev 0x%x, fw ver %s, address %s\n",
6092             sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6093             sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6094
6095         /* not all hardware can do 5GHz band */
6096         if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6097                 memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6098                     sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6099         IWM_UNLOCK(sc);
6100
6101         iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6102             ic->ic_channels);
6103
6104         /*
6105          * At this point we've committed - if we fail to do setup,
6106          * we now also have to tear down the net80211 state.
6107          */
6108         ieee80211_ifattach(ic);
6109         ic->ic_vap_create = iwm_vap_create;
6110         ic->ic_vap_delete = iwm_vap_delete;
6111         ic->ic_raw_xmit = iwm_raw_xmit;
6112         ic->ic_node_alloc = iwm_node_alloc;
6113         ic->ic_scan_start = iwm_scan_start;
6114         ic->ic_scan_end = iwm_scan_end;
6115         ic->ic_update_mcast = iwm_update_mcast;
6116         ic->ic_getradiocaps = iwm_init_channel_map;
6117         ic->ic_set_channel = iwm_set_channel;
6118         ic->ic_scan_curchan = iwm_scan_curchan;
6119         ic->ic_scan_mindwell = iwm_scan_mindwell;
6120         ic->ic_wme.wme_update = iwm_update_edca;
6121         ic->ic_parent = iwm_parent;
6122         ic->ic_transmit = iwm_transmit;
6123         iwm_radiotap_attach(sc);
6124         if (bootverbose)
6125                 ieee80211_announce(ic);
6126
6127         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6128             "<-%s\n", __func__);
6129         config_intrhook_disestablish(&sc->sc_preinit_hook);
6130
6131         return;
6132 fail:
6133         config_intrhook_disestablish(&sc->sc_preinit_hook);
6134         iwm_detach_local(sc, 0);
6135 }
6136
6137 /*
6138  * Attach the interface to 802.11 radiotap.
6139  */
6140 static void
6141 iwm_radiotap_attach(struct iwm_softc *sc)
6142 {
6143         struct ieee80211com *ic = &sc->sc_ic;
6144
6145         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6146             "->%s begin\n", __func__);
6147         ieee80211_radiotap_attach(ic,
6148             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6149                 IWM_TX_RADIOTAP_PRESENT,
6150             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6151                 IWM_RX_RADIOTAP_PRESENT);
6152         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6153             "<-%s end\n", __func__);
6154 }
6155
6156 static struct ieee80211vap *
6157 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6158     enum ieee80211_opmode opmode, int flags,
6159     const uint8_t bssid[IEEE80211_ADDR_LEN],
6160     const uint8_t mac[IEEE80211_ADDR_LEN])
6161 {
6162         struct iwm_vap *ivp;
6163         struct ieee80211vap *vap;
6164
6165         if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6166                 return NULL;
6167         ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6168         vap = &ivp->iv_vap;
6169         ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6170         vap->iv_bmissthreshold = 10;            /* override default */
6171         /* Override with driver methods. */
6172         ivp->iv_newstate = vap->iv_newstate;
6173         vap->iv_newstate = iwm_newstate;
6174
6175         ieee80211_ratectl_init(vap);
6176         /* Complete setup. */
6177         ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6178             mac);
6179         ic->ic_opmode = opmode;
6180
6181         return vap;
6182 }
6183
6184 static void
6185 iwm_vap_delete(struct ieee80211vap *vap)
6186 {
6187         struct iwm_vap *ivp = IWM_VAP(vap);
6188
6189         ieee80211_ratectl_deinit(vap);
6190         ieee80211_vap_detach(vap);
6191         free(ivp, M_80211_VAP);
6192 }
6193
6194 static void
6195 iwm_scan_start(struct ieee80211com *ic)
6196 {
6197         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6198         struct iwm_softc *sc = ic->ic_softc;
6199         int error;
6200
6201         IWM_LOCK(sc);
6202         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6203                 error = iwm_mvm_umac_scan(sc);
6204         else
6205                 error = iwm_mvm_lmac_scan(sc);
6206         if (error != 0) {
6207                 device_printf(sc->sc_dev, "could not initiate scan\n");
6208                 IWM_UNLOCK(sc);
6209                 ieee80211_cancel_scan(vap);
6210         } else {
6211                 iwm_led_blink_start(sc);
6212                 IWM_UNLOCK(sc);
6213         }
6214 }
6215
6216 static void
6217 iwm_scan_end(struct ieee80211com *ic)
6218 {
6219         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6220         struct iwm_softc *sc = ic->ic_softc;
6221
6222         IWM_LOCK(sc);
6223         iwm_led_blink_stop(sc);
6224         if (vap->iv_state == IEEE80211_S_RUN)
6225                 iwm_mvm_led_enable(sc);
6226         IWM_UNLOCK(sc);
6227 }
6228
6229 static void
6230 iwm_update_mcast(struct ieee80211com *ic)
6231 {
6232 }
6233
6234 static void
6235 iwm_set_channel(struct ieee80211com *ic)
6236 {
6237 }
6238
6239 static void
6240 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6241 {
6242 }
6243
6244 static void
6245 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6246 {
6247         return;
6248 }
6249
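/*
 * Serialized (re)initialization: wait for any other init/stop in progress,
 * stop the device, then bring it back up if an interface is still running.
 */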
6250 void
6251 iwm_init_task(void *arg1)
6252 {
6253         struct iwm_softc *sc = arg1;
6254
6255         IWM_LOCK(sc);
6256         while (sc->sc_flags & IWM_FLAG_BUSY)
6257                 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6258         sc->sc_flags |= IWM_FLAG_BUSY;
6259         iwm_stop(sc);
6260         if (sc->sc_ic.ic_nrunning > 0)
6261                 iwm_init(sc);
6262         sc->sc_flags &= ~IWM_FLAG_BUSY;
6263         wakeup(&sc->sc_flags);
6264         IWM_UNLOCK(sc);
6265 }
6266
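/*
 * Power-management resume: redo the PCI retry-timeout workaround and
 * reinitialize.  IWM_FLAG_SCANNING appears to be reused by suspend as a
 * marker requesting a full ieee80211_resume_all() here.
 */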
6267 static int
6268 iwm_resume(device_t dev)
6269 {
6270         struct iwm_softc *sc = device_get_softc(dev);
6271         int do_reinit = 0;
6272         uint16_t reg;
6273
6274         /* Clear device-specific "PCI retry timeout" register (41h). */
6275         reg = pci_read_config(dev, 0x40, sizeof(reg));
6276         pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
6277         iwm_init_task(device_get_softc(dev));
6278
6279         IWM_LOCK(sc);
6280         if (sc->sc_flags & IWM_FLAG_SCANNING) {
6281                 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6282                 do_reinit = 1;
6283         }
6284         IWM_UNLOCK(sc);
6285
6286         if (do_reinit)
6287                 ieee80211_resume_all(&sc->sc_ic);
6288
6289         return 0;
6290 }
6291
6292 static int
6293 iwm_suspend(device_t dev)
6294 {
6295         int do_stop = 0;
6296         struct iwm_softc *sc = device_get_softc(dev);
6297
6298         do_stop = (sc->sc_ic.ic_nrunning > 0);
6299
6300         ieee80211_suspend_all(&sc->sc_ic);
6301
6302         if (do_stop) {
6303                 IWM_LOCK(sc);
6304                 iwm_stop(sc);
6305                 sc->sc_flags |= IWM_FLAG_SCANNING;
6306                 IWM_UNLOCK(sc);
6307         }
6308
6309         return (0);
6310 }
6311
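/*
 * Common teardown path, also used to unwind a failed attach: drain tasks
 * and callouts, stop the device, detach net80211 if requested, and release
 * all DMA memory and bus resources.
 */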
6312 static int
6313 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6314 {
6315         struct iwm_fw_info *fw = &sc->sc_fw;
6316         device_t dev = sc->sc_dev;
6317         int i;
6318
6319         if (!sc->sc_attached)
6320                 return 0;
6321         sc->sc_attached = 0;
6322
6323         if (do_net80211)
6324                 ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6325
6326         callout_drain(&sc->sc_led_blink_to);
6327         callout_drain(&sc->sc_watchdog_to);
6328         iwm_stop_device(sc);
6329         if (do_net80211) {
6330                 ieee80211_ifdetach(&sc->sc_ic);
6331         }
6332
6333         iwm_phy_db_free(sc->sc_phy_db);
6334         sc->sc_phy_db = NULL;
6335
6336         iwm_free_nvm_data(sc->nvm_data);
6337
6338         /* Free descriptor rings */
6339         iwm_free_rx_ring(sc, &sc->rxq);
6340         for (i = 0; i < nitems(sc->txq); i++)
6341                 iwm_free_tx_ring(sc, &sc->txq[i]);
6342
6343         /* Free firmware */
6344         if (fw->fw_fp != NULL)
6345                 iwm_fw_info_free(fw);
6346
6347         /* Free scheduler */
6348         iwm_dma_contig_free(&sc->sched_dma);
6349         iwm_dma_contig_free(&sc->ict_dma);
6350         iwm_dma_contig_free(&sc->kw_dma);
6351         iwm_dma_contig_free(&sc->fw_dma);
6352
6353         /* Finished with the hardware - detach things */
6354         iwm_pci_detach(dev);
6355
6356         mbufq_drain(&sc->sc_snd);
6357         IWM_LOCK_DESTROY(sc);
6358
6359         return (0);
6360 }
6361
6362 static int
6363 iwm_detach(device_t dev)
6364 {
6365         struct iwm_softc *sc = device_get_softc(dev);
6366
6367         return (iwm_detach_local(sc, 1));
6368 }
6369
6370 static device_method_t iwm_pci_methods[] = {
6371         /* Device interface */
6372         DEVMETHOD(device_probe,         iwm_probe),
6373         DEVMETHOD(device_attach,        iwm_attach),
6374         DEVMETHOD(device_detach,        iwm_detach),
6375         DEVMETHOD(device_suspend,       iwm_suspend),
6376         DEVMETHOD(device_resume,        iwm_resume),
6377
6378         DEVMETHOD_END
6379 };
6380
6381 static driver_t iwm_pci_driver = {
6382         "iwm",
6383         iwm_pci_methods,
6384         sizeof (struct iwm_softc)
6385 };
6386
6387 static devclass_t iwm_devclass;
6388
6389 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6390 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6391 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6392 MODULE_DEPEND(iwm, wlan, 1, 1, 1);