/* sys/dev/iwm/if_iwm.c (FreeBSD, MFC r314070) */
1 /*      $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $     */
2
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 #include <sys/cdefs.h>
106 __FBSDID("$FreeBSD$");
107
108 #include "opt_wlan.h"
109
110 #include <sys/param.h>
111 #include <sys/bus.h>
112 #include <sys/conf.h>
113 #include <sys/endian.h>
114 #include <sys/firmware.h>
115 #include <sys/kernel.h>
116 #include <sys/malloc.h>
117 #include <sys/mbuf.h>
118 #include <sys/mutex.h>
119 #include <sys/module.h>
120 #include <sys/proc.h>
121 #include <sys/rman.h>
122 #include <sys/socket.h>
123 #include <sys/sockio.h>
124 #include <sys/sysctl.h>
125 #include <sys/linker.h>
126
127 #include <machine/bus.h>
128 #include <machine/endian.h>
129 #include <machine/resource.h>
130
131 #include <dev/pci/pcivar.h>
132 #include <dev/pci/pcireg.h>
133
134 #include <net/bpf.h>
135
136 #include <net/if.h>
137 #include <net/if_var.h>
138 #include <net/if_arp.h>
139 #include <net/if_dl.h>
140 #include <net/if_media.h>
141 #include <net/if_types.h>
142
143 #include <netinet/in.h>
144 #include <netinet/in_systm.h>
145 #include <netinet/if_ether.h>
146 #include <netinet/ip.h>
147
148 #include <net80211/ieee80211_var.h>
149 #include <net80211/ieee80211_regdomain.h>
150 #include <net80211/ieee80211_ratectl.h>
151 #include <net80211/ieee80211_radiotap.h>
152
153 #include <dev/iwm/if_iwmreg.h>
154 #include <dev/iwm/if_iwmvar.h>
155 #include <dev/iwm/if_iwm_debug.h>
156 #include <dev/iwm/if_iwm_notif_wait.h>
157 #include <dev/iwm/if_iwm_util.h>
158 #include <dev/iwm/if_iwm_binding.h>
159 #include <dev/iwm/if_iwm_phy_db.h>
160 #include <dev/iwm/if_iwm_mac_ctxt.h>
161 #include <dev/iwm/if_iwm_phy_ctxt.h>
162 #include <dev/iwm/if_iwm_time_event.h>
163 #include <dev/iwm/if_iwm_power.h>
164 #include <dev/iwm/if_iwm_scan.h>
165
166 #include <dev/iwm/if_iwm_pcie_trans.h>
167 #include <dev/iwm/if_iwm_led.h>
168
/* NVM "HW" section index differs between device families. */
#define IWM_NVM_HW_SECTION_NUM_FAMILY_7000      0
#define IWM_NVM_HW_SECTION_NUM_FAMILY_8000      10

/* lower blocks contain EEPROM image and calibration data */
#define IWM_OTP_LOW_IMAGE_SIZE_FAMILY_7000      (16 * 512 * sizeof(uint16_t)) /* 16 KB */
#define IWM_OTP_LOW_IMAGE_SIZE_FAMILY_8000      (32 * 512 * sizeof(uint16_t)) /* 32 KB */

/* firmware(9) image names, one per supported device variant */
#define IWM7260_FW      "iwm7260fw"
#define IWM3160_FW      "iwm3160fw"
#define IWM7265_FW      "iwm7265fw"
#define IWM7265D_FW     "iwm7265Dfw"
#define IWM8000_FW      "iwm8000Cfw"

/* Configuration fields common to all 7000-family devices. */
#define IWM_DEVICE_7000_COMMON                                          \
        .device_family = IWM_DEVICE_FAMILY_7000,                        \
        .eeprom_size = IWM_OTP_LOW_IMAGE_SIZE_FAMILY_7000,              \
        .nvm_hw_section_num = IWM_NVM_HW_SECTION_NUM_FAMILY_7000,       \
        .apmg_wake_up_wa = 1

const struct iwm_cfg iwm7260_cfg = {
        .fw_name = IWM7260_FW,
        IWM_DEVICE_7000_COMMON,
        .host_interrupt_operation_mode = 1,
};

const struct iwm_cfg iwm3160_cfg = {
        .fw_name = IWM3160_FW,
        IWM_DEVICE_7000_COMMON,
        .host_interrupt_operation_mode = 1,
};

const struct iwm_cfg iwm3165_cfg = {
        /* XXX IWM7265D_FW doesn't seem to work properly yet */
        .fw_name = IWM7265_FW,
        IWM_DEVICE_7000_COMMON,
        .host_interrupt_operation_mode = 0,
};

const struct iwm_cfg iwm7265_cfg = {
        .fw_name = IWM7265_FW,
        IWM_DEVICE_7000_COMMON,
        .host_interrupt_operation_mode = 0,
};

const struct iwm_cfg iwm7265d_cfg = {
        /* XXX IWM7265D_FW doesn't seem to work properly yet */
        .fw_name = IWM7265_FW,
        IWM_DEVICE_7000_COMMON,
        .host_interrupt_operation_mode = 0,
};

/* Configuration fields common to all 8000-family devices. */
#define IWM_DEVICE_8000_COMMON                                          \
        .device_family = IWM_DEVICE_FAMILY_8000,                        \
        .eeprom_size = IWM_OTP_LOW_IMAGE_SIZE_FAMILY_8000,              \
        .nvm_hw_section_num = IWM_NVM_HW_SECTION_NUM_FAMILY_8000

const struct iwm_cfg iwm8260_cfg = {
        .fw_name = IWM8000_FW,
        IWM_DEVICE_8000_COMMON,
        .host_interrupt_operation_mode = 0,
};
230
/*
 * Channel numbers the 7000-family NVM can enable, in NVM slot order;
 * iwm_init_channel_map() walks these against the NVM channel flags.
 */
const uint8_t iwm_nvm_channels[] = {
        /* 2.4 GHz */
        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
        /* 5 GHz */
        36, 40, 44, 48, 52, 56, 60, 64,
        100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
        149, 153, 157, 161, 165
};
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");

/* Same, for the 8000 family (adds 68-96 and 169-181 in 5 GHz). */
const uint8_t iwm_nvm_channels_8000[] = {
        /* 2.4 GHz */
        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
        /* 5 GHz */
        36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
        96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
        149, 153, 157, 161, 165, 169, 173, 177, 181
};
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");
252
#define IWM_NUM_2GHZ_CHANNELS   14
#define IWM_N_HW_ADDR_MASK      0xF

/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
        uint8_t rate;   /* rate in 500 kb/s units (2 => 1 Mb/s, 108 => 54 Mb/s) */
        uint8_t plcp;   /* corresponding PLCP value passed to firmware */
} iwm_rates[] = {
        {   2,  IWM_RATE_1M_PLCP  },
        {   4,  IWM_RATE_2M_PLCP  },
        {  11,  IWM_RATE_5M_PLCP  },
        {  22,  IWM_RATE_11M_PLCP },
        {  12,  IWM_RATE_6M_PLCP  },
        {  18,  IWM_RATE_9M_PLCP  },
        {  24,  IWM_RATE_12M_PLCP },
        {  36,  IWM_RATE_18M_PLCP },
        {  48,  IWM_RATE_24M_PLCP },
        {  72,  IWM_RATE_36M_PLCP },
        {  96,  IWM_RATE_48M_PLCP },
        { 108,  IWM_RATE_54M_PLCP },
};
/* CCK entries occupy indices [0..3] of iwm_rates, OFDM the rest. */
#define IWM_RIDX_CCK    0
#define IWM_RIDX_OFDM   4
#define IWM_RIDX_MAX    (nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)

/* One NVM section image as read from the device. */
struct iwm_nvm_section {
        uint16_t length;
        uint8_t *data;
};

/* How long to wait for the firmware "alive" / calibration notifications. */
#define IWM_MVM_UCODE_ALIVE_TIMEOUT     hz
#define IWM_MVM_UCODE_CALIB_TIMEOUT     (2*hz)

/* Result of waiting for the firmware "alive" notification. */
struct iwm_mvm_alive_data {
        int valid;
        uint32_t scd_base_addr;
};
295
296 static int      iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
297 static int      iwm_firmware_store_section(struct iwm_softc *,
298                                            enum iwm_ucode_type,
299                                            const uint8_t *, size_t);
300 static int      iwm_set_default_calib(struct iwm_softc *, const void *);
301 static void     iwm_fw_info_free(struct iwm_fw_info *);
302 static int      iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
303 static void     iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
304 static int      iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
305                                      bus_size_t, bus_size_t);
306 static void     iwm_dma_contig_free(struct iwm_dma_info *);
307 static int      iwm_alloc_fwmem(struct iwm_softc *);
308 static int      iwm_alloc_sched(struct iwm_softc *);
309 static int      iwm_alloc_kw(struct iwm_softc *);
310 static int      iwm_alloc_ict(struct iwm_softc *);
311 static int      iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
312 static void     iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
313 static void     iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
314 static int      iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
315                                   int);
316 static void     iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
317 static void     iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
318 static void     iwm_enable_interrupts(struct iwm_softc *);
319 static void     iwm_restore_interrupts(struct iwm_softc *);
320 static void     iwm_disable_interrupts(struct iwm_softc *);
321 static void     iwm_ict_reset(struct iwm_softc *);
322 static int      iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
323 static void     iwm_stop_device(struct iwm_softc *);
324 static void     iwm_mvm_nic_config(struct iwm_softc *);
325 static int      iwm_nic_rx_init(struct iwm_softc *);
326 static int      iwm_nic_tx_init(struct iwm_softc *);
327 static int      iwm_nic_init(struct iwm_softc *);
328 static int      iwm_enable_txq(struct iwm_softc *, int, int, int);
329 static int      iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
330 static int      iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
331                                    uint16_t, uint8_t *, uint16_t *);
332 static int      iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
333                                      uint16_t *, uint32_t);
334 static uint32_t iwm_eeprom_channel_flags(uint16_t);
335 static void     iwm_add_channel_band(struct iwm_softc *,
336                     struct ieee80211_channel[], int, int *, int, size_t,
337                     const uint8_t[]);
338 static void     iwm_init_channel_map(struct ieee80211com *, int, int *,
339                     struct ieee80211_channel[]);
340 static struct iwm_nvm_data *
341         iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
342                            const uint16_t *, const uint16_t *,
343                            const uint16_t *, const uint16_t *,
344                            const uint16_t *);
345 static void     iwm_free_nvm_data(struct iwm_nvm_data *);
346 static void     iwm_set_hw_address_family_8000(struct iwm_softc *,
347                                                struct iwm_nvm_data *,
348                                                const uint16_t *,
349                                                const uint16_t *);
350 static int      iwm_get_sku(const struct iwm_softc *, const uint16_t *,
351                             const uint16_t *);
352 static int      iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
353 static int      iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
354                                   const uint16_t *);
355 static int      iwm_get_n_hw_addrs(const struct iwm_softc *,
356                                    const uint16_t *);
357 static void     iwm_set_radio_cfg(const struct iwm_softc *,
358                                   struct iwm_nvm_data *, uint32_t);
359 static struct iwm_nvm_data *
360         iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
361 static int      iwm_nvm_init(struct iwm_softc *);
362 static int      iwm_pcie_load_section(struct iwm_softc *, uint8_t,
363                                       const struct iwm_fw_desc *);
364 static int      iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
365                                              bus_addr_t, uint32_t);
366 static int      iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
367                                                 const struct iwm_fw_sects *,
368                                                 int, int *);
369 static int      iwm_pcie_load_cpu_sections(struct iwm_softc *,
370                                            const struct iwm_fw_sects *,
371                                            int, int *);
372 static int      iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
373                                                const struct iwm_fw_sects *);
374 static int      iwm_pcie_load_given_ucode(struct iwm_softc *,
375                                           const struct iwm_fw_sects *);
376 static int      iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *);
377 static int      iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
378 static int      iwm_send_phy_cfg_cmd(struct iwm_softc *);
379 static int      iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
380                                               enum iwm_ucode_type);
381 static int      iwm_run_init_mvm_ucode(struct iwm_softc *, int);
382 static int      iwm_rx_addbuf(struct iwm_softc *, int, int);
383 static int      iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
384 static int      iwm_mvm_get_signal_strength(struct iwm_softc *,
385                                             struct iwm_rx_phy_info *);
386 static void     iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
387                                       struct iwm_rx_packet *,
388                                       struct iwm_rx_data *);
389 static int      iwm_get_noise(struct iwm_softc *sc,
390                     const struct iwm_mvm_statistics_rx_non_phy *);
391 static void     iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
392                                    struct iwm_rx_data *);
393 static int      iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
394                                          struct iwm_rx_packet *,
395                                          struct iwm_node *);
396 static void     iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
397                                   struct iwm_rx_data *);
398 static void     iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
399 #if 0
400 static void     iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
401                                  uint16_t);
402 #endif
403 static const struct iwm_rate *
404         iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
405                         struct mbuf *, struct iwm_tx_cmd *);
406 static int      iwm_tx(struct iwm_softc *, struct mbuf *,
407                        struct ieee80211_node *, int);
408 static int      iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
409                              const struct ieee80211_bpf_params *);
410 static int      iwm_mvm_flush_tx_path(struct iwm_softc *sc,
411                                       uint32_t tfd_msk, uint32_t flags);
412 static int      iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
413                                                 struct iwm_mvm_add_sta_cmd_v7 *,
414                                                 int *);
415 static int      iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
416                                        int);
417 static int      iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
418 static int      iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
419 static int      iwm_mvm_add_int_sta_common(struct iwm_softc *,
420                                            struct iwm_int_sta *,
421                                            const uint8_t *, uint16_t, uint16_t);
422 static int      iwm_mvm_add_aux_sta(struct iwm_softc *);
423 static int      iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
424 static int      iwm_auth(struct ieee80211vap *, struct iwm_softc *);
425 static int      iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
426 static int      iwm_release(struct iwm_softc *, struct iwm_node *);
427 static struct ieee80211_node *
428                 iwm_node_alloc(struct ieee80211vap *,
429                                const uint8_t[IEEE80211_ADDR_LEN]);
430 static void     iwm_setrates(struct iwm_softc *, struct iwm_node *);
431 static int      iwm_media_change(struct ifnet *);
432 static int      iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
433 static void     iwm_endscan_cb(void *, int);
434 static void     iwm_mvm_fill_sf_command(struct iwm_softc *,
435                                         struct iwm_sf_cfg_cmd *,
436                                         struct ieee80211_node *);
437 static int      iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
438 static int      iwm_send_bt_init_conf(struct iwm_softc *);
439 static int      iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
440 static void     iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
441 static int      iwm_init_hw(struct iwm_softc *);
442 static void     iwm_init(struct iwm_softc *);
443 static void     iwm_start(struct iwm_softc *);
444 static void     iwm_stop(struct iwm_softc *);
445 static void     iwm_watchdog(void *);
446 static void     iwm_parent(struct ieee80211com *);
447 #ifdef IWM_DEBUG
448 static const char *
449                 iwm_desc_lookup(uint32_t);
450 static void     iwm_nic_error(struct iwm_softc *);
451 static void     iwm_nic_umac_error(struct iwm_softc *);
452 #endif
453 static void     iwm_notif_intr(struct iwm_softc *);
454 static void     iwm_intr(void *);
455 static int      iwm_attach(device_t);
456 static int      iwm_is_valid_ether_addr(uint8_t *);
457 static void     iwm_preinit(void *);
458 static int      iwm_detach_local(struct iwm_softc *sc, int);
459 static void     iwm_init_task(void *);
460 static void     iwm_radiotap_attach(struct iwm_softc *);
461 static struct ieee80211vap *
462                 iwm_vap_create(struct ieee80211com *,
463                                const char [IFNAMSIZ], int,
464                                enum ieee80211_opmode, int,
465                                const uint8_t [IEEE80211_ADDR_LEN],
466                                const uint8_t [IEEE80211_ADDR_LEN]);
467 static void     iwm_vap_delete(struct ieee80211vap *);
468 static void     iwm_scan_start(struct ieee80211com *);
469 static void     iwm_scan_end(struct ieee80211com *);
470 static void     iwm_update_mcast(struct ieee80211com *);
471 static void     iwm_set_channel(struct ieee80211com *);
472 static void     iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
473 static void     iwm_scan_mindwell(struct ieee80211_scan_state *);
474 static int      iwm_detach(device_t);
475
476 /*
477  * Firmware parser.
478  */
479
480 static int
481 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
482 {
483         const struct iwm_fw_cscheme_list *l = (const void *)data;
484
485         if (dlen < sizeof(*l) ||
486             dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
487                 return EINVAL;
488
489         /* we don't actually store anything for now, always use s/w crypto */
490
491         return 0;
492 }
493
494 static int
495 iwm_firmware_store_section(struct iwm_softc *sc,
496     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
497 {
498         struct iwm_fw_sects *fws;
499         struct iwm_fw_desc *fwone;
500
501         if (type >= IWM_UCODE_TYPE_MAX)
502                 return EINVAL;
503         if (dlen < sizeof(uint32_t))
504                 return EINVAL;
505
506         fws = &sc->sc_fw.fw_sects[type];
507         if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
508                 return EINVAL;
509
510         fwone = &fws->fw_sect[fws->fw_count];
511
512         /* first 32bit are device load offset */
513         memcpy(&fwone->offset, data, sizeof(uint32_t));
514
515         /* rest is data */
516         fwone->data = data + sizeof(uint32_t);
517         fwone->len = dlen - sizeof(uint32_t);
518
519         fws->fw_count++;
520
521         return 0;
522 }
523
/* Scan-channel capability assumed until the firmware TLVs say otherwise. */
#define IWM_DEFAULT_SCAN_CHANNELS 40

/* iwlwifi: iwl-drv.c */
/* Payload of an IWM_UCODE_TLV_DEF_CALIB TLV; ucode_type is little-endian. */
struct iwm_tlv_calib_data {
        uint32_t ucode_type;
        struct iwm_tlv_calib_ctrl calib;
} __packed;
531
532 static int
533 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
534 {
535         const struct iwm_tlv_calib_data *def_calib = data;
536         uint32_t ucode_type = le32toh(def_calib->ucode_type);
537
538         if (ucode_type >= IWM_UCODE_TYPE_MAX) {
539                 device_printf(sc->sc_dev,
540                     "Wrong ucode_type %u for default "
541                     "calibration.\n", ucode_type);
542                 return EINVAL;
543         }
544
545         sc->sc_default_calib[ucode_type].flow_trigger =
546             def_calib->calib.flow_trigger;
547         sc->sc_default_calib[ucode_type].event_trigger =
548             def_calib->calib.event_trigger;
549
550         return 0;
551 }
552
553 static void
554 iwm_fw_info_free(struct iwm_fw_info *fw)
555 {
556         firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
557         fw->fw_fp = NULL;
558         /* don't touch fw->fw_status */
559         memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
560 }
561
562 static int
563 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
564 {
565         struct iwm_fw_info *fw = &sc->sc_fw;
566         const struct iwm_tlv_ucode_header *uhdr;
567         struct iwm_ucode_tlv tlv;
568         enum iwm_ucode_tlv_type tlv_type;
569         const struct firmware *fwp;
570         const uint8_t *data;
571         uint32_t usniffer_img;
572         uint32_t paging_mem_size;
573         int num_of_cpus;
574         int error = 0;
575         size_t len;
576
577         if (fw->fw_status == IWM_FW_STATUS_DONE &&
578             ucode_type != IWM_UCODE_INIT)
579                 return 0;
580
581         while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
582                 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
583         fw->fw_status = IWM_FW_STATUS_INPROGRESS;
584
585         if (fw->fw_fp != NULL)
586                 iwm_fw_info_free(fw);
587
588         /*
589          * Load firmware into driver memory.
590          * fw_fp will be set.
591          */
592         IWM_UNLOCK(sc);
593         fwp = firmware_get(sc->cfg->fw_name);
594         IWM_LOCK(sc);
595         if (fwp == NULL) {
596                 device_printf(sc->sc_dev,
597                     "could not read firmware %s (error %d)\n",
598                     sc->cfg->fw_name, error);
599                 goto out;
600         }
601         fw->fw_fp = fwp;
602
603         /* (Re-)Initialize default values. */
604         sc->sc_capaflags = 0;
605         sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
606         memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
607         memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
608
609         /*
610          * Parse firmware contents
611          */
612
613         uhdr = (const void *)fw->fw_fp->data;
614         if (*(const uint32_t *)fw->fw_fp->data != 0
615             || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
616                 device_printf(sc->sc_dev, "invalid firmware %s\n",
617                     sc->cfg->fw_name);
618                 error = EINVAL;
619                 goto out;
620         }
621
622         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
623             IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
624             IWM_UCODE_MINOR(le32toh(uhdr->ver)),
625             IWM_UCODE_API(le32toh(uhdr->ver)));
626         data = uhdr->data;
627         len = fw->fw_fp->datasize - sizeof(*uhdr);
628
629         while (len >= sizeof(tlv)) {
630                 size_t tlv_len;
631                 const void *tlv_data;
632
633                 memcpy(&tlv, data, sizeof(tlv));
634                 tlv_len = le32toh(tlv.length);
635                 tlv_type = le32toh(tlv.type);
636
637                 len -= sizeof(tlv);
638                 data += sizeof(tlv);
639                 tlv_data = data;
640
641                 if (len < tlv_len) {
642                         device_printf(sc->sc_dev,
643                             "firmware too short: %zu bytes\n",
644                             len);
645                         error = EINVAL;
646                         goto parse_out;
647                 }
648
649                 switch ((int)tlv_type) {
650                 case IWM_UCODE_TLV_PROBE_MAX_LEN:
651                         if (tlv_len < sizeof(uint32_t)) {
652                                 device_printf(sc->sc_dev,
653                                     "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
654                                     __func__,
655                                     (int) tlv_len);
656                                 error = EINVAL;
657                                 goto parse_out;
658                         }
659                         sc->sc_capa_max_probe_len
660                             = le32toh(*(const uint32_t *)tlv_data);
661                         /* limit it to something sensible */
662                         if (sc->sc_capa_max_probe_len >
663                             IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
664                                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
665                                     "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
666                                     "ridiculous\n", __func__);
667                                 error = EINVAL;
668                                 goto parse_out;
669                         }
670                         break;
671                 case IWM_UCODE_TLV_PAN:
672                         if (tlv_len) {
673                                 device_printf(sc->sc_dev,
674                                     "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
675                                     __func__,
676                                     (int) tlv_len);
677                                 error = EINVAL;
678                                 goto parse_out;
679                         }
680                         sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
681                         break;
682                 case IWM_UCODE_TLV_FLAGS:
683                         if (tlv_len < sizeof(uint32_t)) {
684                                 device_printf(sc->sc_dev,
685                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
686                                     __func__,
687                                     (int) tlv_len);
688                                 error = EINVAL;
689                                 goto parse_out;
690                         }
691                         /*
692                          * Apparently there can be many flags, but Linux driver
693                          * parses only the first one, and so do we.
694                          *
695                          * XXX: why does this override IWM_UCODE_TLV_PAN?
696                          * Intentional or a bug?  Observations from
697                          * current firmware file:
698                          *  1) TLV_PAN is parsed first
699                          *  2) TLV_FLAGS contains TLV_FLAGS_PAN
700                          * ==> this resets TLV_PAN to itself... hnnnk
701                          */
702                         sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
703                         break;
704                 case IWM_UCODE_TLV_CSCHEME:
705                         if ((error = iwm_store_cscheme(sc,
706                             tlv_data, tlv_len)) != 0) {
707                                 device_printf(sc->sc_dev,
708                                     "%s: iwm_store_cscheme(): returned %d\n",
709                                     __func__,
710                                     error);
711                                 goto parse_out;
712                         }
713                         break;
714                 case IWM_UCODE_TLV_NUM_OF_CPU:
715                         if (tlv_len != sizeof(uint32_t)) {
716                                 device_printf(sc->sc_dev,
717                                     "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
718                                     __func__,
719                                     (int) tlv_len);
720                                 error = EINVAL;
721                                 goto parse_out;
722                         }
723                         num_of_cpus = le32toh(*(const uint32_t *)tlv_data);
724                         if (num_of_cpus == 2) {
725                                 fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus =
726                                         TRUE;
727                                 fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus =
728                                         TRUE;
729                                 fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus =
730                                         TRUE;
731                         } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
732                                 device_printf(sc->sc_dev,
733                                     "%s: Driver supports only 1 or 2 CPUs\n",
734                                     __func__);
735                                 error = EINVAL;
736                                 goto parse_out;
737                         }
738                         break;
739                 case IWM_UCODE_TLV_SEC_RT:
740                         if ((error = iwm_firmware_store_section(sc,
741                             IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
742                                 device_printf(sc->sc_dev,
743                                     "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
744                                     __func__,
745                                     error);
746                                 goto parse_out;
747                         }
748                         break;
749                 case IWM_UCODE_TLV_SEC_INIT:
750                         if ((error = iwm_firmware_store_section(sc,
751                             IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
752                                 device_printf(sc->sc_dev,
753                                     "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
754                                     __func__,
755                                     error);
756                                 goto parse_out;
757                         }
758                         break;
759                 case IWM_UCODE_TLV_SEC_WOWLAN:
760                         if ((error = iwm_firmware_store_section(sc,
761                             IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
762                                 device_printf(sc->sc_dev,
763                                     "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
764                                     __func__,
765                                     error);
766                                 goto parse_out;
767                         }
768                         break;
769                 case IWM_UCODE_TLV_DEF_CALIB:
770                         if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
771                                 device_printf(sc->sc_dev,
772                                     "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
773                                     __func__,
774                                     (int) tlv_len,
775                                     (int) sizeof(struct iwm_tlv_calib_data));
776                                 error = EINVAL;
777                                 goto parse_out;
778                         }
779                         if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
780                                 device_printf(sc->sc_dev,
781                                     "%s: iwm_set_default_calib() failed: %d\n",
782                                     __func__,
783                                     error);
784                                 goto parse_out;
785                         }
786                         break;
787                 case IWM_UCODE_TLV_PHY_SKU:
788                         if (tlv_len != sizeof(uint32_t)) {
789                                 error = EINVAL;
790                                 device_printf(sc->sc_dev,
791                                     "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
792                                     __func__,
793                                     (int) tlv_len);
794                                 goto parse_out;
795                         }
796                         sc->sc_fw.phy_config =
797                             le32toh(*(const uint32_t *)tlv_data);
798                         sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
799                                                   IWM_FW_PHY_CFG_TX_CHAIN) >>
800                                                   IWM_FW_PHY_CFG_TX_CHAIN_POS;
801                         sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
802                                                   IWM_FW_PHY_CFG_RX_CHAIN) >>
803                                                   IWM_FW_PHY_CFG_RX_CHAIN_POS;
804                         break;
805
806                 case IWM_UCODE_TLV_API_CHANGES_SET: {
807                         const struct iwm_ucode_api *api;
808                         if (tlv_len != sizeof(*api)) {
809                                 error = EINVAL;
810                                 goto parse_out;
811                         }
812                         api = (const struct iwm_ucode_api *)tlv_data;
813                         /* Flags may exceed 32 bits in future firmware. */
814                         if (le32toh(api->api_index) > 0) {
815                                 device_printf(sc->sc_dev,
816                                     "unsupported API index %d\n",
817                                     le32toh(api->api_index));
818                                 goto parse_out;
819                         }
820                         sc->sc_ucode_api = le32toh(api->api_flags);
821                         break;
822                 }
823
824                 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
825                         const struct iwm_ucode_capa *capa;
826                         int idx, i;
827                         if (tlv_len != sizeof(*capa)) {
828                                 error = EINVAL;
829                                 goto parse_out;
830                         }
831                         capa = (const struct iwm_ucode_capa *)tlv_data;
832                         idx = le32toh(capa->api_index);
833                         if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
834                                 device_printf(sc->sc_dev,
835                                     "unsupported API index %d\n", idx);
836                                 goto parse_out;
837                         }
838                         for (i = 0; i < 32; i++) {
839                                 if ((le32toh(capa->api_capa) & (1U << i)) == 0)
840                                         continue;
841                                 setbit(sc->sc_enabled_capa, i + (32 * idx));
842                         }
843                         break;
844                 }
845
846                 case 48: /* undocumented TLV */
847                 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
848                 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
849                         /* ignore, not used by current driver */
850                         break;
851
852                 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
853                         if ((error = iwm_firmware_store_section(sc,
854                             IWM_UCODE_REGULAR_USNIFFER, tlv_data,
855                             tlv_len)) != 0)
856                                 goto parse_out;
857                         break;
858
859                 case IWM_UCODE_TLV_PAGING:
860                         if (tlv_len != sizeof(uint32_t)) {
861                                 error = EINVAL;
862                                 goto parse_out;
863                         }
864                         paging_mem_size = le32toh(*(const uint32_t *)tlv_data);
865
866                         IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
867                             "%s: Paging: paging enabled (size = %u bytes)\n",
868                             __func__, paging_mem_size);
869                         if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
870                                 device_printf(sc->sc_dev,
871                                         "%s: Paging: driver supports up to %u bytes for paging image\n",
872                                         __func__, IWM_MAX_PAGING_IMAGE_SIZE);
873                                 error = EINVAL;
874                                 goto out;
875                         }
876                         if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
877                                 device_printf(sc->sc_dev,
878                                     "%s: Paging: image isn't multiple %u\n",
879                                     __func__, IWM_FW_PAGING_SIZE);
880                                 error = EINVAL;
881                                 goto out;
882                         }
883
884                         sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
885                             paging_mem_size;
886                         usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
887                         sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
888                             paging_mem_size;
889                         break;
890
891                 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
892                         if (tlv_len != sizeof(uint32_t)) {
893                                 error = EINVAL;
894                                 goto parse_out;
895                         }
896                         sc->sc_capa_n_scan_channels =
897                           le32toh(*(const uint32_t *)tlv_data);
898                         break;
899
900                 case IWM_UCODE_TLV_FW_VERSION:
901                         if (tlv_len != sizeof(uint32_t) * 3) {
902                                 error = EINVAL;
903                                 goto parse_out;
904                         }
905                         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
906                             "%d.%d.%d",
907                             le32toh(((const uint32_t *)tlv_data)[0]),
908                             le32toh(((const uint32_t *)tlv_data)[1]),
909                             le32toh(((const uint32_t *)tlv_data)[2]));
910                         break;
911
912                 case IWM_UCODE_TLV_FW_MEM_SEG:
913                         break;
914
915                 default:
916                         device_printf(sc->sc_dev,
917                             "%s: unknown firmware section %d, abort\n",
918                             __func__, tlv_type);
919                         error = EINVAL;
920                         goto parse_out;
921                 }
922
923                 len -= roundup(tlv_len, 4);
924                 data += roundup(tlv_len, 4);
925         }
926
927         KASSERT(error == 0, ("unhandled error"));
928
929  parse_out:
930         if (error) {
931                 device_printf(sc->sc_dev, "firmware parse error %d, "
932                     "section type %d\n", error, tlv_type);
933         }
934
935         if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
936                 device_printf(sc->sc_dev,
937                     "device uses unsupported power ops\n");
938                 error = ENOTSUP;
939         }
940
941  out:
942         if (error) {
943                 fw->fw_status = IWM_FW_STATUS_NONE;
944                 if (fw->fw_fp != NULL)
945                         iwm_fw_info_free(fw);
946         } else
947                 fw->fw_status = IWM_FW_STATUS_DONE;
948         wakeup(&sc->sc_fw);
949
950         return error;
951 }
952
953 /*
954  * DMA resource routines
955  */
956
957 static void
958 iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
959 {
960         if (error != 0)
961                 return;
962         KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
963         *(bus_addr_t *)arg = segs[0].ds_addr;
964 }
965
966 static int
967 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
968     bus_size_t size, bus_size_t alignment)
969 {
970         int error;
971
972         dma->tag = NULL;
973         dma->map = NULL;
974         dma->size = size;
975         dma->vaddr = NULL;
976
977         error = bus_dma_tag_create(tag, alignment,
978             0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
979             1, size, 0, NULL, NULL, &dma->tag);
980         if (error != 0)
981                 goto fail;
982
983         error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
984             BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
985         if (error != 0)
986                 goto fail;
987
988         error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
989             iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
990         if (error != 0) {
991                 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
992                 dma->vaddr = NULL;
993                 goto fail;
994         }
995
996         bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
997
998         return 0;
999
1000 fail:
1001         iwm_dma_contig_free(dma);
1002
1003         return error;
1004 }
1005
/*
 * Release everything allocated by iwm_dma_contig_alloc().  Safe to
 * call on a partially constructed or already-freed iwm_dma_info,
 * since both steps are guarded by NULL checks.
 */
static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->vaddr != NULL) {
		/* Complete any outstanding DMA before the memory goes away. */
		bus_dmamap_sync(dma->tag, dma->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->tag, dma->map);
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
	}
	/* Destroy the tag last; the allocations above belonged to it. */
	if (dma->tag != NULL) {
		bus_dma_tag_destroy(dma->tag);
		dma->tag = NULL;
	}
}
1021
1022 /* fwmem is used to load firmware onto the card */
1023 static int
1024 iwm_alloc_fwmem(struct iwm_softc *sc)
1025 {
1026         /* Must be aligned on a 16-byte boundary. */
1027         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
1028             IWM_FH_MEM_TB_MAX_LENGTH, 16);
1029 }
1030
1031 /* tx scheduler rings.  not used? */
1032 static int
1033 iwm_alloc_sched(struct iwm_softc *sc)
1034 {
1035         /* TX scheduler rings must be aligned on a 1KB boundary. */
1036         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
1037             nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
1038 }
1039
1040 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
1041 static int
1042 iwm_alloc_kw(struct iwm_softc *sc)
1043 {
1044         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
1045 }
1046
1047 /* interrupt cause table */
1048 static int
1049 iwm_alloc_ict(struct iwm_softc *sc)
1050 {
1051         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
1052             IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
1053 }
1054
/*
 * Allocate all resources for one RX ring: the descriptor array, the
 * status page, one dmamap per ring slot plus a spare map used by
 * iwm_rx_addbuf() for buffer swaps, and an initial mbuf per slot.
 * On any failure, everything allocated so far is torn down via
 * iwm_free_rx_ring().  Returns 0 or a bus_dma/allocation error.
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/*
	 * Allocate RX descriptors (256-byte aligned).
	 * Each entry is 32 bits wide — presumably the DMA address of an
	 * RX buffer as programmed by iwm_rx_addbuf(); TODO confirm.
	 */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/* Create RX buffer DMA tag: single-segment buffers of IWM_RBUF_SIZE. */
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}
	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		/* Attach an mbuf to slot i and load it into the descriptor. */
		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}
1125
1126 static void
1127 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1128 {
1129         /* Reset the ring state */
1130         ring->cur = 0;
1131
1132         /*
1133          * The hw rx ring index in shared memory must also be cleared,
1134          * otherwise the discrepancy can cause reprocessing chaos.
1135          */
1136         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1137 }
1138
1139 static void
1140 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1141 {
1142         int i;
1143
1144         iwm_dma_contig_free(&ring->desc_dma);
1145         iwm_dma_contig_free(&ring->stat_dma);
1146
1147         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1148                 struct iwm_rx_data *data = &ring->data[i];
1149
1150                 if (data->m != NULL) {
1151                         bus_dmamap_sync(ring->data_dmat, data->map,
1152                             BUS_DMASYNC_POSTREAD);
1153                         bus_dmamap_unload(ring->data_dmat, data->map);
1154                         m_freem(data->m);
1155                         data->m = NULL;
1156                 }
1157                 if (data->map != NULL) {
1158                         bus_dmamap_destroy(ring->data_dmat, data->map);
1159                         data->map = NULL;
1160                 }
1161         }
1162         if (ring->spare_map != NULL) {
1163                 bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1164                 ring->spare_map = NULL;
1165         }
1166         if (ring->data_dmat != NULL) {
1167                 bus_dma_tag_destroy(ring->data_dmat);
1168                 ring->data_dmat = NULL;
1169         }
1170 }
1171
/*
 * Allocate all resources for TX queue 'qid': the TFD descriptor array,
 * and — for the command queue and the queues below it — the device
 * command buffers plus one dmamap per slot.  On any failure, whatever
 * was allocated is torn down via iwm_free_tx_ring().
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 *
	 * NOTE(review): the "0 through 9" wording looks inconsistent with
	 * the condition below, which keeps cmd space for qid <=
	 * IWM_MVM_CMD_QUEUE — verify against the queue numbering.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_MVM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	/*
	 * Precompute per-slot bus addresses into the command buffer:
	 * each slot gets one struct iwm_device_cmd, and scratch_paddr
	 * points at the scratch field inside that command's TX payload.
	 */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	/* All slots consumed exactly the cmd buffer we allocated. */
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
1251
1252 static void
1253 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1254 {
1255         int i;
1256
1257         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1258                 struct iwm_tx_data *data = &ring->data[i];
1259
1260                 if (data->m != NULL) {
1261                         bus_dmamap_sync(ring->data_dmat, data->map,
1262                             BUS_DMASYNC_POSTWRITE);
1263                         bus_dmamap_unload(ring->data_dmat, data->map);
1264                         m_freem(data->m);
1265                         data->m = NULL;
1266                 }
1267         }
1268         /* Clear TX descriptors. */
1269         memset(ring->desc, 0, ring->desc_dma.size);
1270         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1271             BUS_DMASYNC_PREWRITE);
1272         sc->qfullmsk &= ~(1 << ring->qid);
1273         ring->queued = 0;
1274         ring->cur = 0;
1275
1276         if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
1277                 iwm_pcie_clear_cmd_in_flight(sc);
1278 }
1279
1280 static void
1281 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1282 {
1283         int i;
1284
1285         iwm_dma_contig_free(&ring->desc_dma);
1286         iwm_dma_contig_free(&ring->cmd_dma);
1287
1288         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1289                 struct iwm_tx_data *data = &ring->data[i];
1290
1291                 if (data->m != NULL) {
1292                         bus_dmamap_sync(ring->data_dmat, data->map,
1293                             BUS_DMASYNC_POSTWRITE);
1294                         bus_dmamap_unload(ring->data_dmat, data->map);
1295                         m_freem(data->m);
1296                         data->m = NULL;
1297                 }
1298                 if (data->map != NULL) {
1299                         bus_dmamap_destroy(ring->data_dmat, data->map);
1300                         data->map = NULL;
1301                 }
1302         }
1303         if (ring->data_dmat != NULL) {
1304                 bus_dma_tag_destroy(ring->data_dmat);
1305                 ring->data_dmat = NULL;
1306         }
1307 }
1308
1309 /*
1310  * High-level hardware frobbing routines
1311  */
1312
/*
 * Unmask the standard interrupt set, caching the mask in sc_intmask
 * so iwm_restore_interrupts() can re-apply it later.
 */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1319
/* Re-apply the interrupt mask last cached in sc_intmask. */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1325
/*
 * Mask all interrupt sources and acknowledge anything already pending
 * in both the main and FH (flow handler) interrupt status registers.
 */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}
1336
/*
 * Clear the interrupt-cause table (ICT), point the hardware at it and
 * switch the driver into ICT interrupt mode.  Interrupts are disabled
 * for the duration and re-enabled at the end.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Ack stale causes before unmasking, then re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1360
1361 /* iwlwifi pcie/trans.c */
1362
1363 /*
1364  * Since this .. hard-resets things, it's time to actually
1365  * mark the first vap (if any) as having no mac context.
1366  * It's annoying, but since the driver is potentially being
1367  * stop/start'ed whilst active (thanks openbsd port!) we
1368  * have to correctly track this.
1369  */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, qid;
	uint32_t mask = 0;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->is_uploaded = 0;
	}

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	/* Disable the TX scheduler so no new frames are dispatched. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	if (iwm_nic_lock(sc)) {
		/* Stop each Tx DMA channel */
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
		}

		/*
		 * Wait for DMA channels to be idle.  Timeout is 5000 —
		 * presumably microseconds, per iwm_poll_bit(); confirm.
		 * A timeout is logged but not treated as fatal.
		 */
		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
		    5000)) {
			device_printf(sc->sc_dev,
			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
		}
		iwm_nic_unlock(sc);
	}
	iwm_pcie_rx_stop(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		/* Power-down device's busmaster DMA clocks */
		iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
		    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
		DELAY(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);
	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
1452
1453 /* iwlwifi: mvm/ops.c */
1454 static void
1455 iwm_mvm_nic_config(struct iwm_softc *sc)
1456 {
1457         uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1458         uint32_t reg_val = 0;
1459         uint32_t phy_config = iwm_mvm_get_phy_config(sc);
1460
1461         radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1462             IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1463         radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1464             IWM_FW_PHY_CFG_RADIO_STEP_POS;
1465         radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1466             IWM_FW_PHY_CFG_RADIO_DASH_POS;
1467
1468         /* SKU control */
1469         reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1470             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1471         reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1472             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1473
1474         /* radio configuration */
1475         reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1476         reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1477         reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1478
1479         IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1480
1481         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1482             "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1483             radio_cfg_step, radio_cfg_dash);
1484
1485         /*
1486          * W/A : NIC is stuck in a reset state after Early PCIe power off
1487          * (PCIe power is lost before PERST# is asserted), causing ME FW
1488          * to lose ownership and not being able to obtain it back.
1489          */
1490         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1491                 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1492                     IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1493                     ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1494         }
1495 }
1496
/*
 * Bring up the RX DMA engine: reset the ring pointers, point the
 * hardware at the RX descriptor ring and status area, and enable the
 * RX channel.  Returns 0 on success or EBUSY if the NIC lock cannot
 * be taken.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* Stop Rx DMA */
	iwm_pcie_rx_stop(sc);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* reset and flush pointers */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable RX. */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK        |
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->cfg->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1553
/*
 * Bring up the TX DMA side: deactivate the scheduler, program the
 * "keep warm" page and every TX ring's descriptor base address, then
 * put the scheduler in auto-active mode.  Returns 0 on success or
 * EBUSY if the NIC lock cannot be taken.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}

	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}
1588
/*
 * Full NIC initialization after (re)start: APM bring-up, power and
 * hardware-interface configuration, then RX and TX DMA setup and
 * shadow-register enabling.  Returns 0 or the first error encountered.
 */
static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	/* Power settings are only programmed this way on family 7000. */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_pwr(sc);

	iwm_mvm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: shadow registers enabled\n", __func__);
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}
1615
/*
 * Map an access-category index to the firmware TX FIFO that services it
 * (voice, video, best-effort, background, in that order).
 */
const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
	IWM_MVM_TX_FIFO_VO,
	IWM_MVM_TX_FIFO_VI,
	IWM_MVM_TX_FIFO_BE,
	IWM_MVM_TX_FIFO_BK,
};
1622
/*
 * Activate TX queue 'qid' and bind it to firmware FIFO 'fifo'.
 *
 * The command queue is configured directly through the scheduler
 * registers/SRAM; every other queue is configured by sending an
 * IWM_SCD_QUEUE_CFG command to the firmware.  Returns 0 on success,
 * EBUSY if the NIC lock cannot be (re)taken, or the command error.
 */
static int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev,
		    "%s: cannot enable txq %d\n",
		    __func__,
		    qid);
		return EBUSY;
	}

	/* Reset the queue's write pointer. */
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	if (qid == IWM_MVM_CMD_QUEUE) {
		/* unactivate before configuration */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

		iwm_nic_unlock(sc);

		/* Take the queue out of aggregation mode. */
		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
		iwm_nic_unlock(sc);

		/* Clear the queue's scheduler context in SRAM. */
		iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
		/* Set scheduler window size and frame limit. */
		iwm_write_mem32(sc,
		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
		    sizeof(uint32_t),
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		/* Mark the queue active and bind it to the requested FIFO. */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWM_SCD_QUEUE_STTS_REG_MSK);
	} else {
		struct iwm_scd_txq_cfg_cmd cmd;
		int error;

		iwm_nic_unlock(sc);

		/* Let the firmware configure non-command queues. */
		memset(&cmd, 0, sizeof(cmd));
		cmd.scd_queue = qid;
		cmd.enable = 1;
		cmd.sta_id = sta_id;
		cmd.tx_fifo = fifo;
		cmd.aggregate = 0;
		cmd.window = IWM_FRAME_LIMIT;

		error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
		    sizeof(cmd), &cmd);
		if (error) {
			device_printf(sc->sc_dev,
			    "cannot enable txq %d\n", qid);
			return error;
		}

		if (!iwm_nic_lock(sc))
			return EBUSY;
	}

	/*
	 * NOTE(review): this ORs in the queue number itself, not
	 * (1 << qid); looks suspicious but is kept as-is — verify
	 * against the iwlwifi reference before changing.
	 */
	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

	iwm_nic_unlock(sc);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
	    __func__, qid, fifo);

	return 0;
}
1710
/*
 * Post-"alive" transport setup: reset the ICT table, clear the
 * scheduler context SRAM, program the scheduler DRAM base, enable the
 * command queue and all FH TX DMA channels.  'scd_base_addr' is the
 * scheduler base reported in the firmware's alive response (0 skips
 * the cross-check).  Returns 0, EBUSY, or an iwm_enable_txq() error.
 */
static int
iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
{
	int error, chnl;

	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	iwm_ict_reset(sc);

	iwm_nic_unlock(sc);

	/* Cross-check the scheduler SRAM base against the alive response. */
	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
	if (scd_base_addr != 0 &&
	    scd_base_addr != sc->scd_base_addr) {
		device_printf(sc->sc_dev,
		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
		    __func__, sc->scd_base_addr, scd_base_addr);
	}

	/* reset context data, TX status and translation data */
	error = iwm_write_mem(sc,
	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, clear_dwords);
	if (error)
		return EBUSY;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	iwm_nic_unlock(sc);

	/* enable command channel */
	error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
	if (error)
		return error;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Activate all TX queues in the scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwm_nic_unlock(sc);

	/* Enable L1-Active */
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
	}

	return error;
}
1781
1782 /*
1783  * NVM read access and content parsing.  We do not support
1784  * external NVM or writing NVM.
1785  * iwlwifi/mvm/nvm.c
1786  */
1787
/* Default NVM size to read per IWM_NVM_ACCESS_CMD */
#define IWM_NVM_DEFAULT_CHUNK_SIZE      (2*1024)

/* op_code values for struct iwm_nvm_access_cmd */
#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0

/* load nvm chunk response (status field of the NVM_ACCESS response) */
enum {
	IWM_READ_NVM_CHUNK_SUCCEED = 0,
	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
};
1799
/*
 * Read one chunk of NVM 'section' via the IWM_NVM_ACCESS_CMD firmware
 * command.  On success the payload is copied to data + offset and *len
 * is set to the number of bytes actually read (0 when the firmware
 * legitimately reports end-of-section).  Returns 0 or an errno.
 */
static int
iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
{
	struct iwm_nvm_access_cmd nvm_access_cmd = {
		.offset = htole16(offset),
		.length = htole16(length),
		.type = htole16(section),
		.op_code = IWM_NVM_READ_OPCODE,
	};
	struct iwm_nvm_access_resp *nvm_resp;
	struct iwm_rx_packet *pkt;
	struct iwm_host_cmd cmd = {
		.id = IWM_NVM_ACCESS_CMD,
		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
		.data = { &nvm_access_cmd, },
	};
	int ret, bytes_read, offset_read;
	uint8_t *resp_data;

	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);

	ret = iwm_send_cmd(sc, &cmd);
	if (ret) {
		device_printf(sc->sc_dev,
		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
		return ret;
	}

	pkt = cmd.resp_pkt;

	/* Extract NVM response */
	nvm_resp = (void *)pkt->data;
	/* 'ret' temporarily holds the firmware status, not an errno. */
	ret = le16toh(nvm_resp->status);
	bytes_read = le16toh(nvm_resp->length);
	offset_read = le16toh(nvm_resp->offset);
	resp_data = nvm_resp->data;
	if (ret) {
		if ((offset != 0) &&
		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
			/*
			 * meaning of NOT_VALID_ADDRESS:
			 * driver try to read chunk from address that is
			 * multiple of 2K and got an error since addr is empty.
			 * meaning of (offset != 0): driver already
			 * read valid data from another chunk so this case
			 * is not an error.
			 */
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
				    "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
				    offset);
			*len = 0;
			ret = 0;
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
				    "NVM access command failed with status %d\n", ret);
			ret = EIO;
		}
		goto exit;
	}

	/* Sanity-check the response before trusting its payload. */
	if (offset_read != offset) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with invalid offset %d\n",
		    offset_read);
		ret = EINVAL;
		goto exit;
	}

	if (bytes_read > length) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with too much data "
		    "(%d bytes requested, %d bytes received)\n",
		    length, bytes_read);
		ret = EINVAL;
		goto exit;
	}

	/* Write data to NVM */
	memcpy(data + offset, resp_data, bytes_read);
	*len = bytes_read;

 exit:
	iwm_free_resp(sc, &cmd);
	return ret;
}
1886
1887 /*
1888  * Reads an NVM section completely.
1889  * NICs prior to 7000 family don't have a real NVM, but just read
1890  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1891  * by uCode, we need to manually check in this case that we don't
1892  * overflow and try to read more than the EEPROM size.
1893  * For 7000 family NICs, we supply the maximal size we can read, and
1894  * the uCode fills the response with as much data as we can,
1895  * without overflowing, so no check is needed.
1896  */
1897 static int
1898 iwm_nvm_read_section(struct iwm_softc *sc,
1899         uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1900 {
1901         uint16_t seglen, length, offset = 0;
1902         int ret;
1903
1904         /* Set nvm section read length */
1905         length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1906
1907         seglen = length;
1908
1909         /* Read the NVM until exhausted (reading less than requested) */
1910         while (seglen == length) {
1911                 /* Check no memory assumptions fail and cause an overflow */
1912                 if ((size_read + offset + length) >
1913                     sc->cfg->eeprom_size) {
1914                         device_printf(sc->sc_dev,
1915                             "EEPROM size is too small for NVM\n");
1916                         return ENOBUFS;
1917                 }
1918
1919                 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1920                 if (ret) {
1921                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1922                                     "Cannot read NVM from section %d offset %d, length %d\n",
1923                                     section, offset, length);
1924                         return ret;
1925                 }
1926                 offset += seglen;
1927         }
1928
1929         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1930                     "NVM section %d read completed\n", section);
1931         *len = offset;
1932         return 0;
1933 }
1934
1935 /*
1936  * BEGIN IWM_NVM_PARSE
1937  */
1938
1939 /* iwlwifi/iwl-nvm-parse.c */
1940
/* NVM offsets (in words) definitions; each is relative to its section */
enum iwm_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR = 0x15,

	/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION = 0x1C0,
	IWM_NVM_VERSION = 0,
	IWM_RADIO_CFG = 1,
	IWM_SKU = 2,
	IWM_N_HW_ADDRS = 3,
	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

	/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION = 0x2B8,
	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
1958
/* Family-8000 equivalents of enum iwm_nvm_offsets (in words) */
enum iwm_8000_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR0_WFPM_8000 = 0x12,
	IWM_HW_ADDR1_WFPM_8000 = 0x16,
	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,

	/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION_8000 = 0x1C0,
	IWM_NVM_VERSION_8000 = 0,
	IWM_RADIO_CFG_8000 = 0,
	IWM_SKU_8000 = 2,
	IWM_N_HW_ADDRS_8000 = 3,

	/* NVM REGULATORY -Section offset (in words) definitions */
	IWM_NVM_CHANNELS_8000 = 0,
	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
	IWM_NVM_LAR_OFFSET_8000 = 0x507,
	IWM_NVM_LAR_ENABLED_8000 = 0x7,

	/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
};
1984
/*
 * SKU Capabilities (actual values from NVM definition); tested against
 * the word returned by iwm_get_sku().
 */
enum nvm_sku_bits {
	IWM_NVM_SKU_CAP_BAND_24GHZ      = (1 << 0),
	IWM_NVM_SKU_CAP_BAND_52GHZ      = (1 << 1),
	IWM_NVM_SKU_CAP_11N_ENABLE      = (1 << 2),
	IWM_NVM_SKU_CAP_11AC_ENABLE     = (1 << 3),
};
1992
/* radio config bits (actual values from NVM definition) */
#define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
#define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
#define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
#define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */

/* Family-8000 radio config layout: wider fields, plus a flavor nibble */
#define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)       (x & 0xF)
#define IWM_NVM_RF_CFG_DASH_MSK_8000(x)         ((x >> 4) & 0xF)
#define IWM_NVM_RF_CFG_STEP_MSK_8000(x)         ((x >> 8) & 0xF)
#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)         ((x >> 12) & 0xFFF)
#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)       ((x >> 24) & 0xF)
#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)       ((x >> 28) & 0xF)

/* default channel max TX power; presumably dBm — TODO confirm */
#define DEFAULT_MAX_TX_POWER 16
2009
2010 /**
2011  * enum iwm_nvm_channel_flags - channel flags in NVM
2012  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
2013  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
2014  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
2015  * @IWM_NVM_CHANNEL_RADAR: radar detection required
2016  * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
2017  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
2018  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
2019  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
2020  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
2021  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
2022  */
2023 enum iwm_nvm_channel_flags {
2024         IWM_NVM_CHANNEL_VALID = (1 << 0),
2025         IWM_NVM_CHANNEL_IBSS = (1 << 1),
2026         IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
2027         IWM_NVM_CHANNEL_RADAR = (1 << 4),
2028         IWM_NVM_CHANNEL_DFS = (1 << 7),
2029         IWM_NVM_CHANNEL_WIDE = (1 << 8),
2030         IWM_NVM_CHANNEL_40MHZ = (1 << 9),
2031         IWM_NVM_CHANNEL_80MHZ = (1 << 10),
2032         IWM_NVM_CHANNEL_160MHZ = (1 << 11),
2033 };
2034
2035 /*
2036  * Translate EEPROM flags to net80211.
2037  */
2038 static uint32_t
2039 iwm_eeprom_channel_flags(uint16_t ch_flags)
2040 {
2041         uint32_t nflags;
2042
2043         nflags = 0;
2044         if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
2045                 nflags |= IEEE80211_CHAN_PASSIVE;
2046         if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
2047                 nflags |= IEEE80211_CHAN_NOADHOC;
2048         if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
2049                 nflags |= IEEE80211_CHAN_DFS;
2050                 /* Just in case. */
2051                 nflags |= IEEE80211_CHAN_NOADHOC;
2052         }
2053
2054         return (nflags);
2055 }
2056
/*
 * Add the NVM channels with indices [ch_idx, ch_num) to the net80211
 * channel array 'chans' for the PHY modes set in 'bands'.  Channels
 * whose NVM flags lack the VALID bit are skipped; the loop stops early
 * if ieee80211_add_channel() reports the array is full.
 */
static void
iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
    int maxchans, int *nchans, int ch_idx, size_t ch_num,
    const uint8_t bands[])
{
	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
	uint32_t nflags;
	uint16_t ch_flags;
	uint8_t ieee;
	int error;

	for (; ch_idx < ch_num; ch_idx++) {
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
		/* The channel-number table differs per device family. */
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
			ieee = iwm_nvm_channels[ch_idx];
		else
			ieee = iwm_nvm_channels_8000[ch_idx];

		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
			    ieee, ch_flags,
			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
			    "5.2" : "2.4");
			continue;
		}

		nflags = iwm_eeprom_channel_flags(ch_flags);
		error = ieee80211_add_channel(chans, maxchans, nchans,
		    ieee, 0, 0, nflags, bands);
		if (error != 0)
			break;

		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
		    "Ch. %d Flags %x [%sGHz] - Added\n",
		    ieee, ch_flags,
		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
		    "5.2" : "2.4");
	}
}
2097
/*
 * net80211 ic_getradiocaps callback: build the channel list from the
 * NVM data — 2GHz 11b/g channels, channel 14 as 11b-only, and the
 * 5GHz band when the SKU enables it.
 */
static void
iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
    struct ieee80211_channel chans[])
{
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_nvm_data *data = sc->nvm_data;
	uint8_t bands[IEEE80211_MODE_BYTES];
	size_t ch_num;

	memset(bands, 0, sizeof(bands));
	/* 1-13: 11b/g channels. */
	setbit(bands, IEEE80211_MODE_11B);
	setbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
	    IWM_NUM_2GHZ_CHANNELS - 1, bands);

	/* 14: 11b channel only. */
	clrbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans,
	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);

	if (data->sku_cap_band_52GHz_enable) {
		/* The 5GHz channel count depends on the device family. */
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
			ch_num = nitems(iwm_nvm_channels);
		else
			ch_num = nitems(iwm_nvm_channels_8000);
		memset(bands, 0, sizeof(bands));
		setbit(bands, IEEE80211_MODE_11A);
		iwm_add_channel_band(sc, chans, maxchans, nchans,
		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
	}
}
2130
/*
 * Determine the MAC address on family-8000 devices: prefer the NVM
 * MAC-override section, fall back to the WFMP PRPH registers, and as
 * a last resort zero data->hw_addr and complain.
 */
static void
iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
	const uint16_t *mac_override, const uint16_t *nvm_hw)
{
	const uint8_t *hw_addr;

	if (mac_override) {
		static const uint8_t reserved_mac[] = {
			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
		};

		hw_addr = (const uint8_t *)(mac_override +
				 IWM_MAC_ADDRESS_OVERRIDE_8000);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 */
		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
		    iwm_is_valid_ether_addr(data->hw_addr) &&
		    !IEEE80211_IS_MULTICAST(data->hw_addr))
			return;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: mac address from nvm override section invalid\n",
		    __func__);
	}

	if (nvm_hw) {
		/* read the mac address from WFMP registers */
		uint32_t mac_addr0 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
		uint32_t mac_addr1 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

		/* The register words are byte-reversed relative to the MAC. */
		hw_addr = (const uint8_t *)&mac_addr0;
		data->hw_addr[0] = hw_addr[3];
		data->hw_addr[1] = hw_addr[2];
		data->hw_addr[2] = hw_addr[1];
		data->hw_addr[3] = hw_addr[0];

		hw_addr = (const uint8_t *)&mac_addr1;
		data->hw_addr[4] = hw_addr[1];
		data->hw_addr[5] = hw_addr[0];

		return;
	}

	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
	memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
2189
2190 static int
2191 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2192             const uint16_t *phy_sku)
2193 {
2194         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2195                 return le16_to_cpup(nvm_sw + IWM_SKU);
2196
2197         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2198 }
2199
2200 static int
2201 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2202 {
2203         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2204                 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2205         else
2206                 return le32_to_cpup((const uint32_t *)(nvm_sw +
2207                                                 IWM_NVM_VERSION_8000));
2208 }
2209
2210 static int
2211 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2212                   const uint16_t *phy_sku)
2213 {
2214         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2215                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2216
2217         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2218 }
2219
2220 static int
2221 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2222 {
2223         int n_hw_addr;
2224
2225         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2226                 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2227
2228         n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2229
2230         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2231 }
2232
2233 static void
2234 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2235                   uint32_t radio_cfg)
2236 {
2237         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2238                 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2239                 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2240                 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2241                 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2242                 return;
2243         }
2244
2245         /* set the radio configuration for family 8000 */
2246         data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2247         data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2248         data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2249         data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2250         data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2251         data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2252 }
2253
/*
 * Fill data->hw_addr with the device MAC address from the NVM.
 * Pre-8000 devices store it 16-bit little-endian in the HW section
 * (byte order 214365); family 8000 goes through
 * iwm_set_hw_address_family_8000().  Returns 0, or EINVAL if no
 * valid address could be found.
 */
static int
iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
		   const uint16_t *nvm_hw, const uint16_t *mac_override)
{
#ifdef notyet /* for FAMILY 9000 */
	if (cfg->mac_addr_from_csr) {
		iwm_set_hw_address_from_csr(sc, data);
	} else
#endif
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);

		/* The byte order is little endian 16 bit, meaning 214365 */
		data->hw_addr[0] = hw_addr[1];
		data->hw_addr[1] = hw_addr[0];
		data->hw_addr[2] = hw_addr[3];
		data->hw_addr[3] = hw_addr[2];
		data->hw_addr[4] = hw_addr[5];
		data->hw_addr[5] = hw_addr[4];
	} else {
		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
	}

	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
		device_printf(sc->sc_dev, "no valid mac address was found\n");
		return EINVAL;
	}

	return 0;
}
2284
2285 static struct iwm_nvm_data *
2286 iwm_parse_nvm_data(struct iwm_softc *sc,
2287                    const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2288                    const uint16_t *nvm_calib, const uint16_t *mac_override,
2289                    const uint16_t *phy_sku, const uint16_t *regulatory)
2290 {
2291         struct iwm_nvm_data *data;
2292         uint32_t sku, radio_cfg;
2293
2294         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2295                 data = malloc(sizeof(*data) +
2296                     IWM_NUM_CHANNELS * sizeof(uint16_t),
2297                     M_DEVBUF, M_NOWAIT | M_ZERO);
2298         } else {
2299                 data = malloc(sizeof(*data) +
2300                     IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2301                     M_DEVBUF, M_NOWAIT | M_ZERO);
2302         }
2303         if (!data)
2304                 return NULL;
2305
2306         data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2307
2308         radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2309         iwm_set_radio_cfg(sc, data, radio_cfg);
2310
2311         sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2312         data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2313         data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2314         data->sku_cap_11n_enable = 0;
2315
2316         data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2317
2318         /* If no valid mac address was found - bail out */
2319         if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2320                 free(data, M_DEVBUF);
2321                 return NULL;
2322         }
2323
2324         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2325                 memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2326                     IWM_NUM_CHANNELS * sizeof(uint16_t));
2327         } else {
2328                 memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2329                     IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2330         }
2331
2332         return data;
2333 }
2334
2335 static void
2336 iwm_free_nvm_data(struct iwm_nvm_data *data)
2337 {
2338         if (data != NULL)
2339                 free(data, M_DEVBUF);
2340 }
2341
2342 static struct iwm_nvm_data *
2343 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2344 {
2345         const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2346
2347         /* Checking for required sections */
2348         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2349                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2350                     !sections[sc->cfg->nvm_hw_section_num].data) {
2351                         device_printf(sc->sc_dev,
2352                             "Can't parse empty OTP/NVM sections\n");
2353                         return NULL;
2354                 }
2355         } else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2356                 /* SW and REGULATORY sections are mandatory */
2357                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2358                     !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2359                         device_printf(sc->sc_dev,
2360                             "Can't parse empty OTP/NVM sections\n");
2361                         return NULL;
2362                 }
2363                 /* MAC_OVERRIDE or at least HW section must exist */
2364                 if (!sections[sc->cfg->nvm_hw_section_num].data &&
2365                     !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2366                         device_printf(sc->sc_dev,
2367                             "Can't parse mac_address, empty sections\n");
2368                         return NULL;
2369                 }
2370
2371                 /* PHY_SKU section is mandatory in B0 */
2372                 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2373                         device_printf(sc->sc_dev,
2374                             "Can't parse phy_sku in B0, empty sections\n");
2375                         return NULL;
2376                 }
2377         } else {
2378                 panic("unknown device family %d\n", sc->cfg->device_family);
2379         }
2380
2381         hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2382         sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2383         calib = (const uint16_t *)
2384             sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2385         regulatory = (const uint16_t *)
2386             sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2387         mac_override = (const uint16_t *)
2388             sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2389         phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2390
2391         return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2392             phy_sku, regulatory);
2393 }
2394
2395 static int
2396 iwm_nvm_init(struct iwm_softc *sc)
2397 {
2398         struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2399         int i, ret, section;
2400         uint32_t size_read = 0;
2401         uint8_t *nvm_buffer, *temp;
2402         uint16_t len;
2403
2404         memset(nvm_sections, 0, sizeof(nvm_sections));
2405
2406         if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2407                 return EINVAL;
2408
2409         /* load NVM values from nic */
2410         /* Read From FW NVM */
2411         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2412
2413         nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2414         if (!nvm_buffer)
2415                 return ENOMEM;
2416         for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2417                 /* we override the constness for initial read */
2418                 ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2419                                            &len, size_read);
2420                 if (ret)
2421                         continue;
2422                 size_read += len;
2423                 temp = malloc(len, M_DEVBUF, M_NOWAIT);
2424                 if (!temp) {
2425                         ret = ENOMEM;
2426                         break;
2427                 }
2428                 memcpy(temp, nvm_buffer, len);
2429
2430                 nvm_sections[section].data = temp;
2431                 nvm_sections[section].length = len;
2432         }
2433         if (!size_read)
2434                 device_printf(sc->sc_dev, "OTP is blank\n");
2435         free(nvm_buffer, M_DEVBUF);
2436
2437         sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2438         if (!sc->nvm_data)
2439                 return EINVAL;
2440         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2441                     "nvm version = %x\n", sc->nvm_data->nvm_version);
2442
2443         for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2444                 if (nvm_sections[i].data != NULL)
2445                         free(nvm_sections[i].data, M_DEVBUF);
2446         }
2447
2448         return 0;
2449 }
2450
/*
 * Copy one firmware section into device memory, one DMA chunk at a
 * time, using the pre-allocated bounce buffer sc->fw_dma and the
 * service DMA channel (via iwm_pcie_load_firmware_chunk()).
 *
 * Destination addresses inside the extended memory window require the
 * IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE bit to be set around the transfer.
 *
 * Returns 0 on success or the error from the failing chunk transfer.
 */
static int
iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
        const struct iwm_fw_desc *section)
{
        struct iwm_dma_info *dma = &sc->fw_dma;
        uint8_t *v_addr;
        bus_addr_t p_addr;
        /* Chunk size is capped by what one transfer buffer can carry. */
        uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
        int ret = 0;

        IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                    "%s: [%d] uCode section being loaded...\n",
                    __func__, section_num);

        v_addr = dma->vaddr;
        p_addr = dma->paddr;

        for (offset = 0; offset < section->len; offset += chunk_sz) {
                uint32_t copy_size, dst_addr;
                int extended_addr = FALSE;

                /* Last chunk may be shorter than chunk_sz. */
                copy_size = MIN(chunk_sz, section->len - offset);
                dst_addr = section->offset + offset;

                if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
                    dst_addr <= IWM_FW_MEM_EXTENDED_END)
                        extended_addr = TRUE;

                /* Open the extended address window for this chunk. */
                if (extended_addr)
                        iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
                                          IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

                /* Stage the chunk in the bounce buffer, then DMA it. */
                memcpy(v_addr, (const uint8_t *)section->data + offset,
                    copy_size);
                bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
                ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
                                                   copy_size);

                /* Close the window again regardless of transfer result. */
                if (extended_addr)
                        iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
                                            IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

                if (ret) {
                        device_printf(sc->sc_dev,
                            "%s: Could not load the [%d] uCode section\n",
                            __func__, section_num);
                        break;
                }
        }

        return ret;
}
2503
2504 /*
2505  * ucode
2506  */
/*
 * DMA one firmware chunk from host memory (phy_addr) into device
 * memory at dst_addr using the service channel, then sleep until the
 * FH_TX interrupt handler flags sc_fw_chunk_done (woken via sc_fw).
 *
 * Must be called with the softc mutex held (msleep() drops and
 * reacquires sc_mtx).  Returns 0 on success, EBUSY if the NIC could
 * not be locked, or ETIMEDOUT if no completion arrived within ~5s.
 */
static int
iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
                             bus_addr_t phy_addr, uint32_t byte_cnt)
{
        int ret;

        /* Cleared here, set by the interrupt handler on completion. */
        sc->sc_fw_chunk_done = 0;

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* Pause the channel while the transfer is programmed. */
        IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
            IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

        /* Destination address in device SRAM. */
        IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
            dst_addr);

        /* Source: low 32 bits of the host DMA address ... */
        IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
            phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

        /* ... plus high address bits and the byte count. */
        IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
            (iwm_get_dma_hi_addr(phy_addr)
             << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

        /* Single transfer buffer, marked valid. */
        IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
            1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
            1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
            IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

        /* Kick off the DMA; endtfd raises the FH_TX interrupt. */
        IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
            IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
            IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
            IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

        iwm_nic_unlock(sc);

        /* wait up to 5s for this segment to load */
        ret = 0;
        while (!sc->sc_fw_chunk_done) {
                ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
                if (ret)
                        break;
        }

        if (ret != 0) {
                device_printf(sc->sc_dev,
                    "fw chunk addr 0x%x len %d failed to load\n",
                    dst_addr, byte_cnt);
                return ETIMEDOUT;
        }

        return 0;
}
2560
/*
 * Load the firmware sections belonging to one CPU (8000 family).
 *
 * Sections for CPU1 start at index 0; sections for CPU2 start after the
 * separator that terminated the previous call (*first_ucode_section is
 * both input and output).  After each section the running sec_num
 * bitmask is reported to the ucode through IWM_FH_UCODE_LOAD_STATUS,
 * shifted by 16 for CPU2.  On completion the load-status register is
 * set to all-ones for the CPU(s) loaded so far.
 *
 * Returns 0 on success or the error from a failing section load.
 */
static int
iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
        const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
{
        int shift_param;
        int i, ret = 0, sec_num = 0x1;
        uint32_t val, last_read_idx = 0;

        if (cpu == 1) {
                /* CPU1 uses the low 16 bits of the status register. */
                shift_param = 0;
                *first_ucode_section = 0;
        } else {
                /* CPU2 uses the high 16 bits; skip the separator entry. */
                shift_param = 16;
                (*first_ucode_section)++;
        }

        for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
                last_read_idx = i;

                /*
                 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
                 * CPU1 to CPU2.
                 * PAGING_SEPARATOR_SECTION delimiter - separate between
                 * CPU2 non paged to CPU2 paging sec.
                 */
                if (!image->fw_sect[i].data ||
                    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
                    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
                        IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                                    "Break since Data not valid or Empty section, sec = %d\n",
                                    i);
                        break;
                }
                ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
                if (ret)
                        return ret;

                /* Notify the ucode of the loaded section number and status */
                if (iwm_nic_lock(sc)) {
                        val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
                        val = val | (sec_num << shift_param);
                        IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
                        /* Grow the mask: 0x1, 0x3, 0x7, ... */
                        sec_num = (sec_num << 1) | 0x1;
                        iwm_nic_unlock(sc);
                }
        }

        /* Remember where the separator was so CPU2 can resume after it. */
        *first_ucode_section = last_read_idx;

        iwm_enable_interrupts(sc);

        if (iwm_nic_lock(sc)) {
                /* Mark this CPU's half (or both) as fully loaded. */
                if (cpu == 1)
                        IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
                else
                        IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
                iwm_nic_unlock(sc);
        }

        return 0;
}
2622
/*
 * Load the firmware sections belonging to one CPU (pre-8000 families).
 *
 * Same section-walk as the 8000 variant: CPU1 starts at index 0, CPU2
 * resumes past the separator recorded in *first_ucode_section.  No
 * per-section handshake is done here; for the 8000 family a final
 * "loading completed" status is written via the PRPH register.
 *
 * Returns 0 on success or the error from a failing section load.
 */
static int
iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
        const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
{
        int shift_param;
        int i, ret = 0;
        uint32_t last_read_idx = 0;

        if (cpu == 1) {
                /* CPU1: status bits live in the low half-word. */
                shift_param = 0;
                *first_ucode_section = 0;
        } else {
                /* CPU2: status bits live in the high half-word. */
                shift_param = 16;
                (*first_ucode_section)++;
        }

        for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
                last_read_idx = i;

                /*
                 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
                 * CPU1 to CPU2.
                 * PAGING_SEPARATOR_SECTION delimiter - separate between
                 * CPU2 non paged to CPU2 paging sec.
                 */
                if (!image->fw_sect[i].data ||
                    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
                    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
                        IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                                    "Break since Data not valid or Empty section, sec = %d\n",
                                     i);
                        break;
                }

                ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
                if (ret)
                        return ret;
        }

        /* Tell an 8000-family ucode that this CPU's load is complete. */
        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
                iwm_set_bits_prph(sc,
                                  IWM_CSR_UCODE_LOAD_STATUS_ADDR,
                                  (IWM_LMPM_CPU_UCODE_LOADING_COMPLETED |
                                   IWM_LMPM_CPU_HDRS_LOADING_COMPLETED |
                                   IWM_LMPM_CPU_UCODE_LOADING_STARTED) <<
                                        shift_param);

        /* Record the stop index so a CPU2 pass can resume after it. */
        *first_ucode_section = last_read_idx;

        return 0;

}
2675
2676 static int
2677 iwm_pcie_load_given_ucode(struct iwm_softc *sc,
2678         const struct iwm_fw_sects *image)
2679 {
2680         int ret = 0;
2681         int first_ucode_section;
2682
2683         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2684                      image->is_dual_cpus ? "Dual" : "Single");
2685
2686         /* load to FW the binary non secured sections of CPU1 */
2687         ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2688         if (ret)
2689                 return ret;
2690
2691         if (image->is_dual_cpus) {
2692                 /* set CPU2 header address */
2693                 iwm_write_prph(sc,
2694                                IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2695                                IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2696
2697                 /* load to FW the binary sections of CPU2 */
2698                 ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2699                                                  &first_ucode_section);
2700                 if (ret)
2701                         return ret;
2702         }
2703
2704         iwm_enable_interrupts(sc);
2705
2706         /* release CPU reset */
2707         IWM_WRITE(sc, IWM_CSR_RESET, 0);
2708
2709         return 0;
2710 }
2711
2712 int
2713 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2714         const struct iwm_fw_sects *image)
2715 {
2716         int ret = 0;
2717         int first_ucode_section;
2718
2719         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2720                     image->is_dual_cpus ? "Dual" : "Single");
2721
2722         /* configure the ucode to be ready to get the secured image */
2723         /* release CPU reset */
2724         iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
2725
2726         /* load to FW the binary Secured sections of CPU1 */
2727         ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2728             &first_ucode_section);
2729         if (ret)
2730                 return ret;
2731
2732         /* load to FW the binary sections of CPU2 */
2733         return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2734             &first_ucode_section);
2735 }
2736
2737 /* XXX Get rid of this definition */
/*
 * Mask all interrupts except FH_TX, which is the only one needed while
 * firmware chunks are being DMAed to the device (see iwm_start_fw()).
 */
static inline void
iwm_enable_fw_load_int(struct iwm_softc *sc)
{
        IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
        sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
        IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
2745
2746 /* XXX Add proper rfkill support code */
/*
 * Prepare the hardware, initialize the NIC, and push the given
 * firmware image to the device.
 *
 * Returns 0 on success, EIO if the hardware could not be prepared
 * (e.g. AMT owns the device), or the error from NIC init / image load.
 */
static int
iwm_start_fw(struct iwm_softc *sc,
        const struct iwm_fw_sects *fw)
{
        int ret;

        /* This may fail if AMT took ownership of the device */
        if (iwm_prepare_card_hw(sc)) {
                device_printf(sc->sc_dev,
                    "%s: Exit HW not ready\n", __func__);
                ret = EIO;
                goto out;
        }

        /* Ack any pending interrupts. */
        IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

        iwm_disable_interrupts(sc);

        /* make sure rfkill handshake bits are cleared */
        IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
        IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
            IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

        /* clear (again), then enable host interrupts */
        IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

        ret = iwm_nic_init(sc);
        if (ret) {
                device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
                goto out;
        }

        /*
         * Now, we load the firmware and don't want to be interrupted, even
         * by the RF-Kill interrupt (hence mask all the interrupt besides the
         * FH_TX interrupt which is needed to load the firmware). If the
         * RF-Kill switch is toggled, we will find out after having loaded
         * the firmware and return the proper value to the caller.
         */
        iwm_enable_fw_load_int(sc);

        /* really make sure rfkill handshake bits are cleared */
        /* maybe we should write a few times more?  just to make sure */
        IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
        IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

        /* Load the given image to the HW */
        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
                ret = iwm_pcie_load_given_ucode_8000(sc, fw);
        else
                ret = iwm_pcie_load_given_ucode(sc, fw);

        /* XXX re-check RF-Kill state */

out:
        return ret;
}
2804
2805 static int
2806 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2807 {
2808         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2809                 .valid = htole32(valid_tx_ant),
2810         };
2811
2812         return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2813             IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2814 }
2815
2816 /* iwlwifi: mvm/fw.c */
/*
 * Send the PHY configuration and the calibration triggers for the
 * currently loaded ucode type to the firmware, synchronously.
 *
 * NOTE(review): phy_cfg_cmd is not zero-initialized; this assumes the
 * three assignments below cover every member of struct
 * iwm_phy_cfg_cmd (including any padding sent to the firmware) —
 * confirm against the structure definition.
 */
static int
iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
{
        struct iwm_phy_cfg_cmd phy_cfg_cmd;
        enum iwm_ucode_type ucode_type = sc->cur_ucode;

        /* Set parameters */
        phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
        phy_cfg_cmd.calib_control.event_trigger =
            sc->sc_default_calib[ucode_type].event_trigger;
        phy_cfg_cmd.calib_control.flow_trigger =
            sc->sc_default_calib[ucode_type].flow_trigger;

        IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
            "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
        return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
            sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
2835
/*
 * Notification-wait callback for the firmware ALIVE message.
 *
 * The response structure comes in three sizes (v1, v2, v3); the packet
 * payload length selects which layout to decode.  In every case the
 * error/log event table pointers and the scheduler base address are
 * extracted, and alive_data->valid is set from the reported status.
 * v2/v3 additionally carry the UMAC error table, which enables UMAC
 * log support when non-zero.
 *
 * Always returns TRUE so the notification wait completes.
 */
static int
iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
{
        struct iwm_mvm_alive_data *alive_data = data;
        struct iwm_mvm_alive_resp_ver1 *palive1;
        struct iwm_mvm_alive_resp_ver2 *palive2;
        struct iwm_mvm_alive_resp *palive;

        if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
                /* Version 1: no UMAC information. */
                palive1 = (void *)pkt->data;

                sc->support_umac_log = FALSE;
                sc->error_event_table =
                        le32toh(palive1->error_event_table_ptr);
                sc->log_event_table =
                        le32toh(palive1->log_event_table_ptr);
                alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);

                alive_data->valid = le16toh(palive1->status) ==
                                    IWM_ALIVE_STATUS_OK;
                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                            "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
                             le16toh(palive1->status), palive1->ver_type,
                             palive1->ver_subtype, palive1->flags);
        } else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
                /* Version 2: adds the UMAC error table and version. */
                palive2 = (void *)pkt->data;
                sc->error_event_table =
                        le32toh(palive2->error_event_table_ptr);
                sc->log_event_table =
                        le32toh(palive2->log_event_table_ptr);
                alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
                sc->umac_error_event_table =
                        le32toh(palive2->error_info_addr);

                alive_data->valid = le16toh(palive2->status) ==
                                    IWM_ALIVE_STATUS_OK;
                if (sc->umac_error_event_table)
                        sc->support_umac_log = TRUE;

                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                            "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
                            le16toh(palive2->status), palive2->ver_type,
                            palive2->ver_subtype, palive2->flags);

                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                            "UMAC version: Major - 0x%x, Minor - 0x%x\n",
                            palive2->umac_major, palive2->umac_minor);
        } else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
                /* Version 3: same fields, 32-bit UMAC version numbers. */
                palive = (void *)pkt->data;

                sc->error_event_table =
                        le32toh(palive->error_event_table_ptr);
                sc->log_event_table =
                        le32toh(palive->log_event_table_ptr);
                alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
                sc->umac_error_event_table =
                        le32toh(palive->error_info_addr);

                alive_data->valid = le16toh(palive->status) ==
                                    IWM_ALIVE_STATUS_OK;
                if (sc->umac_error_event_table)
                        sc->support_umac_log = TRUE;

                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                            "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
                            le16toh(palive->status), palive->ver_type,
                            palive->ver_subtype, palive->flags);

                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                            "UMAC version: Major - 0x%x, Minor - 0x%x\n",
                            le32toh(palive->umac_major),
                            le32toh(palive->umac_minor));
        }

        return TRUE;
}
2912
2913 static int
2914 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2915         struct iwm_rx_packet *pkt, void *data)
2916 {
2917         struct iwm_phy_db *phy_db = data;
2918
2919         if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2920                 if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2921                         device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2922                             __func__, pkt->hdr.code);
2923                 }
2924                 return TRUE;
2925         }
2926
2927         if (iwm_phy_db_set_section(phy_db, pkt)) {
2928                 device_printf(sc->sc_dev,
2929                     "%s: iwm_phy_db_set_section failed\n", __func__);
2930         }
2931
2932         return FALSE;
2933 }
2934
/*
 * Read the requested ucode image, start the firmware, and block until
 * the ALIVE notification arrives (iwm_alive_fn fills alive_data).
 *
 * On any failure sc->cur_ucode is restored to the previously loaded
 * type.  The softc lock is dropped across the notification wait.
 *
 * Returns 0 on success; the error from firmware read/start/wait, or
 * EIO if the firmware reported an invalid ALIVE status.
 */
static int
iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
        enum iwm_ucode_type ucode_type)
{
        struct iwm_notification_wait alive_wait;
        struct iwm_mvm_alive_data alive_data;
        const struct iwm_fw_sects *fw;
        enum iwm_ucode_type old_type = sc->cur_ucode;
        int error;
        static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };

        if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
                device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
                        error);
                return error;
        }
        fw = &sc->sc_fw.fw_sects[ucode_type];
        sc->cur_ucode = ucode_type;
        sc->ucode_loaded = FALSE;

        /* Register the ALIVE wait before starting the firmware. */
        memset(&alive_data, 0, sizeof(alive_data));
        iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
                                   alive_cmd, nitems(alive_cmd),
                                   iwm_alive_fn, &alive_data);

        error = iwm_start_fw(sc, fw);
        if (error) {
                device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
                sc->cur_ucode = old_type;
                /* Unregister the wait we never got to use. */
                iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
                return error;
        }

        /*
         * Some things may run in the background now, but we
         * just wait for the ALIVE notification here.
         */
        IWM_UNLOCK(sc);
        error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
                                      IWM_MVM_UCODE_ALIVE_TIMEOUT);
        IWM_LOCK(sc);
        if (error) {
                if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
                        /* Dump secure-boot status to aid diagnosis. */
                        device_printf(sc->sc_dev,
                            "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
                            iwm_read_prph(sc, IWM_SB_CPU_1_STATUS),
                            iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
                }
                sc->cur_ucode = old_type;
                return error;
        }

        if (!alive_data.valid) {
                device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
                    __func__);
                sc->cur_ucode = old_type;
                return EIO;
        }

        /* Hand the scheduler base address to the PCIe transport layer. */
        iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);

        /*
         * configure and operate fw paging mechanism.
         * driver configures the paging flow only once, CPU2 paging image
         * included in the IWM_UCODE_INIT image.
         */
        if (fw->paging_mem_size) {
                /* XXX implement FW paging */
                device_printf(sc->sc_dev,
                    "%s: XXX FW paging not implemented yet\n", __func__);
        }

        if (!error)
                sc->ucode_loaded = TRUE;
        return error;
}
3011
3012 /*
3013  * mvm misc bits
3014  */
3015
3016 /*
3017  * follows iwlwifi/fw.c
3018  */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	struct iwm_notification_wait calib_wait;
	/* Notifications that terminate the calibration wait below. */
	static const uint16_t init_complete[] = {
		IWM_INIT_COMPLETE_NOTIF,
		IWM_CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		device_printf(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	/*
	 * Register interest in the INIT-complete/PHY-DB notifications
	 * before starting the firmware so none can be missed.
	 */
	iwm_init_notification_wait(sc->sc_notif_wait,
				   &calib_wait,
				   init_complete,
				   nitems(init_complete),
				   iwm_wait_phy_db_entry,
				   sc->sc_phy_db);

	/* Will also start the device */
	ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
	if (ret) {
		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
		    ret);
		goto error;
	}

	if (justnvm) {
		/* Read nvm */
		ret = iwm_nvm_init(sc);
		if (ret) {
			device_printf(sc->sc_dev, "failed to read nvm\n");
			goto error;
		}
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
		/*
		 * NVM-only callers are done; jump to "error" merely to
		 * drop the now-unneeded notification wait (ret == 0 here).
		 */
		goto error;
	}

	ret = iwm_send_bt_init_conf(sc);
	if (ret) {
		device_printf(sc->sc_dev,
		    "failed to send bt coex configuration: %d\n", ret);
		goto error;
	}

	/* Init Smart FIFO. */
	ret = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
	if (ret)
		goto error;

	/* Send TX valid antennas before triggering calibrations */
	ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
	if (ret) {
		device_printf(sc->sc_dev,
		    "failed to send antennas before calibration: %d\n", ret);
		goto error;
	}

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	ret = iwm_send_phy_cfg_cmd(sc);
	if (ret) {
		device_printf(sc->sc_dev,
		    "%s: Failed to run INIT calibrations: %d\n",
		    __func__, ret);
		goto error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware.  Drop the softc lock while sleeping;
	 * NOTE(review): assumed iwm_wait_notification() consumes the
	 * wait entry on success/timeout, since only the error path
	 * calls iwm_remove_notification() — confirm against notif code.
	 */
	IWM_UNLOCK(sc);
	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
	    IWM_MVM_UCODE_CALIB_TIMEOUT);
	IWM_LOCK(sc);


	goto out;

error:
	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
out:
	return ret;
}
3111
3112 /*
3113  * receive side
3114  */
3115
3116 /* (re)stock rx ring, called at init-time and at runtime */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	bus_dmamap_t dmamap = NULL;
	bus_dma_segment_t seg;
	int nsegs, error;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
	if (m == NULL)
		return ENOBUFS;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	/*
	 * Load the new mbuf into the ring's spare map first, so the
	 * slot's existing buffer stays intact if the load fails.
	 */
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
	    &seg, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: can't map mbuf, error %d\n", __func__, error);
		goto fail;
	}

	if (data->m != NULL)
		bus_dmamap_unload(ring->data_dmat, data->map);

	/* Swap ring->spare_map with data->map */
	dmamap = data->map;
	data->map = ring->spare_map;
	ring->spare_map = dmamap;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
	data->m = m;

	/* Update RX descriptor. */
	/* Hardware takes the buffer address in 256-byte units (paddr >> 8). */
	KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
	ring->desc[idx] = htole32(seg.ds_addr >> 8);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	return 0;
fail:
	m_freem(m);
	return error;
}
3162
3163 /* iwlwifi: mvm/rx.c */
3164 #define IWM_RSSI_OFFSET 50
3165 static int
3166 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3167 {
3168         int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
3169         uint32_t agc_a, agc_b;
3170         uint32_t val;
3171
3172         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
3173         agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
3174         agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
3175
3176         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
3177         rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
3178         rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
3179
3180         /*
3181          * dBm = rssi dB - agc dB - constant.
3182          * Higher AGC (higher radio gain) means lower signal.
3183          */
3184         rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
3185         rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
3186         max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
3187
3188         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3189             "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
3190             rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
3191
3192         return max_rssi_dbm;
3193 }
3194
3195 /* iwlwifi: mvm/rx.c */
3196 /*
3197  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3198  * values are reported by the fw as positive values - need to negate
3199  * to obtain their dBM.  Account for missing antennas by replacing 0
3200  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3201  */
3202 static int
3203 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3204 {
3205         int energy_a, energy_b, energy_c, max_energy;
3206         uint32_t val;
3207
3208         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3209         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3210             IWM_RX_INFO_ENERGY_ANT_A_POS;
3211         energy_a = energy_a ? -energy_a : -256;
3212         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3213             IWM_RX_INFO_ENERGY_ANT_B_POS;
3214         energy_b = energy_b ? -energy_b : -256;
3215         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3216             IWM_RX_INFO_ENERGY_ANT_C_POS;
3217         energy_c = energy_c ? -energy_c : -256;
3218         max_energy = MAX(energy_a, energy_b);
3219         max_energy = MAX(max_energy, energy_c);
3220
3221         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3222             "energy In A %d B %d C %d , and max %d\n",
3223             energy_a, energy_b, energy_c, max_energy);
3224
3225         return max_energy;
3226 }
3227
3228 static void
3229 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
3230         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3231 {
3232         struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3233
3234         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3235         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3236
3237         memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3238 }
3239
3240 /*
3241  * Retrieve the average noise (in dBm) among receivers.
3242  */
3243 static int
3244 iwm_get_noise(struct iwm_softc *sc,
3245     const struct iwm_mvm_statistics_rx_non_phy *stats)
3246 {
3247         int i, total, nbant, noise;
3248
3249         total = nbant = noise = 0;
3250         for (i = 0; i < 3; i++) {
3251                 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3252                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3253                     __func__,
3254                     i,
3255                     noise);
3256
3257                 if (noise) {
3258                         total += noise;
3259                         nbant++;
3260                 }
3261         }
3262
3263         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3264             __func__, nbant, total);
3265 #if 0
3266         /* There should be at least one antenna but check anyway. */
3267         return (nbant == 0) ? -127 : (total / nbant) - 107;
3268 #else
3269         /* For now, just hard-code it to -96 to be safe */
3270         return (-96);
3271 #endif
3272 }
3273
3274 /*
3275  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3276  *
3277  * Handles the actual data of the Rx packet from the fw
3278  */
3279 static void
3280 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
3281         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3282 {
3283         struct ieee80211com *ic = &sc->sc_ic;
3284         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3285         struct ieee80211_frame *wh;
3286         struct ieee80211_node *ni;
3287         struct ieee80211_rx_stats rxs;
3288         struct mbuf *m;
3289         struct iwm_rx_phy_info *phy_info;
3290         struct iwm_rx_mpdu_res_start *rx_res;
3291         uint32_t len;
3292         uint32_t rx_pkt_status;
3293         int rssi;
3294
3295         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3296
3297         phy_info = &sc->sc_last_phy_info;
3298         rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3299         wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3300         len = le16toh(rx_res->byte_count);
3301         rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3302
3303         m = data->m;
3304         m->m_data = pkt->data + sizeof(*rx_res);
3305         m->m_pkthdr.len = m->m_len = len;
3306
3307         if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3308                 device_printf(sc->sc_dev,
3309                     "dsp size out of range [0,20]: %d\n",
3310                     phy_info->cfg_phy_cnt);
3311                 goto fail;
3312         }
3313
3314         if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3315             !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3316                 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3317                     "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3318                 goto fail;
3319         }
3320
3321         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
3322                 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3323         } else {
3324                 rssi = iwm_mvm_calc_rssi(sc, phy_info);
3325         }
3326
3327         /* Note: RSSI is absolute (ie a -ve value) */
3328         if (rssi < IWM_MIN_DBM)
3329                 rssi = IWM_MIN_DBM;
3330         else if (rssi > IWM_MAX_DBM)
3331                 rssi = IWM_MAX_DBM;
3332
3333         /* Map it to relative value */
3334         rssi = rssi - sc->sc_noise;
3335
3336         /* replenish ring for the buffer we're going to feed to the sharks */
3337         if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3338                 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3339                     __func__);
3340                 goto fail;
3341         }
3342
3343         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3344             "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3345
3346         ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3347
3348         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3349             "%s: phy_info: channel=%d, flags=0x%08x\n",
3350             __func__,
3351             le16toh(phy_info->channel),
3352             le16toh(phy_info->phy_flags));
3353
3354         /*
3355          * Populate an RX state struct with the provided information.
3356          */
3357         bzero(&rxs, sizeof(rxs));
3358         rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3359         rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3360         rxs.c_ieee = le16toh(phy_info->channel);
3361         if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3362                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3363         } else {
3364                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3365         }
3366
3367         /* rssi is in 1/2db units */
3368         rxs.rssi = rssi * 2;
3369         rxs.nf = sc->sc_noise;
3370
3371         if (ieee80211_radiotap_active_vap(vap)) {
3372                 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3373
3374                 tap->wr_flags = 0;
3375                 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3376                         tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3377                 tap->wr_chan_freq = htole16(rxs.c_freq);
3378                 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3379                 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3380                 tap->wr_dbm_antsignal = (int8_t)rssi;
3381                 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3382                 tap->wr_tsft = phy_info->system_timestamp;
3383                 switch (phy_info->rate) {
3384                 /* CCK rates. */
3385                 case  10: tap->wr_rate =   2; break;
3386                 case  20: tap->wr_rate =   4; break;
3387                 case  55: tap->wr_rate =  11; break;
3388                 case 110: tap->wr_rate =  22; break;
3389                 /* OFDM rates. */
3390                 case 0xd: tap->wr_rate =  12; break;
3391                 case 0xf: tap->wr_rate =  18; break;
3392                 case 0x5: tap->wr_rate =  24; break;
3393                 case 0x7: tap->wr_rate =  36; break;
3394                 case 0x9: tap->wr_rate =  48; break;
3395                 case 0xb: tap->wr_rate =  72; break;
3396                 case 0x1: tap->wr_rate =  96; break;
3397                 case 0x3: tap->wr_rate = 108; break;
3398                 /* Unknown rate: should not happen. */
3399                 default:  tap->wr_rate =   0;
3400                 }
3401         }
3402
3403         IWM_UNLOCK(sc);
3404         if (ni != NULL) {
3405                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3406                 ieee80211_input_mimo(ni, m, &rxs);
3407                 ieee80211_free_node(ni);
3408         } else {
3409                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3410                 ieee80211_input_mimo_all(ic, m, &rxs);
3411         }
3412         IWM_LOCK(sc);
3413
3414         return;
3415
3416 fail:   counter_u64_add(ic->ic_ierrors, 1);
3417 }
3418
3419 static int
3420 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3421         struct iwm_node *in)
3422 {
3423         struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3424         struct ieee80211_node *ni = &in->in_ni;
3425         struct ieee80211vap *vap = ni->ni_vap;
3426         int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3427         int failack = tx_resp->failure_frame;
3428
3429         KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3430
3431         /* Update rate control statistics. */
3432         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3433             __func__,
3434             (int) le16toh(tx_resp->status.status),
3435             (int) le16toh(tx_resp->status.sequence),
3436             tx_resp->frame_count,
3437             tx_resp->bt_kill_count,
3438             tx_resp->failure_rts,
3439             tx_resp->failure_frame,
3440             le32toh(tx_resp->initial_rate),
3441             (int) le16toh(tx_resp->wireless_media_time));
3442
3443         if (status != IWM_TX_STATUS_SUCCESS &&
3444             status != IWM_TX_STATUS_DIRECT_DONE) {
3445                 ieee80211_ratectl_tx_complete(vap, ni,
3446                     IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3447                 return (1);
3448         } else {
3449                 ieee80211_ratectl_tx_complete(vap, ni,
3450                     IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3451                 return (0);
3452         }
3453 }
3454
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;
	struct mbuf *m = txd->m;
	int status;

	KASSERT(txd->done == 0, ("txd not done"));
	KASSERT(txd->in != NULL, ("txd without node"));
	KASSERT(txd->m != NULL, ("txd without mbuf"));

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* A completion arrived, so the watchdog can be reset. */
	sc->sc_tx_timer = 0;

	/* 0 on success, 1 on failure (see iwm_mvm_rx_tx_cmd_single). */
	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "free txd %p, in %p\n", txd, txd->in);
	/* Clear the slot before handing the mbuf/node refs back to net80211. */
	txd->done = 1;
	txd->m = NULL;
	txd->in = NULL;

	ieee80211_tx_complete(&in->in_ni, m, status);

	/* Restart transmit once the ring drains below the low watermark. */
	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0) {
			iwm_start(sc);
		}
	}
}
3497
3498 /*
3499  * transmit side
3500  */
3501
3502 /*
3503  * Process a "command done" firmware notification.  This is where we wakeup
3504  * processes waiting for a synchronous command completion.
3505  * from if_iwn
3506  */
static void
iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
	struct iwm_tx_data *data;

	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
		return;	/* Not a command ack. */
	}

	/* XXX wide commands? */
	IWM_DPRINTF(sc, IWM_DEBUG_CMD,
	    "cmd notification type 0x%x qid %d idx %d\n",
	    pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);

	data = &ring->data[pkt->hdr.idx];

	/* If the command was mapped in an mbuf, free it. */
	if (data->m != NULL) {
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	/* Wake any thread sleeping on this descriptor slot for a sync cmd. */
	wakeup(&ring->desc[pkt->hdr.idx]);

	/* Sanity-check ring accounting: this ack should match ring state. */
	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
		device_printf(sc->sc_dev,
		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
		/* XXX call iwm_force_nmi() */
	}

	KASSERT(ring->queued > 0, ("ring->queued is empty?"));
	ring->queued--;
	/* Last outstanding command: release the cmd-in-flight state. */
	if (ring->queued == 0)
		iwm_pcie_clear_cmd_in_flight(sc);
}
3546
#if 0
/*
 * necessary only for block ack mode
 */
/*
 * Update the byte-count table the TX scheduler reads for (qid, idx).
 * Currently compiled out; kept for future block-ack support.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	/* Duplicate entry past the ring end, mirroring the iwlwifi layout. */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
}
#endif
3579
3580 /*
3581  * Take an 802.11 (non-n) rate, find the relevant rate
3582  * table entry.  return the index into in_ridx[].
3583  *
3584  * The caller then uses that index back into in_ridx
3585  * to figure out the rate index programmed /into/
3586  * the firmware for this given node.
3587  */
3588 static int
3589 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3590     uint8_t rate)
3591 {
3592         int i;
3593         uint8_t r;
3594
3595         for (i = 0; i < nitems(in->in_ridx); i++) {
3596                 r = iwm_rates[in->in_ridx[i]].rate;
3597                 if (rate == r)
3598                         return (i);
3599         }
3600
3601         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3602             "%s: couldn't find an entry for rate=%d\n",
3603             __func__,
3604             rate);
3605
3606         /* XXX Return the first */
3607         /* XXX TODO: have it return the /lowest/ */
3608         return (0);
3609 }
3610
3611 static int
3612 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3613 {
3614         int i;
3615
3616         for (i = 0; i < nitems(iwm_rates); i++) {
3617                 if (iwm_rates[i].rate == rate)
3618                         return (i);
3619         }
3620         /* XXX error? */
3621         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3622             "%s: couldn't find an entry for rate=%d\n",
3623             __func__,
3624             rate);
3625         return (0);
3626 }
3627
3628 /*
3629  * Fill in the rate related information for a transmit command.
3630  */
static const struct iwm_rate *
iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
	struct mbuf *m, struct iwm_tx_cmd *tx)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_frame *wh;
	const struct ieee80211_txparam *tp = ni->ni_txparms;
	const struct iwm_rate *rinfo;
	int type;
	int ridx, rate_flags;

	wh = mtod(m, struct ieee80211_frame *);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;

	/*
	 * Choose a rate index: fixed net80211 txparams for management,
	 * multicast, fixed-rate, and EAPOL frames; the rate-control
	 * module plus the node's programmed rate set for data frames.
	 */
	if (type == IEEE80211_FC0_TYPE_MGT) {
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
	} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
	} else if (m->m_flags & M_EAPOL) {
		/* EAPOL frames go out at the (robust) management rate. */
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: EAPOL\n", __func__);
	} else if (type == IEEE80211_FC0_TYPE_DATA) {
		int i;

		/* for data frames, use RS table */
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
		/* XXX pass pktlen */
		(void) ieee80211_ratectl_rate(ni, NULL, 0);
		i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
		ridx = in->in_ridx[i];

		/* This is the index into the programmed table */
		tx->initial_rate_index = i;
		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);

		IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
		    "%s: start with i=%d, txrate %d\n",
		    __func__, i, iwm_rates[ridx].rate);
	} else {
		/* Anything else (e.g. control frames): management rate. */
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DEFAULT (%d)\n",
		    __func__, tp->mgmtrate);
	}

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
	    "%s: frame type=%d txrate %d\n",
		__func__, type, iwm_rates[ridx].rate);

	rinfo = &iwm_rates[ridx];

	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
	    __func__, ridx,
	    rinfo->rate,
	    !! (IWM_RIDX_IS_CCK(ridx))
	    );

	/* XXX TODO: hard-coded TX antenna? */
	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
	if (IWM_RIDX_IS_CCK(ridx))
		rate_flags |= IWM_RATE_MCS_CCK_MSK;
	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);

	return rinfo;
}
3707
3708 #define TB0_SIZE 16
3709 static int
3710 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3711 {
3712         struct ieee80211com *ic = &sc->sc_ic;
3713         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3714         struct iwm_node *in = IWM_NODE(ni);
3715         struct iwm_tx_ring *ring;
3716         struct iwm_tx_data *data;
3717         struct iwm_tfd *desc;
3718         struct iwm_device_cmd *cmd;
3719         struct iwm_tx_cmd *tx;
3720         struct ieee80211_frame *wh;
3721         struct ieee80211_key *k = NULL;
3722         struct mbuf *m1;
3723         const struct iwm_rate *rinfo;
3724         uint32_t flags;
3725         u_int hdrlen;
3726         bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3727         int nsegs;
3728         uint8_t tid, type;
3729         int i, totlen, error, pad;
3730
3731         wh = mtod(m, struct ieee80211_frame *);
3732         hdrlen = ieee80211_anyhdrsize(wh);
3733         type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3734         tid = 0;
3735         ring = &sc->txq[ac];
3736         desc = &ring->desc[ring->cur];
3737         memset(desc, 0, sizeof(*desc));
3738         data = &ring->data[ring->cur];
3739
3740         /* Fill out iwm_tx_cmd to send to the firmware */
3741         cmd = &ring->cmd[ring->cur];
3742         cmd->hdr.code = IWM_TX_CMD;
3743         cmd->hdr.flags = 0;
3744         cmd->hdr.qid = ring->qid;
3745         cmd->hdr.idx = ring->cur;
3746
3747         tx = (void *)cmd->data;
3748         memset(tx, 0, sizeof(*tx));
3749
3750         rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
3751
3752         /* Encrypt the frame if need be. */
3753         if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3754                 /* Retrieve key for TX && do software encryption. */
3755                 k = ieee80211_crypto_encap(ni, m);
3756                 if (k == NULL) {
3757                         m_freem(m);
3758                         return (ENOBUFS);
3759                 }
3760                 /* 802.11 header may have moved. */
3761                 wh = mtod(m, struct ieee80211_frame *);
3762         }
3763
3764         if (ieee80211_radiotap_active_vap(vap)) {
3765                 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3766
3767                 tap->wt_flags = 0;
3768                 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3769                 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3770                 tap->wt_rate = rinfo->rate;
3771                 if (k != NULL)
3772                         tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3773                 ieee80211_radiotap_tx(vap, m);
3774         }
3775
3776
3777         totlen = m->m_pkthdr.len;
3778
3779         flags = 0;
3780         if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3781                 flags |= IWM_TX_CMD_FLG_ACK;
3782         }
3783
3784         if (type == IEEE80211_FC0_TYPE_DATA
3785             && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3786             && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3787                 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3788         }
3789
3790         if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3791             type != IEEE80211_FC0_TYPE_DATA)
3792                 tx->sta_id = sc->sc_aux_sta.sta_id;
3793         else
3794                 tx->sta_id = IWM_STATION_ID;
3795
3796         if (type == IEEE80211_FC0_TYPE_MGT) {
3797                 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3798
3799                 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3800                     subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3801                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3802                 } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3803                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3804                 } else {
3805                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3806                 }
3807         } else {
3808                 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3809         }
3810
3811         if (hdrlen & 3) {
3812                 /* First segment length must be a multiple of 4. */
3813                 flags |= IWM_TX_CMD_FLG_MH_PAD;
3814                 pad = 4 - (hdrlen & 3);
3815         } else
3816                 pad = 0;
3817
3818         tx->driver_txop = 0;
3819         tx->next_frame_len = 0;
3820
3821         tx->len = htole16(totlen);
3822         tx->tid_tspec = tid;
3823         tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3824
3825         /* Set physical address of "scratch area". */
3826         tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3827         tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3828
3829         /* Copy 802.11 header in TX command. */
3830         memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3831
3832         flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3833
3834         tx->sec_ctl = 0;
3835         tx->tx_flags |= htole32(flags);
3836
3837         /* Trim 802.11 header. */
3838         m_adj(m, hdrlen);
3839         error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3840             segs, &nsegs, BUS_DMA_NOWAIT);
3841         if (error != 0) {
3842                 if (error != EFBIG) {
3843                         device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3844                             error);
3845                         m_freem(m);
3846                         return error;
3847                 }
3848                 /* Too many DMA segments, linearize mbuf. */
3849                 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3850                 if (m1 == NULL) {
3851                         device_printf(sc->sc_dev,
3852                             "%s: could not defrag mbuf\n", __func__);
3853                         m_freem(m);
3854                         return (ENOBUFS);
3855                 }
3856                 m = m1;
3857
3858                 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3859                     segs, &nsegs, BUS_DMA_NOWAIT);
3860                 if (error != 0) {
3861                         device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3862                             error);
3863                         m_freem(m);
3864                         return error;
3865                 }
3866         }
3867         data->m = m;
3868         data->in = in;
3869         data->done = 0;
3870
3871         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3872             "sending txd %p, in %p\n", data, data->in);
3873         KASSERT(data->in != NULL, ("node is NULL"));
3874
3875         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3876             "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3877             ring->qid, ring->cur, totlen, nsegs,
3878             le32toh(tx->tx_flags),
3879             le32toh(tx->rate_n_flags),
3880             tx->initial_rate_index
3881             );
3882
3883         /* Fill TX descriptor. */
3884         desc->num_tbs = 2 + nsegs;
3885
3886         desc->tbs[0].lo = htole32(data->cmd_paddr);
3887         desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3888             (TB0_SIZE << 4);
3889         desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3890         desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3891             ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3892               + hdrlen + pad - TB0_SIZE) << 4);
3893
3894         /* Other DMA segments are for data payload. */
3895         for (i = 0; i < nsegs; i++) {
3896                 seg = &segs[i];
3897                 desc->tbs[i+2].lo = htole32(seg->ds_addr);
3898                 desc->tbs[i+2].hi_n_len = \
3899                     htole16(iwm_get_dma_hi_addr(seg->ds_addr))
3900                     | ((seg->ds_len) << 4);
3901         }
3902
3903         bus_dmamap_sync(ring->data_dmat, data->map,
3904             BUS_DMASYNC_PREWRITE);
3905         bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3906             BUS_DMASYNC_PREWRITE);
3907         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3908             BUS_DMASYNC_PREWRITE);
3909
3910 #if 0
3911         iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3912 #endif
3913
3914         /* Kick TX ring. */
3915         ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3916         IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3917
3918         /* Mark TX ring as full if we reach a certain threshold. */
3919         if (++ring->queued > IWM_TX_RING_HIMARK) {
3920                 sc->qfullmsk |= 1 << ring->qid;
3921         }
3922
3923         return 0;
3924 }
3925
3926 static int
3927 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3928     const struct ieee80211_bpf_params *params)
3929 {
3930         struct ieee80211com *ic = ni->ni_ic;
3931         struct iwm_softc *sc = ic->ic_softc;
3932         int error = 0;
3933
3934         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3935             "->%s begin\n", __func__);
3936
3937         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3938                 m_freem(m);
3939                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3940                     "<-%s not RUNNING\n", __func__);
3941                 return (ENETDOWN);
3942         }
3943
3944         IWM_LOCK(sc);
3945         /* XXX fix this */
3946         if (params == NULL) {
3947                 error = iwm_tx(sc, m, ni, 0);
3948         } else {
3949                 error = iwm_tx(sc, m, ni, 0);
3950         }
3951         sc->sc_tx_timer = 5;
3952         IWM_UNLOCK(sc);
3953
3954         return (error);
3955 }
3956
3957 /*
3958  * mvm/tx.c
3959  */
3960
3961 /*
3962  * Note that there are transports that buffer frames before they reach
3963  * the firmware. This means that after flush_tx_path is called, the
3964  * queue might not be empty. The race-free way to handle this is to:
3965  * 1) set the station as draining
3966  * 2) flush the Tx path
3967  * 3) wait for the transport queues to be empty
3968  */
3969 int
3970 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3971 {
3972         int ret;
3973         struct iwm_tx_path_flush_cmd flush_cmd = {
3974                 .queues_ctl = htole32(tfd_msk),
3975                 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3976         };
3977
3978         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3979             sizeof(flush_cmd), &flush_cmd);
3980         if (ret)
3981                 device_printf(sc->sc_dev,
3982                     "Flushing tx queue failed: %d\n", ret);
3983         return ret;
3984 }
3985
3986 /*
3987  * BEGIN mvm/sta.c
3988  */
3989
3990 static int
3991 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3992         struct iwm_mvm_add_sta_cmd_v7 *cmd, int *status)
3993 {
3994         return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
3995             cmd, status);
3996 }
3997
3998 /* send station add/update command to firmware */
3999 static int
4000 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
4001 {
4002         struct iwm_mvm_add_sta_cmd_v7 add_sta_cmd;
4003         int ret;
4004         uint32_t status;
4005
4006         memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
4007
4008         add_sta_cmd.sta_id = IWM_STATION_ID;
4009         add_sta_cmd.mac_id_n_color
4010             = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
4011                 IWM_DEFAULT_COLOR));
4012         if (!update) {
4013                 int ac;
4014                 for (ac = 0; ac < WME_NUM_AC; ac++) {
4015                         add_sta_cmd.tfd_queue_msk |=
4016                             htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
4017                 }
4018                 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
4019         }
4020         add_sta_cmd.add_modify = update ? 1 : 0;
4021         add_sta_cmd.station_flags_msk
4022             |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
4023         add_sta_cmd.tid_disable_tx = htole16(0xffff);
4024         if (update)
4025                 add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
4026
4027         status = IWM_ADD_STA_SUCCESS;
4028         ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
4029         if (ret)
4030                 return ret;
4031
4032         switch (status) {
4033         case IWM_ADD_STA_SUCCESS:
4034                 break;
4035         default:
4036                 ret = EIO;
4037                 device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
4038                 break;
4039         }
4040
4041         return ret;
4042 }
4043
/* Add the BSS station to the firmware's station table. */
static int
iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return (iwm_mvm_sta_send_to_fw(sc, in, 0));
}
4049
/* Update an already-added station in the firmware's station table. */
static int
iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return (iwm_mvm_sta_send_to_fw(sc, in, 1));
}
4055
4056 static int
4057 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
4058         const uint8_t *addr, uint16_t mac_id, uint16_t color)
4059 {
4060         struct iwm_mvm_add_sta_cmd_v7 cmd;
4061         int ret;
4062         uint32_t status;
4063
4064         memset(&cmd, 0, sizeof(cmd));
4065         cmd.sta_id = sta->sta_id;
4066         cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
4067
4068         cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
4069         cmd.tid_disable_tx = htole16(0xffff);
4070
4071         if (addr)
4072                 IEEE80211_ADDR_COPY(cmd.addr, addr);
4073
4074         ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
4075         if (ret)
4076                 return ret;
4077
4078         switch (status) {
4079         case IWM_ADD_STA_SUCCESS:
4080                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
4081                     "%s: Internal station added.\n", __func__);
4082                 return 0;
4083         default:
4084                 device_printf(sc->sc_dev,
4085                     "%s: Add internal station failed, status=0x%x\n",
4086                     __func__, status);
4087                 ret = EIO;
4088                 break;
4089         }
4090         return ret;
4091 }
4092
4093 static int
4094 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
4095 {
4096         int ret;
4097
4098         sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
4099         sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
4100
4101         ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
4102         if (ret)
4103                 return ret;
4104
4105         ret = iwm_mvm_add_int_sta_common(sc,
4106             &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
4107
4108         if (ret)
4109                 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
4110         return ret;
4111 }
4112
4113 /*
4114  * END mvm/sta.c
4115  */
4116
4117 /*
4118  * BEGIN mvm/quota.c
4119  */
4120
4121 static int
4122 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
4123 {
4124         struct iwm_time_quota_cmd cmd;
4125         int i, idx, ret, num_active_macs, quota, quota_rem;
4126         int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
4127         int n_ifs[IWM_MAX_BINDINGS] = {0, };
4128         uint16_t id;
4129
4130         memset(&cmd, 0, sizeof(cmd));
4131
4132         /* currently, PHY ID == binding ID */
4133         if (in) {
4134                 id = in->in_phyctxt->id;
4135                 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
4136                 colors[id] = in->in_phyctxt->color;
4137
4138                 if (1)
4139                         n_ifs[id] = 1;
4140         }
4141
4142         /*
4143          * The FW's scheduling session consists of
4144          * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
4145          * equally between all the bindings that require quota
4146          */
4147         num_active_macs = 0;
4148         for (i = 0; i < IWM_MAX_BINDINGS; i++) {
4149                 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
4150                 num_active_macs += n_ifs[i];
4151         }
4152
4153         quota = 0;
4154         quota_rem = 0;
4155         if (num_active_macs) {
4156                 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
4157                 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
4158         }
4159
4160         for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
4161                 if (colors[i] < 0)
4162                         continue;
4163
4164                 cmd.quotas[idx].id_and_color =
4165                         htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
4166
4167                 if (n_ifs[i] <= 0) {
4168                         cmd.quotas[idx].quota = htole32(0);
4169                         cmd.quotas[idx].max_duration = htole32(0);
4170                 } else {
4171                         cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
4172                         cmd.quotas[idx].max_duration = htole32(0);
4173                 }
4174                 idx++;
4175         }
4176
4177         /* Give the remainder of the session to the first binding */
4178         cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
4179
4180         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
4181             sizeof(cmd), &cmd);
4182         if (ret)
4183                 device_printf(sc->sc_dev,
4184                     "%s: Failed to send quota: %d\n", __func__, ret);
4185         return ret;
4186 }
4187
4188 /*
4189  * END mvm/quota.c
4190  */
4191
4192 /*
4193  * ieee80211 routines
4194  */
4195
4196 /*
4197  * Change to AUTH state in 80211 state machine.  Roughly matches what
4198  * Linux does in bss_info_changed().
4199  */
4200 static int
4201 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
4202 {
4203         struct ieee80211_node *ni;
4204         struct iwm_node *in;
4205         struct iwm_vap *iv = IWM_VAP(vap);
4206         uint32_t duration;
4207         int error;
4208
4209         /*
4210          * XXX i have a feeling that the vap node is being
4211          * freed from underneath us. Grr.
4212          */
4213         ni = ieee80211_ref_node(vap->iv_bss);
4214         in = IWM_NODE(ni);
4215         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4216             "%s: called; vap=%p, bss ni=%p\n",
4217             __func__,
4218             vap,
4219             ni);
4220
4221         in->in_assoc = 0;
4222
4223         error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
4224         if (error != 0)
4225                 return error;
4226
4227         error = iwm_allow_mcast(vap, sc);
4228         if (error) {
4229                 device_printf(sc->sc_dev,
4230                     "%s: failed to set multicast\n", __func__);
4231                 goto out;
4232         }
4233
4234         /*
4235          * This is where it deviates from what Linux does.
4236          *
4237          * Linux iwlwifi doesn't reset the nic each time, nor does it
4238          * call ctxt_add() here.  Instead, it adds it during vap creation,
4239          * and always does a mac_ctx_changed().
4240          *
4241          * The openbsd port doesn't attempt to do that - it reset things
4242          * at odd states and does the add here.
4243          *
4244          * So, until the state handling is fixed (ie, we never reset
4245          * the NIC except for a firmware failure, which should drag
4246          * the NIC back to IDLE, re-setup and re-add all the mac/phy
4247          * contexts that are required), let's do a dirty hack here.
4248          */
4249         if (iv->is_uploaded) {
4250                 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4251                         device_printf(sc->sc_dev,
4252                             "%s: failed to update MAC\n", __func__);
4253                         goto out;
4254                 }
4255                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4256                     in->in_ni.ni_chan, 1, 1)) != 0) {
4257                         device_printf(sc->sc_dev,
4258                             "%s: failed update phy ctxt\n", __func__);
4259                         goto out;
4260                 }
4261                 in->in_phyctxt = &sc->sc_phyctxt[0];
4262
4263                 if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
4264                         device_printf(sc->sc_dev,
4265                             "%s: binding update cmd\n", __func__);
4266                         goto out;
4267                 }
4268                 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4269                         device_printf(sc->sc_dev,
4270                             "%s: failed to update sta\n", __func__);
4271                         goto out;
4272                 }
4273         } else {
4274                 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4275                         device_printf(sc->sc_dev,
4276                             "%s: failed to add MAC\n", __func__);
4277                         goto out;
4278                 }
4279                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4280                     in->in_ni.ni_chan, 1, 1)) != 0) {
4281                         device_printf(sc->sc_dev,
4282                             "%s: failed add phy ctxt!\n", __func__);
4283                         error = ETIMEDOUT;
4284                         goto out;
4285                 }
4286                 in->in_phyctxt = &sc->sc_phyctxt[0];
4287
4288                 if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
4289                         device_printf(sc->sc_dev,
4290                             "%s: binding add cmd\n", __func__);
4291                         goto out;
4292                 }
4293                 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4294                         device_printf(sc->sc_dev,
4295                             "%s: failed to add sta\n", __func__);
4296                         goto out;
4297                 }
4298         }
4299
4300         /*
4301          * Prevent the FW from wandering off channel during association
4302          * by "protecting" the session with a time event.
4303          */
4304         /* XXX duration is in units of TU, not MS */
4305         duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4306         iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
4307         DELAY(100);
4308
4309         error = 0;
4310 out:
4311         ieee80211_free_node(ni);
4312         return (error);
4313 }
4314
4315 static int
4316 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
4317 {
4318         struct iwm_node *in = IWM_NODE(vap->iv_bss);
4319         int error;
4320
4321         if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4322                 device_printf(sc->sc_dev,
4323                     "%s: failed to update STA\n", __func__);
4324                 return error;
4325         }
4326
4327         in->in_assoc = 1;
4328         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4329                 device_printf(sc->sc_dev,
4330                     "%s: failed to update MAC\n", __func__);
4331                 return error;
4332         }
4333
4334         return 0;
4335 }
4336
/*
 * Tear down the RUN-state association by flushing the tx path and doing a
 * full device reset (see the comment below for why the "proper" teardown
 * sequence is not used).  'in' may be NULL; when non-NULL its association
 * flag is cleared.  Always returns 0.
 */
static int
iwm_release(struct iwm_softc *sc, struct iwm_node *in)
{
	uint32_t tfd_msk;

	/*
	 * Ok, so *technically* the proper set of calls for going
	 * from RUN back to SCAN is:
	 *
	 * iwm_mvm_power_mac_disable(sc, in);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_rm_sta(sc, in);
	 * iwm_mvm_update_quotas(sc, NULL);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_binding_remove_vif(sc, in);
	 * iwm_mvm_mac_ctxt_remove(sc, in);
	 *
	 * However, that freezes the device not matter which permutations
	 * and modifications are attempted.  Obviously, this driver is missing
	 * something since it works in the Linux driver, but figuring out what
	 * is missing is a little more complicated.  Now, since we're going
	 * back to nothing anyway, we'll just do a complete device reset.
	 * Up your's, device!
	 */
	/*
	 * Just using 0xf for the queues mask is fine as long as we only
	 * get here from RUN state.
	 */
	tfd_msk = 0xf;
	/* Drop any frames still queued in software before flushing hw. */
	mbufq_drain(&sc->sc_snd);
	iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
	/*
	 * We seem to get away with just synchronously sending the
	 * IWM_TXPATH_FLUSH command.
	 */
//      iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
	/* Full reset: stop the device and bring the hardware back up. */
	iwm_stop_device(sc);
	iwm_init_hw(sc);
	if (in)
		in->in_assoc = 0;
	return 0;

#if 0
	/* Reference implementation of the "proper" teardown, kept for
	 * when the freeze described above is understood.  Unreachable. */
	int error;

	iwm_mvm_power_mac_disable(sc, in);

	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
		return error;
	}

	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
		return error;
	}
	error = iwm_mvm_rm_sta(sc, in);
	in->in_assoc = 0;
	iwm_mvm_update_quotas(sc, NULL);
	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
		return error;
	}
	iwm_mvm_binding_remove_vif(sc, in);

	iwm_mvm_mac_ctxt_remove(sc, in);

	return error;
#endif
}
4407
4408 static struct ieee80211_node *
4409 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4410 {
4411         return malloc(sizeof (struct iwm_node), M_80211_NODE,
4412             M_NOWAIT | M_ZERO);
4413 }
4414
/*
 * Build the link-quality (rate selection) command for a node: map the
 * node's negotiated legacy rates to hardware rate indices (highest rate
 * first) and populate in->in_lq for a later IWM_LQ_CMD.
 */
static void
iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
//      int txant = 0;

	/* The firmware table has a fixed size; refuse oversized rate sets. */
	if (nrates > nitems(lq->rs_table)) {
		device_printf(sc->sc_dev,
		    "%s: node supports %d rates, driver handles "
		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
		return;
	}
	if (nrates == 0) {
		device_printf(sc->sc_dev,
		    "%s: node supports 0 rates, odd!\n", __func__);
		return;
	}

	/*
	 * XXX .. and most of iwm_node is not initialised explicitly;
	 * it's all just 0x0 passed to the firmware.
	 */

	/* first figure out which rates we should support */
	/* XXX TODO: this isn't 11n aware /at all/ */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
	    "%s: nrates=%d\n", __func__, nrates);

	/*
	 * Loop over nrates and populate in_ridx from the highest
	 * rate to the lowest rate.  Remember, in_ridx[] has
	 * IEEE80211_RATE_MAXSIZE entries!
	 */
	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
		/* (nrates - 1) - i walks the rate set from highest to lowest. */
		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX) {
			/*
			 * NOTE(review): on this path in_ridx[i] keeps the
			 * memset value of -1, and the table-building loop
			 * below will index iwm_rates[] with it — looks like
			 * a potential out-of-bounds read; confirm and guard.
			 */
			device_printf(sc->sc_dev,
			    "%s: WARNING: device rate for %d not found!\n",
			    __func__, rate);
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
			    __func__,
			    i,
			    rate,
			    ridx);
			in->in_ridx[i] = ridx;
		}
	}

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/* For HT, always enable RTS/CTS to avoid excessive retries. */
	if (ni->ni_flags & IEEE80211_NODE_HT)
		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	/*
	 * XXX TODO: this should be looping over the min of nrates
	 * and LQ_MAX_RETRY_NUM.  Sigh.
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

#if 0
		if (txant == 0)
			txant = iwm_mvm_get_valid_tx_ant(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;
#else
		/* Use the full valid-antenna mask for every entry. */
		nextant = iwm_mvm_get_valid_tx_ant(sc);
#endif
		/*
		 * Map the rate id into a rate index into
		 * our hardware table containing the
		 * configuration to use for this rate.
		 */
		ridx = in->in_ridx[i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "station rate i=%d, rate=%d, hw=%x\n",
		    i, iwm_rates[ridx].rate, tab);
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	/* 'tab' still holds the last (lowest) rate entry from the loop above. */
	for (i = nrates; i < nitems(lq->rs_table); i++) {
		KASSERT(tab != 0, ("invalid tab"));
		lq->rs_table[i] = htole32(tab);
	}
}
4533
4534 static int
4535 iwm_media_change(struct ifnet *ifp)
4536 {
4537         struct ieee80211vap *vap = ifp->if_softc;
4538         struct ieee80211com *ic = vap->iv_ic;
4539         struct iwm_softc *sc = ic->ic_softc;
4540         int error;
4541
4542         error = ieee80211_media_change(ifp);
4543         if (error != ENETRESET)
4544                 return error;
4545
4546         IWM_LOCK(sc);
4547         if (ic->ic_nrunning > 0) {
4548                 iwm_stop(sc);
4549                 iwm_init(sc);
4550         }
4551         IWM_UNLOCK(sc);
4552         return error;
4553 }
4554
4555
/*
 * net80211 state-machine hook.  Performs the driver-side work for each
 * state transition, then chains to the stack's own newstate handler.
 *
 * Locking: entered with the ic lock held; it is dropped while the driver
 * lock is held and re-taken before returning.  The RUN->{SCAN,AUTH,ASSOC}
 * path below additionally bounces both locks to force an intermediate
 * transition through INIT — the ordering of those lock operations is
 * deliberate and must not be rearranged.
 */
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_node *in;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
	    "switching state %s -> %s\n",
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate]);
	IEEE80211_UNLOCK(ic);
	IWM_LOCK(sc);

	/* Stop the scan LED blink once we leave SCAN. */
	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
		iwm_led_blink_stop(sc);

	/* disable beacon filtering if we're hopping out of RUN */
	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
		iwm_mvm_disable_beacon_filter(sc);

		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
			in->in_assoc = 0;

		if (nstate == IEEE80211_S_INIT) {
			/*
			 * RUN -> INIT: let the stack transition first,
			 * then reset the device via iwm_release().
			 */
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			error = ivp->iv_newstate(vap, nstate, arg);
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
			iwm_release(sc, NULL);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return error;
		}

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 *
		 * XXX TODO: fix this for FreeBSD!
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Force transition to INIT; MGT=%d\n", arg);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			/* Always pass arg as -1 since we can't Tx right now. */
			/*
			 * XXX arg is just ignored anyway when transitioning
			 *     to IEEE80211_S_INIT.
			 */
			vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Going INIT->SCAN\n");
			nstate = IEEE80211_S_SCAN;
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state: %d\n",
			    __func__, error);
			break;
		}
		break;

	case IEEE80211_S_ASSOC:
		if ((error = iwm_assoc(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to associate: %d\n", __func__,
			    error);
			break;
		}
		break;

	case IEEE80211_S_RUN:
	{
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		/* Update the association state, now we have it all */
		/* (eg associd comes in at this point */
		error = iwm_assoc(vap, sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update association state: %d\n",
			    __func__,
			    error);
			break;
		}

		in = IWM_NODE(vap->iv_bss);
		/* Enable power save, beacon filtering, quotas, and rates. */
		iwm_mvm_power_mac_update_mode(sc, in);
		iwm_mvm_enable_beacon_filter(sc, in);
		iwm_mvm_update_quotas(sc, in);
		iwm_setrates(sc, in);

		/* Push the link-quality table built by iwm_setrates(). */
		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: IWM_LQ_CMD failed\n", __func__);
		}

		iwm_mvm_led_enable(sc);
		break;
	}

	default:
		break;
	}
	IWM_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	/* Chain to the 802.11 stack's original newstate handler. */
	return (ivp->iv_newstate(vap, nstate, arg));
}
4693
/*
 * Taskqueue callback run once a firmware scan has completed ('pending'
 * is the standard taskqueue argument).  Notifies net80211 that the scan
 * is done on the first (and, for this driver, only) vap.
 */
void
iwm_endscan_cb(void *arg, int pending)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
	    "%s: scan ended\n",
	    __func__);

	/* Assumes only the first vap ever scans -- single-vap driver. */
	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
}
4706
/*
 * Aging and idle timeouts for the different possible scenarios
 * in default configuration
 *
 * Indexed as [scenario][timeout type] (aging timer, then idle timer).
 * Values are stored pre-swapped to little-endian so
 * iwm_mvm_fill_sf_command() can memcpy() them directly into the Smart
 * Fifo command.  This table is used when there is no BSS node
 * (unassociated state).
 */
static const uint32_t
iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
	},
	{
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
	},
	{
		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
	},
	{
		htole32(IWM_SF_BA_AGING_TIMER_DEF),
		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
	},
	{
		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
	},
};
4734
/*
 * Aging and idle timeouts for the different possible scenarios
 * in single BSS MAC configuration.
 *
 * Same layout as iwm_sf_full_timeout_def (see above): indexed as
 * [scenario][timeout type], values pre-swapped to little-endian for a
 * direct memcpy() into the Smart Fifo command.  This table is used when
 * a BSS node is present (associated state).
 */
static const uint32_t
iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
	},
	{
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
	},
	{
		htole32(IWM_SF_MCAST_AGING_TIMER),
		htole32(IWM_SF_MCAST_IDLE_TIMER)
	},
	{
		htole32(IWM_SF_BA_AGING_TIMER),
		htole32(IWM_SF_BA_IDLE_TIMER)
	},
	{
		htole32(IWM_SF_TX_RE_AGING_TIMER),
		htole32(IWM_SF_TX_RE_IDLE_TIMER)
	},
};
4762
/*
 * Fill in a Smart Fifo configuration command.
 *
 * The LONG_DELAY_ON watermark is always the scan watermark.  The
 * FULL_ON watermark depends on the peer: with an HT peer the MIMO
 * watermarks are compiled out ("notyet"), so SISO is always chosen;
 * a non-HT peer gets the legacy watermark; with no peer (unassociated)
 * the MIMO2 default is used.  Long-delay timeouts are uniform across
 * all scenarios; full-on timeouts are copied from the associated or
 * default table depending on whether 'ni' is set.
 */
static void
iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
    struct ieee80211_node *ni)
{
	int i, j, watermark;

	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);

	/*
	 * If we are in association flow - check antenna configuration
	 * capabilities of the AP station, and choose the watermark accordingly.
	 */
	if (ni) {
		if (ni->ni_flags & IEEE80211_NODE_HT) {
#ifdef notyet
			if (ni->ni_rxmcs[2] != 0)
				watermark = IWM_SF_W_MARK_MIMO3;
			else if (ni->ni_rxmcs[1] != 0)
				watermark = IWM_SF_W_MARK_MIMO2;
			else
#endif
				watermark = IWM_SF_W_MARK_SISO;
		} else {
			watermark = IWM_SF_W_MARK_LEGACY;
		}
	/* default watermark value for unassociated mode. */
	} else {
		watermark = IWM_SF_W_MARK_MIMO2;
	}
	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);

	/* Long-delay timeouts are the same for every scenario. */
	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
			sf_cmd->long_delay_timeouts[i][j] =
					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
		}
	}

	/* Tables are already little-endian; copy them in whole. */
	if (ni) {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
		       sizeof(iwm_sf_full_timeout));
	} else {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
		       sizeof(iwm_sf_full_timeout_def));
	}
}
4809
/*
 * Send a Smart Fifo (SF) configuration command for the given target
 * state:
 *   IWM_SF_UNINIT / IWM_SF_INIT_OFF - unassociated defaults (NULL node)
 *   IWM_SF_FULL_ON                  - timeouts tuned for the current BSS
 * Any other state is rejected with EINVAL.
 *
 * Note the command's state field is always IWM_SF_FULL_ON regardless of
 * 'new_state' (inherited from the iwlwifi reference driver -- see the
 * file header).  The command is sent asynchronously.
 */
static int
iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_sf_cfg_cmd sf_cmd = {
		.state = htole32(IWM_SF_FULL_ON),
	};
	int ret = 0;

	/* Family 8000 firmware wants the dummy-notification bit cleared. */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);

	switch (new_state) {
	case IWM_SF_UNINIT:
	case IWM_SF_INIT_OFF:
		iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
		break;
	case IWM_SF_FULL_ON:
		iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
		break;
	default:
		IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
		    "Invalid state: %d. not sending Smart Fifo cmd\n",
			  new_state);
		return EINVAL;
	}

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
				   sizeof(sf_cmd), &sf_cmd);
	return ret;
}
4842
4843 static int
4844 iwm_send_bt_init_conf(struct iwm_softc *sc)
4845 {
4846         struct iwm_bt_coex_cmd bt_cmd;
4847
4848         bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4849         bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4850
4851         return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4852             &bt_cmd);
4853 }
4854
/*
 * Send an MCC (mobile country code) update to set the firmware's
 * regulatory domain, e.g. "ZZ" for the generic world domain.
 *
 * The response is requested (IWM_CMD_WANT_SKB) and, on IWM_DEBUG
 * builds, parsed to log the resulting domain and channel count; it is
 * always released via iwm_free_resp() before returning.  Returns 0 on
 * success or the error from iwm_send_cmd().
 */
static int
iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
{
	struct iwm_mcc_update_cmd mcc_cmd;
	struct iwm_host_cmd hcmd = {
		.id = IWM_MCC_UPDATE_CMD,
		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
		.data = { &mcc_cmd },
	};
	int ret;
#ifdef IWM_DEBUG
	struct iwm_rx_packet *pkt;
	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
	struct iwm_mcc_update_resp *mcc_resp;
	int n_channels;
	uint16_t mcc;
#endif
	/* Newer firmware (LAR v2) uses a larger command/response layout. */
	int resp_v2 = isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	/* The MCC is the two ASCII country-code bytes, packed big-endian. */
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;

	if (resp_v2)
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
	else
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);

	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
	    "send MCC update to FW with '%c%c' src = %d\n",
	    alpha2[0], alpha2[1], mcc_cmd.source_id);

	ret = iwm_send_cmd(sc, &hcmd);
	if (ret)
		return ret;

#ifdef IWM_DEBUG
	pkt = hcmd.resp_pkt;

	/* Extract MCC response */
	if (resp_v2) {
		mcc_resp = (void *)pkt->data;
		mcc = mcc_resp->mcc;
		n_channels =  le32toh(mcc_resp->n_channels);
	} else {
		mcc_resp_v1 = (void *)pkt->data;
		mcc = mcc_resp_v1->mcc;
		n_channels =  le32toh(mcc_resp_v1->n_channels);
	}

	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
	if (mcc == 0)
		mcc = 0x3030;  /* "00" - world */

	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
	    "regulatory domain '%c%c' (%d channels available)\n",
	    mcc >> 8, mcc & 0xff, n_channels);
#endif
	iwm_free_resp(sc, &hcmd);

	return 0;
}
4922
4923 static void
4924 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4925 {
4926         struct iwm_host_cmd cmd = {
4927                 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4928                 .len = { sizeof(uint32_t), },
4929                 .data = { &backoff, },
4930         };
4931
4932         if (iwm_send_cmd(sc, &cmd) != 0) {
4933                 device_printf(sc->sc_dev,
4934                     "failed to change thermal tx backoff\n");
4935         }
4936 }
4937
4938 static int
4939 iwm_init_hw(struct iwm_softc *sc)
4940 {
4941         struct ieee80211com *ic = &sc->sc_ic;
4942         int error, i, ac;
4943
4944         if ((error = iwm_start_hw(sc)) != 0) {
4945                 printf("iwm_start_hw: failed %d\n", error);
4946                 return error;
4947         }
4948
4949         if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4950                 printf("iwm_run_init_mvm_ucode: failed %d\n", error);
4951                 return error;
4952         }
4953
4954         /*
4955          * should stop and start HW since that INIT
4956          * image just loaded
4957          */
4958         iwm_stop_device(sc);
4959         if ((error = iwm_start_hw(sc)) != 0) {
4960                 device_printf(sc->sc_dev, "could not initialize hardware\n");
4961                 return error;
4962         }
4963
4964         /* omstart, this time with the regular firmware */
4965         error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4966         if (error) {
4967                 device_printf(sc->sc_dev, "could not load firmware\n");
4968                 goto error;
4969         }
4970
4971         if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4972                 device_printf(sc->sc_dev, "bt init conf failed\n");
4973                 goto error;
4974         }
4975
4976         error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
4977         if (error != 0) {
4978                 device_printf(sc->sc_dev, "antenna config failed\n");
4979                 goto error;
4980         }
4981
4982         /* Send phy db control command and then phy db calibration */
4983         if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4984                 goto error;
4985
4986         if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4987                 device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4988                 goto error;
4989         }
4990
4991         /* Add auxiliary station for scanning */
4992         if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4993                 device_printf(sc->sc_dev, "add_aux_sta failed\n");
4994                 goto error;
4995         }
4996
4997         for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4998                 /*
4999                  * The channel used here isn't relevant as it's
5000                  * going to be overwritten in the other flows.
5001                  * For now use the first channel we have.
5002                  */
5003                 if ((error = iwm_mvm_phy_ctxt_add(sc,
5004                     &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
5005                         goto error;
5006         }
5007
5008         /* Initialize tx backoffs to the minimum. */
5009         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
5010                 iwm_mvm_tt_tx_backoff(sc, 0);
5011
5012         error = iwm_mvm_power_update_device(sc);
5013         if (error)
5014                 goto error;
5015
5016         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
5017                 if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
5018                         goto error;
5019         }
5020
5021         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
5022                 if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
5023                         goto error;
5024         }
5025
5026         /* Enable Tx queues. */
5027         for (ac = 0; ac < WME_NUM_AC; ac++) {
5028                 error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
5029                     iwm_mvm_ac_to_tx_fifo[ac]);
5030                 if (error)
5031                         goto error;
5032         }
5033
5034         if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
5035                 device_printf(sc->sc_dev, "failed to disable beacon filter\n");
5036                 goto error;
5037         }
5038
5039         return 0;
5040
5041  error:
5042         iwm_stop_device(sc);
5043         return error;
5044 }
5045
5046 /* Allow multicast from our BSSID. */
5047 static int
5048 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
5049 {
5050         struct ieee80211_node *ni = vap->iv_bss;
5051         struct iwm_mcast_filter_cmd *cmd;
5052         size_t size;
5053         int error;
5054
5055         size = roundup(sizeof(*cmd), 4);
5056         cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
5057         if (cmd == NULL)
5058                 return ENOMEM;
5059         cmd->filter_own = 1;
5060         cmd->port_id = 0;
5061         cmd->count = 0;
5062         cmd->pass_all = 1;
5063         IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
5064
5065         error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
5066             IWM_CMD_SYNC, size, cmd);
5067         free(cmd, M_DEVBUF);
5068
5069         return (error);
5070 }
5071
5072 /*
5073  * ifnet interfaces
5074  */
5075
/*
 * Start the hardware and load the firmware.  No-op if already
 * initialized.  On failure the device is torn down again via
 * iwm_stop(); on success the 1 Hz watchdog callout is armed.
 * NOTE(review): all visible callers hold the IWM lock -- confirm
 * for any new caller.
 */
static void
iwm_init(struct iwm_softc *sc)
{
	int error;

	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
		return;
	}
	/* New generation; presumably invalidates stale completions -- confirm. */
	sc->sc_generation++;
	sc->sc_flags &= ~IWM_FLAG_STOPPED;

	if ((error = iwm_init_hw(sc)) != 0) {
		printf("iwm_init_hw failed %d\n", error);
		iwm_stop(sc);
		return;
	}

	/*
	 * Ok, firmware loaded and we are jogging
	 */
	sc->sc_flags |= IWM_FLAG_HW_INITED;
	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
}
5099
5100 static int
5101 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
5102 {
5103         struct iwm_softc *sc;
5104         int error;
5105
5106         sc = ic->ic_softc;
5107
5108         IWM_LOCK(sc);
5109         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
5110                 IWM_UNLOCK(sc);
5111                 return (ENXIO);
5112         }
5113         error = mbufq_enqueue(&sc->sc_snd, m);
5114         if (error) {
5115                 IWM_UNLOCK(sc);
5116                 return (error);
5117         }
5118         iwm_start(sc);
5119         IWM_UNLOCK(sc);
5120         return (0);
5121 }
5122
5123 /*
5124  * Dequeue packets from sendq and call send.
5125  */
/*
 * Drain the software send queue into the hardware: hand frames to
 * iwm_tx() until a Tx ring fills (sc->qfullmsk != 0) or the queue is
 * empty.  Each frame successfully handed down re-arms the Tx watchdog
 * to 15 ticks of the 1 Hz watchdog callout (~15 s).  Errors count as
 * output errors on the vap's ifnet and release the node reference.
 */
static void
iwm_start(struct iwm_softc *sc)
{
	struct ieee80211_node *ni;
	struct mbuf *m;
	int ac = 0;	/* XXX everything currently goes out on AC 0 */

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
	while (sc->qfullmsk == 0 &&
		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
		/* net80211 stashes the node reference in the pkthdr. */
		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
		if (iwm_tx(sc, m, ni, ac) != 0) {
			if_inc_counter(ni->ni_vap->iv_ifp,
			    IFCOUNTER_OERRORS, 1);
			ieee80211_free_node(ni);
			continue;
		}
		sc->sc_tx_timer = 15;
	}
	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
}
5147
/*
 * Stop the device: drop HW_INITED, set STOPPED, bump the generation
 * counter (presumably so completions from the old run are ignored --
 * confirm), cancel LED blinking and the Tx watchdog, power the device
 * down, and clear any scan-in-progress flag.
 */
static void
iwm_stop(struct iwm_softc *sc)
{

	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	sc->sc_flags |= IWM_FLAG_STOPPED;
	sc->sc_generation++;
	iwm_led_blink_stop(sc);
	sc->sc_tx_timer = 0;
	iwm_stop_device(sc);
	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
}
5160
/*
 * 1 Hz watchdog callout.  Counts down sc_tx_timer (set to 15 by
 * iwm_start() when a frame is queued); if it reaches zero the device
 * is assumed wedged: dump the firmware error log (debug builds) and
 * restart all interfaces.  Reschedules itself unless the timeout
 * fired (the restart path re-initializes and re-arms it).
 */
static void
iwm_watchdog(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	if (sc->sc_tx_timer > 0) {
		if (--sc->sc_tx_timer == 0) {
			device_printf(sc->sc_dev, "device timeout\n");
#ifdef IWM_DEBUG
			iwm_nic_error(sc);
#endif
			ieee80211_restart_all(ic);
			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
			return;
		}
	}
	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
}
5180
5181 static void
5182 iwm_parent(struct ieee80211com *ic)
5183 {
5184         struct iwm_softc *sc = ic->ic_softc;
5185         int startall = 0;
5186
5187         IWM_LOCK(sc);
5188         if (ic->ic_nrunning > 0) {
5189                 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5190                         iwm_init(sc);
5191                         startall = 1;
5192                 }
5193         } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5194                 iwm_stop(sc);
5195         IWM_UNLOCK(sc);
5196         if (startall)
5197                 ieee80211_start_all(ic);
5198 }
5199
5200 /*
5201  * The interrupt side of things
5202  */
5203
5204 /*
5205  * error dumping routines are from iwlwifi/mvm/utils.c
5206  */
5207
5208 /*
5209  * Note: This structure is read from the device with IO accesses,
5210  * and the reading already does the endian conversion. As it is
5211  * read with uint32_t-sized accesses, any members with a different size
5212  * need to be ordered correctly though!
5213  */
/* LMAC error-log layout; read from SRAM by iwm_nic_error() below. */
struct iwm_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;		/* type of error */
	uint32_t trm_hw_status0;	/* TRM HW status */
	uint32_t trm_hw_status1;	/* TRM HW status */
	uint32_t blink2;		/* branch link */
	uint32_t ilink1;		/* interrupt link */
	uint32_t ilink2;		/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;		/* beacon timer */
	uint32_t tsf_low;		/* network timestamp function timer */
	uint32_t tsf_hi;		/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t fw_rev_type;	/* firmware revision type */
	uint32_t major;		/* uCode version major */
	uint32_t minor;		/* uCode version minor */
	uint32_t hw_ver;		/* HW Silicon version */
	uint32_t brd_ver;		/* HW board version */
	uint32_t log_pc;		/* log program counter */
	uint32_t frame_ptr;		/* frame pointer */
	uint32_t stack_ptr;		/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
	uint32_t wait_event;		/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* indicate when the date and time of the
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5261
5262 /*
5263  * UMAC error struct - relevant starting from family 8000 chip.
5264  * Note: This structure is read from the device with IO accesses,
5265  * and the reading already does the endian conversion. As it is
5266  * read with u32-sized accesses, any members with a different size
5267  * need to be ordered correctly though!
5268  */
/* UMAC error-log layout; read from SRAM by iwm_nic_umac_error() below. */
struct iwm_umac_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t umac_major;	/* UMAC firmware version, major */
	uint32_t umac_minor;	/* UMAC firmware version, minor */
	uint32_t frame_pointer;	/* core register 27*/
	uint32_t stack_pointer;	/* core register 28 */
	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
	uint32_t nic_isr_pref;	/* ISR status register */
} __packed;
5286
/*
 * Firmware error-log layout constants used by the dump routines below:
 * entries begin one 32-bit word into the table and each element spans
 * seven 32-bit words.
 */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5289
5290 #ifdef IWM_DEBUG
/*
 * Map firmware SYSASSERT identifiers to human-readable names.
 * The final "ADVANCED_SYSASSERT" entry is a catch-all and must remain
 * last: iwm_desc_lookup() returns it when no other entry matches.
 * Made static const: the table is file-local, read-only data and
 * should not have external linkage.
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
5312
5313 static const char *
5314 iwm_desc_lookup(uint32_t num)
5315 {
5316         int i;
5317
5318         for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5319                 if (advanced_lookup[i].num == num)
5320                         return advanced_lookup[i].name;
5321
5322         /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5323         return advanced_lookup[i].name;
5324 }
5325
/*
 * Dump the UMAC error-event table (family 8000+ devices) from device
 * memory.  Companion to iwm_nic_error() below, which handles the LMAC
 * log and calls this when a UMAC table pointer is present.
 */
static void
iwm_nic_umac_error(struct iwm_softc *sc)
{
	struct iwm_umac_error_event_table table;
	uint32_t base;

	base = sc->umac_error_event_table;

	/* Sanity-check the SRAM pointer the firmware advertised. */
	if (base < 0x800000) {
		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
		    base);
		return;
	}

	/* iwm_read_mem() takes a count of 32-bit words, not bytes. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	/*
	 * Given the constants (4 <= valid * 28) this is effectively
	 * "table.valid != 0"; form inherited from iwlwifi.
	 */
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
		iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
	    table.ilink1);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
	    table.ilink2);
	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
	    table.frame_pointer);
	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
	    table.stack_pointer);
	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
	    table.nic_isr_pref);
}
5372
5373 /*
5374  * Support for dumping the error log seemed like a good idea ...
5375  * but it's mostly hex junk and the only sensible thing is the
5376  * hw/ucode revision (which we know anyway).  Since it's here,
5377  * I'll just leave it in, just in case e.g. the Intel guys want to
5378  * help us decipher some "ADVANCED_SYSASSERT" later.
5379  */
/*
 * Dump the LMAC error-event table from device memory, then the UMAC
 * table if one was advertised.  See the comment above: output is
 * mostly raw hex useful for matching against iwlwifi assert decodes.
 */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table table;
	uint32_t base;

	device_printf(sc->sc_dev, "dumping device error log\n");
	base = sc->error_event_table;
	/* Sanity-check the SRAM pointer the firmware advertised. */
	if (base < 0x800000) {
		device_printf(sc->sc_dev,
		    "Invalid error log pointer 0x%08x\n", base);
		return;
	}

	/* iwm_read_mem() takes a count of 32-bit words, not bytes. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (!table.valid) {
		device_printf(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	/*
	 * Given the constants this is effectively "table.valid != 0"
	 * (already established above); form inherited from iwlwifi.
	 */
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
	    iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
	    table.trm_hw_status0);
	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
	    table.trm_hw_status1);
	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
	    table.fw_rev_type);
	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);

	if (sc->umac_error_event_table)
		iwm_nic_umac_error(sc);
}
5452 #endif
5453
/*
 * Advance the RX ring read index, wrapping at IWM_RX_RING_COUNT.
 * The argument is parenthesized and the trailing semicolon removed so
 * the macro expands to a plain expression: call sites keep their own
 * ';' and the classic if/else "swallowed semicolon" hazard is avoided.
 */
#define ADVANCE_RXQ(sc) ((sc)->rxq.cur = ((sc)->rxq.cur + 1) % IWM_RX_RING_COUNT)
5455
5456 /*
5457  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5458  * Basic structure from if_iwn
5459  */
static void
iwm_notif_intr(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	uint16_t hw;

	/* Make the DMA-written RX status page visible to the CPU. */
	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
	    BUS_DMASYNC_POSTREAD);

	/* Firmware's closed receive-buffer index; masked to 12 bits. */
	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;

	/*
	 * Process responses
	 */
	while (sc->rxq.cur != hw) {
		struct iwm_rx_ring *ring = &sc->rxq;
		struct iwm_rx_data *data = &ring->data[ring->cur];
		struct iwm_rx_packet *pkt;
		struct iwm_cmd_response *cresp;
		int qid, idx, code;

		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTREAD);
		pkt = mtod(data->m, struct iwm_rx_packet *);

		/* Bit 7 of qid flags firmware-originated notifications. */
		qid = pkt->hdr.qid & ~0x80;
		idx = pkt->hdr.idx;

		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "rx packet qid=%d idx=%d type=%x %d %d\n",
		    pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, ring->cur, hw);

		/*
		 * randomly get these from the firmware, no idea why.
		 * they at least seem harmless, so just ignore them for now
		 */
		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
		    || pkt->len_n_flags == htole32(0x55550000))) {
			ADVANCE_RXQ(sc);
			continue;
		}

		/* Wake any thread blocked waiting on this notification code. */
		iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);

		switch (code) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWM_REPLY_RX_MPDU_CMD:
			iwm_mvm_rx_rx_mpdu(sc, pkt, data);
			break;

		case IWM_TX_CMD:
			iwm_mvm_rx_tx_cmd(sc, pkt, data);
			break;

		case IWM_MISSED_BEACONS_NOTIFICATION: {
			struct iwm_missed_beacons_notif *resp;
			int missed;

			/* XXX look at mac_id to determine interface ID */
			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

			resp = (void *)pkt->data;
			missed = le32toh(resp->consec_missed_beacons);

			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
			    "%s: MISSED_BEACON: mac_id=%d, "
			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
			    "num_rx=%d\n",
			    __func__,
			    le32toh(resp->mac_id),
			    le32toh(resp->consec_missed_beacons_since_last_rx),
			    le32toh(resp->consec_missed_beacons),
			    le32toh(resp->num_expected_beacons),
			    le32toh(resp->num_recvd_beacons));

			/* Be paranoid */
			if (vap == NULL)
				break;

			/* XXX no net80211 locking? */
			if (vap->iv_state == IEEE80211_S_RUN &&
			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
				if (missed > vap->iv_bmissthreshold) {
					/* XXX bad locking; turn into task */
					IWM_UNLOCK(sc);
					ieee80211_beacon_miss(ic);
					IWM_LOCK(sc);
				}
			}

			break; }

		case IWM_MFUART_LOAD_NOTIFICATION:
			break;

		case IWM_MVM_ALIVE:
			break;

		case IWM_CALIB_RES_NOTIF_PHY_DB:
			break;

		case IWM_STATISTICS_NOTIFICATION: {
			struct iwm_notif_statistics *stats;
			stats = (void *)pkt->data;
			/* Snapshot firmware stats and refresh the noise floor. */
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
			break; }

		case IWM_NVM_ACCESS_CMD:
		case IWM_MCC_UPDATE_CMD:
			/*
			 * Stash the full reply packet for the thread that
			 * issued the command (matched by queue id / index).
			 */
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(sc->sc_cmd_resp));
			}
			break;

		case IWM_MCC_CHUB_UPDATE_CMD: {
			struct iwm_mcc_chub_notif *notif;
			notif = (void *)pkt->data;

			/* Decode the 16-bit country code into two ASCII chars. */
			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
			sc->sc_fw_mcc[2] = '\0';
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "fw source %d sent CC '%s'\n",
			    notif->source_id, sc->sc_fw_mcc);
			break; }

		case IWM_DTS_MEASUREMENT_NOTIFICATION:
		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
				 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
			struct iwm_dts_measurement_notif_v1 *notif;

			/* Validate payload length before dereferencing. */
			if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
				device_printf(sc->sc_dev,
				    "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
				break;
			}
			notif = (void *)pkt->data;
			IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
			    "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
			    notif->temp);
			break;
		}

		/*
		 * Command completions whose only interesting payload is a
		 * generic status word: copy the response back for a waiter.
		 */
		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_TIME_EVENT_CMD:
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
		case IWM_SCAN_ABORT_UMAC:
		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
		case IWM_SCAN_OFFLOAD_ABORT_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
		case IWM_BT_CONFIG:
		case IWM_REPLY_THERMAL_MNG_BACKOFF:
			cresp = (void *)pkt->data;
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(*pkt)+sizeof(*cresp));
			}
			break;

		/* ignore */
		case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			break;

		case IWM_SCAN_OFFLOAD_COMPLETE: {
			struct iwm_periodic_scan_complete *notif;
			notif = (void *)pkt->data;
			/* LMAC scan finished: schedule the end-scan task. */
			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
				ieee80211_runtask(ic, &sc->sc_es_task);
			}
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE: {
			struct iwm_lmac_scan_complete_notif *notif;
			notif = (void *)pkt->data;
			ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
			break;
		}

		case IWM_SCAN_COMPLETE_UMAC: {
			struct iwm_umac_scan_complete *notif;
			notif = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
			    "UMAC scan complete, status=0x%x\n",
			    notif->status);
			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
				ieee80211_runtask(ic, &sc->sc_es_task);
			}
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
			struct iwm_umac_scan_iter_complete_notif *notif;
			notif = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
			    "complete, status=0x%x, %d channels scanned\n",
			    notif->status, notif->scanned_channels);
			ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
			break;
		}

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			resp = (void *)pkt->data;

			device_printf(sc->sc_dev,
			    "firmware error 0x%x, cmd 0x%x\n",
			    le32toh(resp->error_type),
			    resp->cmd_id);
			break;
		}

		case IWM_TIME_EVENT_NOTIFICATION: {
			struct iwm_time_event_notif *notif;
			notif = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_INTR,
			    "TE notif status = 0x%x action = 0x%x\n",
			    notif->status, notif->action);
			break;
		}

		case IWM_MCAST_FILTER_CMD:
			break;

		case IWM_SCD_QUEUE_CFG: {
			struct iwm_scd_txq_cfg_rsp *rsp;
			rsp = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
			    "queue cfg token=0x%x sta_id=%d "
			    "tid=%d scd_queue=%d\n",
			    rsp->token, rsp->sta_id, rsp->tid,
			    rsp->scd_queue);
			break;
		}

		default:
			device_printf(sc->sc_dev,
			    "frame %d/%d %x UNHANDLED (this should "
			    "not happen)\n", qid, idx,
			    pkt->len_n_flags);
			break;
		}

		/*
		 * Why test bit 0x80?  The Linux driver:
		 *
		 * There is one exception:  uCode sets bit 15 when it
		 * originates the response/notification, i.e. when the
		 * response/notification is not a direct response to a
		 * command sent by the driver.  For example, uCode issues
		 * IWM_REPLY_RX when it sends a received frame to the driver;
		 * it is not a direct response to any driver command.
		 *
		 * Ok, so since when is 7 == 15?  Well, the Linux driver
		 * uses a slightly different format for pkt->hdr, and "qid"
		 * is actually the upper byte of a two-byte field.
		 */
		if (!(pkt->hdr.qid & (1 << 7))) {
			iwm_cmd_done(sc, pkt);
		}

		ADVANCE_RXQ(sc);
	}

	/*
	 * Tell the firmware what we have processed.
	 * Seems like the hardware gets upset unless we align
	 * the write by 8??
	 */
	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
}
5760
/*
 * Primary interrupt handler.  Reads interrupt cause bits (either via the
 * ICT table or directly from CSR registers), then dispatches: firmware
 * errors, hardware errors, firmware-chunk-load completion, rfkill,
 * periodic RX, and RX notifications.  Runs with the softc lock held.
 */
static void
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	int handled = 0;
	int r1, r2, rv = 0;
	int isperiodic = 0;

	IWM_LOCK(sc);
	/* Disable interrupts while we service this one. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			/* Clear consumed ICT slots as we go. */
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		/* Nothing pending; just re-enable interrupts. */
		goto out_ena;
	}

	/* Acknowledge the causes we are about to handle. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* Safely ignore these bits for debug checks below */
	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
		int i;
		struct ieee80211com *ic = &sc->sc_ic;
		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

#ifdef IWM_DEBUG
		iwm_nic_error(sc);
#endif
		/* Dump driver status (TX and RX rings) while we're here. */
		device_printf(sc->sc_dev, "driver status:\n");
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			device_printf(sc->sc_dev,
			    "  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued);
		}
		device_printf(sc->sc_dev,
		    "  rx ring: cur=%d\n", sc->rxq.cur);
		device_printf(sc->sc_dev,
		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);

		/* Don't stop the device; just do a VAP restart */
		IWM_UNLOCK(sc);

		if (vap == NULL) {
			printf("%s: null vap\n", __func__);
			return;
		}

		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
		    "restarting\n", __func__, vap->iv_state);

		/* XXX TODO: turn this into a callout/taskqueue */
		ieee80211_restart_all(ic);
		return;
	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		device_printf(sc->sc_dev, "hardware error, stopping device\n");
		iwm_stop(sc);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;
		sc->sc_fw_chunk_done = 1;
		/* Wake the thread sleeping in the firmware-load path. */
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		if (iwm_check_rfkill(sc)) {
			device_printf(sc->sc_dev,
			    "%s: rfkill switch, disabling interface\n",
			    __func__);
			iwm_stop(sc);
		}
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "%s: unhandled interrupts: %x\n", __func__, r1);
	rv = 1;

 out_ena:
	iwm_restore_interrupts(sc);
 out:
	IWM_UNLOCK(sc);
	return;
}
5915
5916 /*
5917  * Autoconf glue-sniffing
5918  */
5919 #define PCI_VENDOR_INTEL                0x8086
5920 #define PCI_PRODUCT_INTEL_WL_3160_1     0x08b3
5921 #define PCI_PRODUCT_INTEL_WL_3160_2     0x08b4
5922 #define PCI_PRODUCT_INTEL_WL_3165_1     0x3165
5923 #define PCI_PRODUCT_INTEL_WL_3165_2     0x3166
5924 #define PCI_PRODUCT_INTEL_WL_7260_1     0x08b1
5925 #define PCI_PRODUCT_INTEL_WL_7260_2     0x08b2
5926 #define PCI_PRODUCT_INTEL_WL_7265_1     0x095a
5927 #define PCI_PRODUCT_INTEL_WL_7265_2     0x095b
5928 #define PCI_PRODUCT_INTEL_WL_8260_1     0x24f3
5929 #define PCI_PRODUCT_INTEL_WL_8260_2     0x24f4
5930
/* Table of supported PCI device IDs and their marketing names. */
static const struct iwm_devices {
	uint16_t	device;		/* PCI device ID */
	const char	*name;		/* description set at probe time */
} iwm_devices[] = {
	{ PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
	{ PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
	{ PCI_PRODUCT_INTEL_WL_3165_1, "Intel Dual Band Wireless AC 3165" },
	{ PCI_PRODUCT_INTEL_WL_3165_2, "Intel Dual Band Wireless AC 3165" },
	{ PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
	{ PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
	{ PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
	{ PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
	{ PCI_PRODUCT_INTEL_WL_8260_1, "Intel Dual Band Wireless AC 8260" },
	{ PCI_PRODUCT_INTEL_WL_8260_2, "Intel Dual Band Wireless AC 8260" },
};
5946
5947 static int
5948 iwm_probe(device_t dev)
5949 {
5950         int i;
5951
5952         for (i = 0; i < nitems(iwm_devices); i++) {
5953                 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5954                     pci_get_device(dev) == iwm_devices[i].device) {
5955                         device_set_desc(dev, iwm_devices[i].name);
5956                         return (BUS_PROBE_DEFAULT);
5957                 }
5958         }
5959
5960         return (ENXIO);
5961 }
5962
5963 static int
5964 iwm_dev_check(device_t dev)
5965 {
5966         struct iwm_softc *sc;
5967
5968         sc = device_get_softc(dev);
5969
5970         switch (pci_get_device(dev)) {
5971         case PCI_PRODUCT_INTEL_WL_3160_1:
5972         case PCI_PRODUCT_INTEL_WL_3160_2:
5973                 sc->cfg = &iwm3160_cfg;
5974                 return (0);
5975         case PCI_PRODUCT_INTEL_WL_3165_1:
5976         case PCI_PRODUCT_INTEL_WL_3165_2:
5977                 sc->cfg = &iwm3165_cfg;
5978                 return (0);
5979         case PCI_PRODUCT_INTEL_WL_7260_1:
5980         case PCI_PRODUCT_INTEL_WL_7260_2:
5981                 sc->cfg = &iwm7260_cfg;
5982                 return (0);
5983         case PCI_PRODUCT_INTEL_WL_7265_1:
5984         case PCI_PRODUCT_INTEL_WL_7265_2:
5985                 sc->cfg = &iwm7265_cfg;
5986                 return (0);
5987         case PCI_PRODUCT_INTEL_WL_8260_1:
5988         case PCI_PRODUCT_INTEL_WL_8260_2:
5989                 sc->cfg = &iwm8260_cfg;
5990                 return (0);
5991         default:
5992                 device_printf(dev, "unknown adapter type\n");
5993                 return ENXIO;
5994         }
5995 }
5996
5997 /* PCI registers */
5998 #define PCI_CFG_RETRY_TIMEOUT   0x041
5999
6000 static int
6001 iwm_pci_attach(device_t dev)
6002 {
6003         struct iwm_softc *sc;
6004         int count, error, rid;
6005         uint16_t reg;
6006
6007         sc = device_get_softc(dev);
6008
6009         /* We disable the RETRY_TIMEOUT register (0x41) to keep
6010          * PCI Tx retries from interfering with C3 CPU state */
6011         pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6012
6013         /* Enable bus-mastering and hardware bug workaround. */
6014         pci_enable_busmaster(dev);
6015         reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
6016         /* if !MSI */
6017         if (reg & PCIM_STATUS_INTxSTATE) {
6018                 reg &= ~PCIM_STATUS_INTxSTATE;
6019         }
6020         pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
6021
6022         rid = PCIR_BAR(0);
6023         sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
6024             RF_ACTIVE);
6025         if (sc->sc_mem == NULL) {
6026                 device_printf(sc->sc_dev, "can't map mem space\n");
6027                 return (ENXIO);
6028         }
6029         sc->sc_st = rman_get_bustag(sc->sc_mem);
6030         sc->sc_sh = rman_get_bushandle(sc->sc_mem);
6031
6032         /* Install interrupt handler. */
6033         count = 1;
6034         rid = 0;
6035         if (pci_alloc_msi(dev, &count) == 0)
6036                 rid = 1;
6037         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
6038             (rid != 0 ? 0 : RF_SHAREABLE));
6039         if (sc->sc_irq == NULL) {
6040                 device_printf(dev, "can't map interrupt\n");
6041                         return (ENXIO);
6042         }
6043         error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
6044             NULL, iwm_intr, sc, &sc->sc_ih);
6045         if (sc->sc_ih == NULL) {
6046                 device_printf(dev, "can't establish interrupt");
6047                         return (ENXIO);
6048         }
6049         sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
6050
6051         return (0);
6052 }
6053
6054 static void
6055 iwm_pci_detach(device_t dev)
6056 {
6057         struct iwm_softc *sc = device_get_softc(dev);
6058
6059         if (sc->sc_irq != NULL) {
6060                 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
6061                 bus_release_resource(dev, SYS_RES_IRQ,
6062                     rman_get_rid(sc->sc_irq), sc->sc_irq);
6063                 pci_release_msi(dev);
6064         }
6065         if (sc->sc_mem != NULL)
6066                 bus_release_resource(dev, SYS_RES_MEMORY,
6067                     rman_get_rid(sc->sc_mem), sc->sc_mem);
6068 }
6069
6070
6071
/*
 * Main attach routine: initialize locks, callouts and tasks, attach PCI
 * resources, identify the chip, allocate DMA memory and TX/RX rings, and
 * register the net80211 capabilities.  On any failure, iwm_detach_local()
 * unwinds whatever was set up so far.
 */
static int
iwm_attach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	int error;
	int txq_i, i;

	sc->sc_dev = dev;
	sc->sc_attached = 1;
	IWM_LOCK_INIT(sc);
	mbufq_init(&sc->sc_snd, ifqmaxlen);
	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);

	sc->sc_notif_wait = iwm_notification_wait_init(sc);
	if (sc->sc_notif_wait == NULL) {
		device_printf(dev, "failed to init notification wait struct\n");
		goto fail;
	}

	/* Init phy db */
	sc->sc_phy_db = iwm_phy_db_init(sc);
	if (!sc->sc_phy_db) {
		device_printf(dev, "Cannot init phy_db\n");
		goto fail;
	}

	/* PCI attach */
	error = iwm_pci_attach(dev);
	if (error != 0)
		goto fail;

	/* -1 means no command response is currently awaited. */
	sc->sc_wantresp = -1;

	/* Check device type */
	error = iwm_dev_check(dev);
	if (error != 0)
		goto fail;

	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

	if (iwm_prepare_card_hw(sc) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		goto fail;
	}

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		int ret;
		uint32_t hw_step;

		/*
		 * In order to recognize C step the driver should read the
		 * chip version id located at the AUX bus MISC address.
		 */
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(2);

		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   25000);
		if (!ret) {
			device_printf(sc->sc_dev,
			    "Failed to wake up the nic\n");
			goto fail;
		}

		if (iwm_nic_lock(sc)) {
			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
			hw_step |= IWM_ENABLE_WFPM;
			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
			/* Patch a detected C step into the stored hw_rev. */
			if (hw_step == 0x3)
				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
						(IWM_SILICON_C_STEP << 2);
			iwm_nic_unlock(sc);
		} else {
			device_printf(sc->sc_dev, "Failed to lock the nic\n");
			goto fail;
		}
	}

	/* special-case 7265D, it has the same PCI IDs. */
	if (sc->cfg == &iwm7265_cfg &&
	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
		sc->cfg = &iwm7265d_cfg;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		device_printf(dev, "could not allocate memory for firmware\n");
		goto fail;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		device_printf(dev, "could not allocate keep warm page\n");
		goto fail;
	}

	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		device_printf(dev, "could not allocate ICT table\n");
		goto fail;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		device_printf(dev, "could not allocate TX scheduler rings\n");
		goto fail;
	}

	/* Allocate TX rings */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			device_printf(dev,
			    "could not allocate TX ring %d\n",
			    txq_i);
			goto fail;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		device_printf(dev, "could not allocate RX ring\n");
		goto fail;
	}

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_STA |
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_WME |
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
	    ;
	/* Initialize every PHY context as unused. */
	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
		sc->sc_phyctxt[i].color = 0;
		sc->sc_phyctxt[i].ref = 0;
		sc->sc_phyctxt[i].channel = NULL;
	}

	/* Default noise floor */
	sc->sc_noise = -96;

	/* Max RSSI */
	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;

	/* Defer firmware load etc. until interrupts are available. */
	sc->sc_preinit_hook.ich_func = iwm_preinit;
	sc->sc_preinit_hook.ich_arg = sc;
	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
		device_printf(dev, "config_intrhook_establish failed\n");
		goto fail;
	}

#ifdef IWM_DEBUG
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);

	return 0;

	/* Free allocated memory if something failed during attachment. */
fail:
	iwm_detach_local(sc, 0);

	return ENXIO;
}
6268
6269 static int
6270 iwm_is_valid_ether_addr(uint8_t *addr)
6271 {
6272         char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6273
6274         if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6275                 return (FALSE);
6276
6277         return (TRUE);
6278 }
6279
6280 static int
6281 iwm_update_edca(struct ieee80211com *ic)
6282 {
6283         struct iwm_softc *sc = ic->ic_softc;
6284
6285         device_printf(sc->sc_dev, "%s: called\n", __func__);
6286         return (0);
6287 }
6288
/*
 * config_intrhook(9) callback, run once at boot when interrupts are
 * available.  Brings the hardware up just long enough to run the
 * "init" firmware image (which makes the NVM data, consumed below,
 * available), then stops the device again and attaches net80211.
 * On any failure the driver tears itself down via iwm_detach_local().
 */
static void
iwm_preinit(void *arg)
{
	struct iwm_softc *sc = arg;
	device_t dev = sc->sc_dev;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s\n", __func__);

	IWM_LOCK(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		IWM_UNLOCK(sc);
		goto fail;
	}

	/*
	 * Run the init firmware, then stop the device unconditionally;
	 * only afterwards is the error checked.
	 */
	error = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error) {
		IWM_UNLOCK(sc);
		goto fail;
	}
	device_printf(dev,
	    "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));

	/* not all hardware can do 5GHz band */
	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
	IWM_UNLOCK(sc);

	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/*
	 * At this point we've committed - if we fail to do setup,
	 * we now also have to tear down the net80211 state.
	 */
	ieee80211_ifattach(ic);
	/* Install the driver's net80211 method overrides. */
	ic->ic_vap_create = iwm_vap_create;
	ic->ic_vap_delete = iwm_vap_delete;
	ic->ic_raw_xmit = iwm_raw_xmit;
	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_scan_start = iwm_scan_start;
	ic->ic_scan_end = iwm_scan_end;
	ic->ic_update_mcast = iwm_update_mcast;
	ic->ic_getradiocaps = iwm_init_channel_map;
	ic->ic_set_channel = iwm_set_channel;
	ic->ic_scan_curchan = iwm_scan_curchan;
	ic->ic_scan_mindwell = iwm_scan_mindwell;
	ic->ic_wme.wme_update = iwm_update_edca;
	ic->ic_parent = iwm_parent;
	ic->ic_transmit = iwm_transmit;
	iwm_radiotap_attach(sc);
	if (bootverbose)
		ieee80211_announce(ic);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);
	/* Boot no longer needs to wait on this hook. */
	config_intrhook_disestablish(&sc->sc_preinit_hook);

	return;
fail:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
	iwm_detach_local(sc, 0);
}
6359
6360 /*
6361  * Attach the interface to 802.11 radiotap.
6362  */
6363 static void
6364 iwm_radiotap_attach(struct iwm_softc *sc)
6365 {
6366         struct ieee80211com *ic = &sc->sc_ic;
6367
6368         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6369             "->%s begin\n", __func__);
6370         ieee80211_radiotap_attach(ic,
6371             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6372                 IWM_TX_RADIOTAP_PRESENT,
6373             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6374                 IWM_RX_RADIOTAP_PRESENT);
6375         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6376             "->%s end\n", __func__);
6377 }
6378
6379 static struct ieee80211vap *
6380 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6381     enum ieee80211_opmode opmode, int flags,
6382     const uint8_t bssid[IEEE80211_ADDR_LEN],
6383     const uint8_t mac[IEEE80211_ADDR_LEN])
6384 {
6385         struct iwm_vap *ivp;
6386         struct ieee80211vap *vap;
6387
6388         if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6389                 return NULL;
6390         ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6391         vap = &ivp->iv_vap;
6392         ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6393         vap->iv_bmissthreshold = 10;            /* override default */
6394         /* Override with driver methods. */
6395         ivp->iv_newstate = vap->iv_newstate;
6396         vap->iv_newstate = iwm_newstate;
6397
6398         ieee80211_ratectl_init(vap);
6399         /* Complete setup. */
6400         ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6401             mac);
6402         ic->ic_opmode = opmode;
6403
6404         return vap;
6405 }
6406
6407 static void
6408 iwm_vap_delete(struct ieee80211vap *vap)
6409 {
6410         struct iwm_vap *ivp = IWM_VAP(vap);
6411
6412         ieee80211_ratectl_deinit(vap);
6413         ieee80211_vap_detach(vap);
6414         free(ivp, M_80211_VAP);
6415 }
6416
/*
 * net80211 scan_start callback: kick off a firmware-driven scan,
 * using the UMAC scan API when the firmware advertises that
 * capability and the older LMAC scan command otherwise.  On failure
 * the scan is cancelled so net80211 does not wait for a completion
 * that will never come; on success the scan-running flag is set and
 * the LED starts blinking.
 */
static void
iwm_scan_start(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;
	int error;

	IWM_LOCK(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/* This should not be possible */
		device_printf(sc->sc_dev,
		    "%s: Previous scan not completed yet\n", __func__);
		/*
		 * NOTE(review): execution falls through and issues another
		 * scan command anyway — confirm the firmware tolerates a
		 * second scan request while one is outstanding.
		 */
	}
	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
		error = iwm_mvm_umac_scan(sc);
	else
		error = iwm_mvm_lmac_scan(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not initiate scan\n");
		IWM_UNLOCK(sc);
		ieee80211_cancel_scan(vap);
	} else {
		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
		iwm_led_blink_start(sc);
		IWM_UNLOCK(sc);
	}
}
6444
/*
 * net80211 scan_end callback: stop the scan LED blink, restore the
 * steady LED for an associated vap, and tell the firmware to abort
 * any scan that is still running.
 */
static void
iwm_scan_end(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;

	IWM_LOCK(sc);
	iwm_led_blink_stop(sc);
	/* Re-enable the steady LED if we are associated. */
	if (vap->iv_state == IEEE80211_S_RUN)
		iwm_mvm_led_enable(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/*
		 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
		 * taskqueue.
		 */
		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
		iwm_mvm_scan_stop_wait(sc);
	}
	IWM_UNLOCK(sc);

	/*
	 * Make sure we don't race, if sc_es_task is still enqueued here.
	 * This is to make sure that it won't call ieee80211_scan_done
	 * when we have already started the next scan.
	 */
	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
}
6473
/*
 * net80211 update_mcast callback: intentionally a no-op — multicast
 * filter reprogramming is not implemented for this device.
 */
static void
iwm_update_mcast(struct ieee80211com *ic)
{
}
6478
/*
 * net80211 set_channel callback: intentionally a no-op for this
 * driver (channel changes are presumably handled through firmware
 * commands elsewhere — see the scan/phy-context paths).
 */
static void
iwm_set_channel(struct ieee80211com *ic)
{
}
6483
/*
 * net80211 scan_curchan callback: intentionally a no-op — per-channel
 * stepping is not driven from the host for this device.
 */
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
6488
/*
 * net80211 scan_mindwell callback: intentionally a no-op for this
 * driver.
 */
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
	/* Nothing to do; dwell is not host-controlled here. */
}
6494
/*
 * Device (re)start task: stops the hardware and re-initializes it if
 * any interface is still marked running.  Serializes against other
 * users of the softc via the IWM_FLAG_BUSY flag and the softc mutex.
 */
void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;

	IWM_LOCK(sc);
	/* Sleep until no other thread holds the BUSY flag. */
	while (sc->sc_flags & IWM_FLAG_BUSY)
		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
	sc->sc_flags |= IWM_FLAG_BUSY;
	iwm_stop(sc);
	if (sc->sc_ic.ic_nrunning > 0)
		iwm_init(sc);
	sc->sc_flags &= ~IWM_FLAG_BUSY;
	/* Wake any thread waiting on the BUSY flag above. */
	wakeup(&sc->sc_flags);
	IWM_UNLOCK(sc);
}
6511
6512 static int
6513 iwm_resume(device_t dev)
6514 {
6515         struct iwm_softc *sc = device_get_softc(dev);
6516         int do_reinit = 0;
6517
6518         /*
6519          * We disable the RETRY_TIMEOUT register (0x41) to keep
6520          * PCI Tx retries from interfering with C3 CPU state.
6521          */
6522         pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6523         iwm_init_task(device_get_softc(dev));
6524
6525         IWM_LOCK(sc);
6526         if (sc->sc_flags & IWM_FLAG_SCANNING) {
6527                 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6528                 do_reinit = 1;
6529         }
6530         IWM_UNLOCK(sc);
6531
6532         if (do_reinit)
6533                 ieee80211_resume_all(&sc->sc_ic);
6534
6535         return 0;
6536 }
6537
6538 static int
6539 iwm_suspend(device_t dev)
6540 {
6541         int do_stop = 0;
6542         struct iwm_softc *sc = device_get_softc(dev);
6543
6544         do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6545
6546         ieee80211_suspend_all(&sc->sc_ic);
6547
6548         if (do_stop) {
6549                 IWM_LOCK(sc);
6550                 iwm_stop(sc);
6551                 sc->sc_flags |= IWM_FLAG_SCANNING;
6552                 IWM_UNLOCK(sc);
6553         }
6554
6555         return (0);
6556 }
6557
/*
 * Common teardown used by both a failed attach and regular detach.
 *
 * 'do_net80211' is zero when called from an attach failure path
 * (net80211 was never attached) and non-zero from iwm_detach().
 * Safe to call more than once: sc_attached guards against double
 * teardown.  Always returns 0.
 */
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	device_t dev = sc->sc_dev;
	int i;

	if (!sc->sc_attached)
		return 0;
	sc->sc_attached = 0;

	if (do_net80211)
		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);

	/* Stop timers and the hardware before freeing any resources. */
	callout_drain(&sc->sc_led_blink_to);
	callout_drain(&sc->sc_watchdog_to);
	iwm_stop_device(sc);
	if (do_net80211) {
		ieee80211_ifdetach(&sc->sc_ic);
	}

	iwm_phy_db_free(sc->sc_phy_db);
	sc->sc_phy_db = NULL;

	iwm_free_nvm_data(sc->nvm_data);

	/* Free descriptor rings */
	iwm_free_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwm_free_tx_ring(sc, &sc->txq[i]);

	/* Free firmware */
	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/* Free scheduler */
	iwm_dma_contig_free(&sc->sched_dma);
	iwm_dma_contig_free(&sc->ict_dma);
	iwm_dma_contig_free(&sc->kw_dma);
	iwm_dma_contig_free(&sc->fw_dma);

	/* Finished with the hardware - detach things */
	iwm_pci_detach(dev);

	if (sc->sc_notif_wait != NULL) {
		iwm_notification_wait_free(sc->sc_notif_wait);
		sc->sc_notif_wait = NULL;
	}

	/* Drain queued tx mbufs; destroy the softc lock last. */
	mbufq_drain(&sc->sc_snd);
	IWM_LOCK_DESTROY(sc);

	return (0);
}
6612
6613 static int
6614 iwm_detach(device_t dev)
6615 {
6616         struct iwm_softc *sc = device_get_softc(dev);
6617
6618         return (iwm_detach_local(sc, 1));
6619 }
6620
/*
 * Newbus glue: device methods (probe/attach/detach plus power
 * management hooks), driver declaration, and module registration.
 */
static device_method_t iwm_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		iwm_probe),
	DEVMETHOD(device_attach,	iwm_attach),
	DEVMETHOD(device_detach,	iwm_detach),
	DEVMETHOD(device_suspend,	iwm_suspend),
	DEVMETHOD(device_resume,	iwm_resume),

	DEVMETHOD_END
};

static driver_t iwm_pci_driver = {
	"iwm",
	iwm_pci_methods,
	sizeof (struct iwm_softc)
};

static devclass_t iwm_devclass;

/* Register on the PCI bus and declare module dependencies. */
DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);