]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/dev/iwm/if_iwm.c
[iwm] Remove OpenBSD-specific comment. Beautify pci cfg space accesses.
[FreeBSD/FreeBSD.git] / sys / dev / iwm / if_iwm.c
1 /*      $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $     */
2
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 #include <sys/cdefs.h>
106 __FBSDID("$FreeBSD$");
107
108 #include "opt_wlan.h"
109
110 #include <sys/param.h>
111 #include <sys/bus.h>
112 #include <sys/conf.h>
113 #include <sys/endian.h>
114 #include <sys/firmware.h>
115 #include <sys/kernel.h>
116 #include <sys/malloc.h>
117 #include <sys/mbuf.h>
118 #include <sys/mutex.h>
119 #include <sys/module.h>
120 #include <sys/proc.h>
121 #include <sys/rman.h>
122 #include <sys/socket.h>
123 #include <sys/sockio.h>
124 #include <sys/sysctl.h>
125 #include <sys/linker.h>
126
127 #include <machine/bus.h>
128 #include <machine/endian.h>
129 #include <machine/resource.h>
130
131 #include <dev/pci/pcivar.h>
132 #include <dev/pci/pcireg.h>
133
134 #include <net/bpf.h>
135
136 #include <net/if.h>
137 #include <net/if_var.h>
138 #include <net/if_arp.h>
139 #include <net/if_dl.h>
140 #include <net/if_media.h>
141 #include <net/if_types.h>
142
143 #include <netinet/in.h>
144 #include <netinet/in_systm.h>
145 #include <netinet/if_ether.h>
146 #include <netinet/ip.h>
147
148 #include <net80211/ieee80211_var.h>
149 #include <net80211/ieee80211_regdomain.h>
150 #include <net80211/ieee80211_ratectl.h>
151 #include <net80211/ieee80211_radiotap.h>
152
153 #include <dev/iwm/if_iwmreg.h>
154 #include <dev/iwm/if_iwmvar.h>
155 #include <dev/iwm/if_iwm_debug.h>
156 #include <dev/iwm/if_iwm_notif_wait.h>
157 #include <dev/iwm/if_iwm_util.h>
158 #include <dev/iwm/if_iwm_binding.h>
159 #include <dev/iwm/if_iwm_phy_db.h>
160 #include <dev/iwm/if_iwm_mac_ctxt.h>
161 #include <dev/iwm/if_iwm_phy_ctxt.h>
162 #include <dev/iwm/if_iwm_time_event.h>
163 #include <dev/iwm/if_iwm_power.h>
164 #include <dev/iwm/if_iwm_scan.h>
165
166 #include <dev/iwm/if_iwm_pcie_trans.h>
167 #include <dev/iwm/if_iwm_led.h>
168
/* NVM section index that holds the HW (MAC address etc.) data, per family. */
#define IWM_NVM_HW_SECTION_NUM_FAMILY_7000	0
#define IWM_NVM_HW_SECTION_NUM_FAMILY_8000	10

/* lower blocks contain EEPROM image and calibration data */
#define IWM_OTP_LOW_IMAGE_SIZE_FAMILY_7000	(16 * 512 * sizeof(uint16_t)) /* 16 KB */
#define IWM_OTP_LOW_IMAGE_SIZE_FAMILY_8000	(32 * 512 * sizeof(uint16_t)) /* 32 KB */

/* firmware(9) image names for the supported chip variants */
#define IWM7260_FW	"iwm7260fw"
#define IWM3160_FW	"iwm3160fw"
#define IWM7265_FW	"iwm7265fw"
#define IWM7265D_FW	"iwm7265Dfw"
#define IWM8000_FW	"iwm8000Cfw"
181
/* Settings shared by every 7000-family configuration below. */
#define IWM_DEVICE_7000_COMMON						\
	.device_family = IWM_DEVICE_FAMILY_7000,			\
	.eeprom_size = IWM_OTP_LOW_IMAGE_SIZE_FAMILY_7000,		\
	.nvm_hw_section_num = IWM_NVM_HW_SECTION_NUM_FAMILY_7000,	\
	.apmg_wake_up_wa = 1

/* Per-device configuration tables, selected by PCI ID at attach time. */
const struct iwm_cfg iwm7260_cfg = {
	.fw_name = IWM7260_FW,
	IWM_DEVICE_7000_COMMON,
	.host_interrupt_operation_mode = 1,
};

const struct iwm_cfg iwm3160_cfg = {
	.fw_name = IWM3160_FW,
	IWM_DEVICE_7000_COMMON,
	.host_interrupt_operation_mode = 1,
};

const struct iwm_cfg iwm3165_cfg = {
	/* XXX IWM7265D_FW doesn't seem to work properly yet */
	.fw_name = IWM7265_FW,
	IWM_DEVICE_7000_COMMON,
	.host_interrupt_operation_mode = 0,
};

const struct iwm_cfg iwm7265_cfg = {
	.fw_name = IWM7265_FW,
	IWM_DEVICE_7000_COMMON,
	.host_interrupt_operation_mode = 0,
};

const struct iwm_cfg iwm7265d_cfg = {
	/* XXX IWM7265D_FW doesn't seem to work properly yet */
	.fw_name = IWM7265_FW,
	IWM_DEVICE_7000_COMMON,
	.host_interrupt_operation_mode = 0,
};

/* Settings shared by every 8000-family configuration below. */
#define IWM_DEVICE_8000_COMMON						\
	.device_family = IWM_DEVICE_FAMILY_8000,			\
	.eeprom_size = IWM_OTP_LOW_IMAGE_SIZE_FAMILY_8000,		\
	.nvm_hw_section_num = IWM_NVM_HW_SECTION_NUM_FAMILY_8000

const struct iwm_cfg iwm8260_cfg = {
	.fw_name = IWM8000_FW,
	IWM_DEVICE_8000_COMMON,
	.host_interrupt_operation_mode = 0,
};
230
/*
 * Channel numbers, in NVM order, that 7000-family parts may report
 * as usable; iwm_init_channel_map() walks this table.
 */
const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");
241
/*
 * Same as iwm_nvm_channels[] but for the 8000 family, which supports a
 * larger set of 5 GHz channels.
 */
const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");

/* Count of 2.4 GHz entries at the head of the channel tables above. */
#define IWM_NUM_2GHZ_CHANNELS	14
#define IWM_N_HW_ADDR_MASK	0xF
255
/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
	uint8_t rate;	/* rate in units of 500 kbit/s (2 == 1 Mb/s) */
	uint8_t plcp;	/* PLCP signal value the firmware expects */
} iwm_rates[] = {
	/* CCK rates, indices IWM_RIDX_CCK .. IWM_RIDX_OFDM-1 */
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	/* OFDM rates, indices IWM_RIDX_OFDM .. IWM_RIDX_MAX */
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
/* Index helpers for iwm_rates[]: CCK entries precede OFDM entries. */
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
282
/* One NVM section as read from the device; data is malloc'ed by the reader. */
struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};
287
/* How long to wait for the firmware ALIVE / calibration notifications. */
#define IWM_MVM_UCODE_ALIVE_TIMEOUT	hz
#define IWM_MVM_UCODE_CALIB_TIMEOUT	(2*hz)

/* State captured from the firmware's ALIVE notification. */
struct iwm_mvm_alive_data {
	int valid;
	uint32_t scd_base_addr;	/* scheduler base address reported by fw */
};
295
296 static int      iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
297 static int      iwm_firmware_store_section(struct iwm_softc *,
298                                            enum iwm_ucode_type,
299                                            const uint8_t *, size_t);
300 static int      iwm_set_default_calib(struct iwm_softc *, const void *);
301 static void     iwm_fw_info_free(struct iwm_fw_info *);
302 static int      iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
303 static void     iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
304 static int      iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
305                                      bus_size_t, bus_size_t);
306 static void     iwm_dma_contig_free(struct iwm_dma_info *);
307 static int      iwm_alloc_fwmem(struct iwm_softc *);
308 static int      iwm_alloc_sched(struct iwm_softc *);
309 static int      iwm_alloc_kw(struct iwm_softc *);
310 static int      iwm_alloc_ict(struct iwm_softc *);
311 static int      iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
312 static void     iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
313 static void     iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
314 static int      iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
315                                   int);
316 static void     iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
317 static void     iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
318 static void     iwm_enable_interrupts(struct iwm_softc *);
319 static void     iwm_restore_interrupts(struct iwm_softc *);
320 static void     iwm_disable_interrupts(struct iwm_softc *);
321 static void     iwm_ict_reset(struct iwm_softc *);
322 static int      iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
323 static void     iwm_stop_device(struct iwm_softc *);
324 static void     iwm_mvm_nic_config(struct iwm_softc *);
325 static int      iwm_nic_rx_init(struct iwm_softc *);
326 static int      iwm_nic_tx_init(struct iwm_softc *);
327 static int      iwm_nic_init(struct iwm_softc *);
328 static int      iwm_enable_txq(struct iwm_softc *, int, int, int);
329 static int      iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
330 static int      iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
331                                    uint16_t, uint8_t *, uint16_t *);
332 static int      iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
333                                      uint16_t *, uint32_t);
334 static uint32_t iwm_eeprom_channel_flags(uint16_t);
335 static void     iwm_add_channel_band(struct iwm_softc *,
336                     struct ieee80211_channel[], int, int *, int, size_t,
337                     const uint8_t[]);
338 static void     iwm_init_channel_map(struct ieee80211com *, int, int *,
339                     struct ieee80211_channel[]);
340 static struct iwm_nvm_data *
341         iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
342                            const uint16_t *, const uint16_t *,
343                            const uint16_t *, const uint16_t *,
344                            const uint16_t *);
345 static void     iwm_free_nvm_data(struct iwm_nvm_data *);
346 static void     iwm_set_hw_address_family_8000(struct iwm_softc *,
347                                                struct iwm_nvm_data *,
348                                                const uint16_t *,
349                                                const uint16_t *);
350 static int      iwm_get_sku(const struct iwm_softc *, const uint16_t *,
351                             const uint16_t *);
352 static int      iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
353 static int      iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
354                                   const uint16_t *);
355 static int      iwm_get_n_hw_addrs(const struct iwm_softc *,
356                                    const uint16_t *);
357 static void     iwm_set_radio_cfg(const struct iwm_softc *,
358                                   struct iwm_nvm_data *, uint32_t);
359 static struct iwm_nvm_data *
360         iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
361 static int      iwm_nvm_init(struct iwm_softc *);
362 static int      iwm_pcie_load_section(struct iwm_softc *, uint8_t,
363                                       const struct iwm_fw_desc *);
364 static int      iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
365                                              bus_addr_t, uint32_t);
366 static int      iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
367                                                 const struct iwm_fw_sects *,
368                                                 int, int *);
369 static int      iwm_pcie_load_cpu_sections(struct iwm_softc *,
370                                            const struct iwm_fw_sects *,
371                                            int, int *);
372 static int      iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
373                                                const struct iwm_fw_sects *);
374 static int      iwm_pcie_load_given_ucode(struct iwm_softc *,
375                                           const struct iwm_fw_sects *);
376 static int      iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *);
377 static int      iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
378 static int      iwm_send_phy_cfg_cmd(struct iwm_softc *);
379 static int      iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
380                                               enum iwm_ucode_type);
381 static int      iwm_run_init_mvm_ucode(struct iwm_softc *, int);
382 static int      iwm_rx_addbuf(struct iwm_softc *, int, int);
383 static int      iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
384 static int      iwm_mvm_get_signal_strength(struct iwm_softc *,
385                                             struct iwm_rx_phy_info *);
386 static void     iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
387                                       struct iwm_rx_packet *,
388                                       struct iwm_rx_data *);
389 static int      iwm_get_noise(struct iwm_softc *sc,
390                     const struct iwm_mvm_statistics_rx_non_phy *);
391 static void     iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
392                                    struct iwm_rx_data *);
393 static int      iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
394                                          struct iwm_rx_packet *,
395                                          struct iwm_node *);
396 static void     iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
397                                   struct iwm_rx_data *);
398 static void     iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
399 #if 0
400 static void     iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
401                                  uint16_t);
402 #endif
403 static const struct iwm_rate *
404         iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
405                         struct mbuf *, struct iwm_tx_cmd *);
406 static int      iwm_tx(struct iwm_softc *, struct mbuf *,
407                        struct ieee80211_node *, int);
408 static int      iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
409                              const struct ieee80211_bpf_params *);
410 static int      iwm_mvm_flush_tx_path(struct iwm_softc *sc,
411                                       uint32_t tfd_msk, uint32_t flags);
412 static int      iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
413                                                 struct iwm_mvm_add_sta_cmd_v7 *,
414                                                 int *);
415 static int      iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
416                                        int);
417 static int      iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
418 static int      iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
419 static int      iwm_mvm_add_int_sta_common(struct iwm_softc *,
420                                            struct iwm_int_sta *,
421                                            const uint8_t *, uint16_t, uint16_t);
422 static int      iwm_mvm_add_aux_sta(struct iwm_softc *);
423 static int      iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
424 static int      iwm_auth(struct ieee80211vap *, struct iwm_softc *);
425 static int      iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
426 static int      iwm_release(struct iwm_softc *, struct iwm_node *);
427 static struct ieee80211_node *
428                 iwm_node_alloc(struct ieee80211vap *,
429                                const uint8_t[IEEE80211_ADDR_LEN]);
430 static void     iwm_setrates(struct iwm_softc *, struct iwm_node *);
431 static int      iwm_media_change(struct ifnet *);
432 static int      iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
433 static void     iwm_endscan_cb(void *, int);
434 static void     iwm_mvm_fill_sf_command(struct iwm_softc *,
435                                         struct iwm_sf_cfg_cmd *,
436                                         struct ieee80211_node *);
437 static int      iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
438 static int      iwm_send_bt_init_conf(struct iwm_softc *);
439 static int      iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
440 static void     iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
441 static int      iwm_init_hw(struct iwm_softc *);
442 static void     iwm_init(struct iwm_softc *);
443 static void     iwm_start(struct iwm_softc *);
444 static void     iwm_stop(struct iwm_softc *);
445 static void     iwm_watchdog(void *);
446 static void     iwm_parent(struct ieee80211com *);
447 #ifdef IWM_DEBUG
448 static const char *
449                 iwm_desc_lookup(uint32_t);
450 static void     iwm_nic_error(struct iwm_softc *);
451 static void     iwm_nic_umac_error(struct iwm_softc *);
452 #endif
453 static void     iwm_notif_intr(struct iwm_softc *);
454 static void     iwm_intr(void *);
455 static int      iwm_attach(device_t);
456 static int      iwm_is_valid_ether_addr(uint8_t *);
457 static void     iwm_preinit(void *);
458 static int      iwm_detach_local(struct iwm_softc *sc, int);
459 static void     iwm_init_task(void *);
460 static void     iwm_radiotap_attach(struct iwm_softc *);
461 static struct ieee80211vap *
462                 iwm_vap_create(struct ieee80211com *,
463                                const char [IFNAMSIZ], int,
464                                enum ieee80211_opmode, int,
465                                const uint8_t [IEEE80211_ADDR_LEN],
466                                const uint8_t [IEEE80211_ADDR_LEN]);
467 static void     iwm_vap_delete(struct ieee80211vap *);
468 static void     iwm_scan_start(struct ieee80211com *);
469 static void     iwm_scan_end(struct ieee80211com *);
470 static void     iwm_update_mcast(struct ieee80211com *);
471 static void     iwm_set_channel(struct ieee80211com *);
472 static void     iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
473 static void     iwm_scan_mindwell(struct ieee80211_scan_state *);
474 static int      iwm_detach(device_t);
475
476 /*
477  * Firmware parser.
478  */
479
480 static int
481 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
482 {
483         const struct iwm_fw_cscheme_list *l = (const void *)data;
484
485         if (dlen < sizeof(*l) ||
486             dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
487                 return EINVAL;
488
489         /* we don't actually store anything for now, always use s/w crypto */
490
491         return 0;
492 }
493
494 static int
495 iwm_firmware_store_section(struct iwm_softc *sc,
496     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
497 {
498         struct iwm_fw_sects *fws;
499         struct iwm_fw_desc *fwone;
500
501         if (type >= IWM_UCODE_TYPE_MAX)
502                 return EINVAL;
503         if (dlen < sizeof(uint32_t))
504                 return EINVAL;
505
506         fws = &sc->sc_fw.fw_sects[type];
507         if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
508                 return EINVAL;
509
510         fwone = &fws->fw_sect[fws->fw_count];
511
512         /* first 32bit are device load offset */
513         memcpy(&fwone->offset, data, sizeof(uint32_t));
514
515         /* rest is data */
516         fwone->data = data + sizeof(uint32_t);
517         fwone->len = dlen - sizeof(uint32_t);
518
519         fws->fw_count++;
520
521         return 0;
522 }
523
/* Scan-channel capability assumed until the firmware TLVs say otherwise. */
#define IWM_DEFAULT_SCAN_CHANNELS 40

/* iwlwifi: iwl-drv.c */
/* Wire format of the IWM_UCODE_TLV_DEF_CALIB TLV: per-image calibration. */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;		/* little-endian enum iwm_ucode_type */
	struct iwm_tlv_calib_ctrl calib;
} __packed;
531
532 static int
533 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
534 {
535         const struct iwm_tlv_calib_data *def_calib = data;
536         uint32_t ucode_type = le32toh(def_calib->ucode_type);
537
538         if (ucode_type >= IWM_UCODE_TYPE_MAX) {
539                 device_printf(sc->sc_dev,
540                     "Wrong ucode_type %u for default "
541                     "calibration.\n", ucode_type);
542                 return EINVAL;
543         }
544
545         sc->sc_default_calib[ucode_type].flow_trigger =
546             def_calib->calib.flow_trigger;
547         sc->sc_default_calib[ucode_type].event_trigger =
548             def_calib->calib.event_trigger;
549
550         return 0;
551 }
552
553 static void
554 iwm_fw_info_free(struct iwm_fw_info *fw)
555 {
556         firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
557         fw->fw_fp = NULL;
558         /* don't touch fw->fw_status */
559         memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
560 }
561
562 static int
563 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
564 {
565         struct iwm_fw_info *fw = &sc->sc_fw;
566         const struct iwm_tlv_ucode_header *uhdr;
567         struct iwm_ucode_tlv tlv;
568         enum iwm_ucode_tlv_type tlv_type;
569         const struct firmware *fwp;
570         const uint8_t *data;
571         uint32_t usniffer_img;
572         uint32_t paging_mem_size;
573         int num_of_cpus;
574         int error = 0;
575         size_t len;
576
577         if (fw->fw_status == IWM_FW_STATUS_DONE &&
578             ucode_type != IWM_UCODE_INIT)
579                 return 0;
580
581         while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
582                 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
583         fw->fw_status = IWM_FW_STATUS_INPROGRESS;
584
585         if (fw->fw_fp != NULL)
586                 iwm_fw_info_free(fw);
587
588         /*
589          * Load firmware into driver memory.
590          * fw_fp will be set.
591          */
592         IWM_UNLOCK(sc);
593         fwp = firmware_get(sc->cfg->fw_name);
594         IWM_LOCK(sc);
595         if (fwp == NULL) {
596                 device_printf(sc->sc_dev,
597                     "could not read firmware %s (error %d)\n",
598                     sc->cfg->fw_name, error);
599                 goto out;
600         }
601         fw->fw_fp = fwp;
602
603         /* (Re-)Initialize default values. */
604         sc->sc_capaflags = 0;
605         sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
606         memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
607         memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
608
609         /*
610          * Parse firmware contents
611          */
612
613         uhdr = (const void *)fw->fw_fp->data;
614         if (*(const uint32_t *)fw->fw_fp->data != 0
615             || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
616                 device_printf(sc->sc_dev, "invalid firmware %s\n",
617                     sc->cfg->fw_name);
618                 error = EINVAL;
619                 goto out;
620         }
621
622         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
623             IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
624             IWM_UCODE_MINOR(le32toh(uhdr->ver)),
625             IWM_UCODE_API(le32toh(uhdr->ver)));
626         data = uhdr->data;
627         len = fw->fw_fp->datasize - sizeof(*uhdr);
628
629         while (len >= sizeof(tlv)) {
630                 size_t tlv_len;
631                 const void *tlv_data;
632
633                 memcpy(&tlv, data, sizeof(tlv));
634                 tlv_len = le32toh(tlv.length);
635                 tlv_type = le32toh(tlv.type);
636
637                 len -= sizeof(tlv);
638                 data += sizeof(tlv);
639                 tlv_data = data;
640
641                 if (len < tlv_len) {
642                         device_printf(sc->sc_dev,
643                             "firmware too short: %zu bytes\n",
644                             len);
645                         error = EINVAL;
646                         goto parse_out;
647                 }
648
649                 switch ((int)tlv_type) {
650                 case IWM_UCODE_TLV_PROBE_MAX_LEN:
651                         if (tlv_len < sizeof(uint32_t)) {
652                                 device_printf(sc->sc_dev,
653                                     "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
654                                     __func__,
655                                     (int) tlv_len);
656                                 error = EINVAL;
657                                 goto parse_out;
658                         }
659                         sc->sc_capa_max_probe_len
660                             = le32toh(*(const uint32_t *)tlv_data);
661                         /* limit it to something sensible */
662                         if (sc->sc_capa_max_probe_len >
663                             IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
664                                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
665                                     "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
666                                     "ridiculous\n", __func__);
667                                 error = EINVAL;
668                                 goto parse_out;
669                         }
670                         break;
671                 case IWM_UCODE_TLV_PAN:
672                         if (tlv_len) {
673                                 device_printf(sc->sc_dev,
674                                     "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
675                                     __func__,
676                                     (int) tlv_len);
677                                 error = EINVAL;
678                                 goto parse_out;
679                         }
680                         sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
681                         break;
682                 case IWM_UCODE_TLV_FLAGS:
683                         if (tlv_len < sizeof(uint32_t)) {
684                                 device_printf(sc->sc_dev,
685                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
686                                     __func__,
687                                     (int) tlv_len);
688                                 error = EINVAL;
689                                 goto parse_out;
690                         }
691                         /*
692                          * Apparently there can be many flags, but Linux driver
693                          * parses only the first one, and so do we.
694                          *
695                          * XXX: why does this override IWM_UCODE_TLV_PAN?
696                          * Intentional or a bug?  Observations from
697                          * current firmware file:
698                          *  1) TLV_PAN is parsed first
699                          *  2) TLV_FLAGS contains TLV_FLAGS_PAN
700                          * ==> this resets TLV_PAN to itself... hnnnk
701                          */
702                         sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
703                         break;
704                 case IWM_UCODE_TLV_CSCHEME:
705                         if ((error = iwm_store_cscheme(sc,
706                             tlv_data, tlv_len)) != 0) {
707                                 device_printf(sc->sc_dev,
708                                     "%s: iwm_store_cscheme(): returned %d\n",
709                                     __func__,
710                                     error);
711                                 goto parse_out;
712                         }
713                         break;
714                 case IWM_UCODE_TLV_NUM_OF_CPU:
715                         if (tlv_len != sizeof(uint32_t)) {
716                                 device_printf(sc->sc_dev,
717                                     "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
718                                     __func__,
719                                     (int) tlv_len);
720                                 error = EINVAL;
721                                 goto parse_out;
722                         }
723                         num_of_cpus = le32toh(*(const uint32_t *)tlv_data);
724                         if (num_of_cpus == 2) {
725                                 fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus =
726                                         TRUE;
727                                 fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus =
728                                         TRUE;
729                                 fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus =
730                                         TRUE;
731                         } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
732                                 device_printf(sc->sc_dev,
733                                     "%s: Driver supports only 1 or 2 CPUs\n",
734                                     __func__);
735                                 error = EINVAL;
736                                 goto parse_out;
737                         }
738                         break;
739                 case IWM_UCODE_TLV_SEC_RT:
740                         if ((error = iwm_firmware_store_section(sc,
741                             IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
742                                 device_printf(sc->sc_dev,
743                                     "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
744                                     __func__,
745                                     error);
746                                 goto parse_out;
747                         }
748                         break;
749                 case IWM_UCODE_TLV_SEC_INIT:
750                         if ((error = iwm_firmware_store_section(sc,
751                             IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
752                                 device_printf(sc->sc_dev,
753                                     "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
754                                     __func__,
755                                     error);
756                                 goto parse_out;
757                         }
758                         break;
759                 case IWM_UCODE_TLV_SEC_WOWLAN:
760                         if ((error = iwm_firmware_store_section(sc,
761                             IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
762                                 device_printf(sc->sc_dev,
763                                     "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
764                                     __func__,
765                                     error);
766                                 goto parse_out;
767                         }
768                         break;
769                 case IWM_UCODE_TLV_DEF_CALIB:
770                         if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
771                                 device_printf(sc->sc_dev,
772                                     "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
773                                     __func__,
774                                     (int) tlv_len,
775                                     (int) sizeof(struct iwm_tlv_calib_data));
776                                 error = EINVAL;
777                                 goto parse_out;
778                         }
779                         if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
780                                 device_printf(sc->sc_dev,
781                                     "%s: iwm_set_default_calib() failed: %d\n",
782                                     __func__,
783                                     error);
784                                 goto parse_out;
785                         }
786                         break;
787                 case IWM_UCODE_TLV_PHY_SKU:
788                         if (tlv_len != sizeof(uint32_t)) {
789                                 error = EINVAL;
790                                 device_printf(sc->sc_dev,
791                                     "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
792                                     __func__,
793                                     (int) tlv_len);
794                                 goto parse_out;
795                         }
796                         sc->sc_fw.phy_config =
797                             le32toh(*(const uint32_t *)tlv_data);
798                         sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
799                                                   IWM_FW_PHY_CFG_TX_CHAIN) >>
800                                                   IWM_FW_PHY_CFG_TX_CHAIN_POS;
801                         sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
802                                                   IWM_FW_PHY_CFG_RX_CHAIN) >>
803                                                   IWM_FW_PHY_CFG_RX_CHAIN_POS;
804                         break;
805
806                 case IWM_UCODE_TLV_API_CHANGES_SET: {
807                         const struct iwm_ucode_api *api;
808                         if (tlv_len != sizeof(*api)) {
809                                 error = EINVAL;
810                                 goto parse_out;
811                         }
812                         api = (const struct iwm_ucode_api *)tlv_data;
813                         /* Flags may exceed 32 bits in future firmware. */
814                         if (le32toh(api->api_index) > 0) {
815                                 device_printf(sc->sc_dev,
816                                     "unsupported API index %d\n",
817                                     le32toh(api->api_index));
818                                 goto parse_out;
819                         }
820                         sc->sc_ucode_api = le32toh(api->api_flags);
821                         break;
822                 }
823
824                 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
825                         const struct iwm_ucode_capa *capa;
826                         int idx, i;
827                         if (tlv_len != sizeof(*capa)) {
828                                 error = EINVAL;
829                                 goto parse_out;
830                         }
831                         capa = (const struct iwm_ucode_capa *)tlv_data;
832                         idx = le32toh(capa->api_index);
833                         if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
834                                 device_printf(sc->sc_dev,
835                                     "unsupported API index %d\n", idx);
836                                 goto parse_out;
837                         }
838                         for (i = 0; i < 32; i++) {
839                                 if ((le32toh(capa->api_capa) & (1U << i)) == 0)
840                                         continue;
841                                 setbit(sc->sc_enabled_capa, i + (32 * idx));
842                         }
843                         break;
844                 }
845
846                 case 48: /* undocumented TLV */
847                 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
848                 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
849                         /* ignore, not used by current driver */
850                         break;
851
852                 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
853                         if ((error = iwm_firmware_store_section(sc,
854                             IWM_UCODE_REGULAR_USNIFFER, tlv_data,
855                             tlv_len)) != 0)
856                                 goto parse_out;
857                         break;
858
859                 case IWM_UCODE_TLV_PAGING:
860                         if (tlv_len != sizeof(uint32_t)) {
861                                 error = EINVAL;
862                                 goto parse_out;
863                         }
864                         paging_mem_size = le32toh(*(const uint32_t *)tlv_data);
865
866                         IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
867                             "%s: Paging: paging enabled (size = %u bytes)\n",
868                             __func__, paging_mem_size);
869                         if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
870                                 device_printf(sc->sc_dev,
871                                         "%s: Paging: driver supports up to %u bytes for paging image\n",
872                                         __func__, IWM_MAX_PAGING_IMAGE_SIZE);
873                                 error = EINVAL;
874                                 goto out;
875                         }
876                         if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
877                                 device_printf(sc->sc_dev,
878                                     "%s: Paging: image isn't multiple %u\n",
879                                     __func__, IWM_FW_PAGING_SIZE);
880                                 error = EINVAL;
881                                 goto out;
882                         }
883
884                         sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
885                             paging_mem_size;
886                         usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
887                         sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
888                             paging_mem_size;
889                         break;
890
891                 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
892                         if (tlv_len != sizeof(uint32_t)) {
893                                 error = EINVAL;
894                                 goto parse_out;
895                         }
896                         sc->sc_capa_n_scan_channels =
897                           le32toh(*(const uint32_t *)tlv_data);
898                         break;
899
900                 case IWM_UCODE_TLV_FW_VERSION:
901                         if (tlv_len != sizeof(uint32_t) * 3) {
902                                 error = EINVAL;
903                                 goto parse_out;
904                         }
905                         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
906                             "%d.%d.%d",
907                             le32toh(((const uint32_t *)tlv_data)[0]),
908                             le32toh(((const uint32_t *)tlv_data)[1]),
909                             le32toh(((const uint32_t *)tlv_data)[2]));
910                         break;
911
912                 case IWM_UCODE_TLV_FW_MEM_SEG:
913                         break;
914
915                 default:
916                         device_printf(sc->sc_dev,
917                             "%s: unknown firmware section %d, abort\n",
918                             __func__, tlv_type);
919                         error = EINVAL;
920                         goto parse_out;
921                 }
922
923                 len -= roundup(tlv_len, 4);
924                 data += roundup(tlv_len, 4);
925         }
926
927         KASSERT(error == 0, ("unhandled error"));
928
929  parse_out:
930         if (error) {
931                 device_printf(sc->sc_dev, "firmware parse error %d, "
932                     "section type %d\n", error, tlv_type);
933         }
934
935         if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
936                 device_printf(sc->sc_dev,
937                     "device uses unsupported power ops\n");
938                 error = ENOTSUP;
939         }
940
941  out:
942         if (error) {
943                 fw->fw_status = IWM_FW_STATUS_NONE;
944                 if (fw->fw_fp != NULL)
945                         iwm_fw_info_free(fw);
946         } else
947                 fw->fw_status = IWM_FW_STATUS_DONE;
948         wakeup(&sc->sc_fw);
949
950         return error;
951 }
952
953 /*
954  * DMA resource routines
955  */
956
957 static void
958 iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
959 {
960         if (error != 0)
961                 return;
962         KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
963         *(bus_addr_t *)arg = segs[0].ds_addr;
964 }
965
/*
 * Allocate a physically contiguous, coherent, zeroed DMA buffer of
 * 'size' bytes with the given 'alignment', recording the tag, map,
 * kernel virtual address and bus address in 'dma'.
 *
 * Returns 0 on success or a bus_dma error code; on failure every
 * partially created resource is released via iwm_dma_contig_free().
 */
static int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int error;

	/* Start from a clean state so the fail path can free safely. */
	dma->tag = NULL;
	dma->map = NULL;
	dma->size = size;
	dma->vaddr = NULL;

	/* Single segment; the device requires 32-bit DMA addresses. */
	error = bus_dma_tag_create(tag, alignment,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
	    1, size, 0, NULL, NULL, &dma->tag);
	if (error != 0)
		goto fail;

	error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
	if (error != 0)
		goto fail;

	/* iwm_dma_map_addr() stores the bus address into dma->paddr. */
	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
	    iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
	if (error != 0) {
		/* Clear vaddr so iwm_dma_contig_free() won't free it again. */
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
		goto fail;
	}

	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	return 0;

fail:
	iwm_dma_contig_free(dma);

	return error;
}
1005
/*
 * Release everything iwm_dma_contig_alloc() set up.  Safe to call on a
 * partially initialized iwm_dma_info (NULL vaddr and/or tag), which is
 * how the allocator's failure path uses it.
 */
static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->vaddr != NULL) {
		/* Teardown order matters: sync, unload, then free. */
		bus_dmamap_sync(dma->tag, dma->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->tag, dma->map);
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
	}
	if (dma->tag != NULL) {
		bus_dma_tag_destroy(dma->tag);
		dma->tag = NULL;
	}
}
1021
/*
 * fwmem is used to load firmware sections onto the card; allocate one
 * IWM_FH_MEM_TB_MAX_LENGTH-byte staging buffer for that purpose.
 */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
}
1030
/*
 * Allocate the TX scheduler byte-count tables, one per TX queue.
 * (Original note: "not used?" -- TODO confirm whether the scheduler
 * rings are actually consumed by the hardware on this device family.)
 */
static int
iwm_alloc_sched(struct iwm_softc *sc)
{
	/* TX scheduler rings must be aligned on a 1KB boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
}
1039
/*
 * Allocate the 4KB "keep-warm" page, used internally by the card; see
 * iwl-fh.h in the Linux iwlwifi sources for details.
 */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}
1046
/*
 * Allocate the interrupt cause table (ICT), into which the device DMAs
 * interrupt causes; alignment must match the ICT_PADDR shift used when
 * programming IWM_CSR_DRAM_INT_TBL_REG in iwm_ict_reset().
 */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}
1054
/*
 * Allocate an RX ring: the descriptor array, the status area, a DMA tag
 * for RX buffers, a spare map for buffer replacement, and one mapped
 * buffer per ring slot.  On any failure, everything allocated so far is
 * torn down via iwm_free_rx_ring().
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	/* Each descriptor is one 32-bit receive buffer address. */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/* Create RX buffer DMA tag. */
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}
	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		/* iwm_rx_addbuf() attaches an mbuf and fills the descriptor. */
		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}
1125
/*
 * Reset the RX ring software state without freeing any resources.
 * Note: this always clears sc->rxq.stat, not ring->stat; the driver has
 * a single RX queue so the two are the same object in practice.
 */
static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	/* Reset the ring state */
	ring->cur = 0;

	/*
	 * The hw rx ring index in shared memory must also be cleared,
	 * otherwise the discrepancy can cause reprocessing chaos.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
}
1138
1139 static void
1140 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1141 {
1142         int i;
1143
1144         iwm_dma_contig_free(&ring->desc_dma);
1145         iwm_dma_contig_free(&ring->stat_dma);
1146
1147         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1148                 struct iwm_rx_data *data = &ring->data[i];
1149
1150                 if (data->m != NULL) {
1151                         bus_dmamap_sync(ring->data_dmat, data->map,
1152                             BUS_DMASYNC_POSTREAD);
1153                         bus_dmamap_unload(ring->data_dmat, data->map);
1154                         m_freem(data->m);
1155                         data->m = NULL;
1156                 }
1157                 if (data->map != NULL) {
1158                         bus_dmamap_destroy(ring->data_dmat, data->map);
1159                         data->map = NULL;
1160                 }
1161         }
1162         if (ring->spare_map != NULL) {
1163                 bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1164                 ring->spare_map = NULL;
1165         }
1166         if (ring->data_dmat != NULL) {
1167                 bus_dma_tag_destroy(ring->data_dmat);
1168                 ring->data_dmat = NULL;
1169         }
1170 }
1171
/*
 * Allocate a TX ring for queue 'qid': the TFD descriptor array and,
 * for queues up to and including the command queue, the command buffer
 * area, a data DMA tag and per-slot DMA maps.  Pre-computes each slot's
 * command/scratch bus addresses inside the contiguous cmd area.
 * On failure everything is released via iwm_free_tx_ring().
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_MVM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	/* Walk the contiguous cmd area, one iwm_device_cmd per slot. */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		/* scratch lives inside the tx_cmd payload of the slot */
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	/* We must have consumed exactly the whole cmd area. */
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
1251
/*
 * Reset a TX ring: drop all queued mbufs, zero the descriptors and
 * reset the software indices.  DMA maps and tags are kept (contrast
 * with iwm_free_tx_ring()).
 */
static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			/* Sync and unload before releasing the mbuf. */
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	/* This queue can no longer be full. */
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;

	/* Drop any outstanding "keep NIC awake" request for the cmd queue. */
	if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
		iwm_pcie_clear_cmd_in_flight(sc);
}
1279
1280 static void
1281 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1282 {
1283         int i;
1284
1285         iwm_dma_contig_free(&ring->desc_dma);
1286         iwm_dma_contig_free(&ring->cmd_dma);
1287
1288         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1289                 struct iwm_tx_data *data = &ring->data[i];
1290
1291                 if (data->m != NULL) {
1292                         bus_dmamap_sync(ring->data_dmat, data->map,
1293                             BUS_DMASYNC_POSTWRITE);
1294                         bus_dmamap_unload(ring->data_dmat, data->map);
1295                         m_freem(data->m);
1296                         data->m = NULL;
1297                 }
1298                 if (data->map != NULL) {
1299                         bus_dmamap_destroy(ring->data_dmat, data->map);
1300                         data->map = NULL;
1301                 }
1302         }
1303         if (ring->data_dmat != NULL) {
1304                 bus_dma_tag_destroy(ring->data_dmat);
1305                 ring->data_dmat = NULL;
1306         }
1307 }
1308
1309 /*
1310  * High-level hardware frobbing routines
1311  */
1312
/*
 * Unmask the default interrupt set and remember it in sc_intmask so
 * iwm_restore_interrupts() can re-apply it later.
 */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1319
/* Re-apply the interrupt mask last set by iwm_enable_interrupts(). */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1325
/* Mask all device interrupts and acknowledge any that are pending. */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}
1336
/*
 * Clear and re-arm the interrupt cause table (ICT), point the device at
 * it, and switch the driver into ICT interrupt mode with interrupts
 * re-enabled.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	/* Quiesce the device while the table is being rewritten. */
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1360
1361 /* iwlwifi pcie/trans.c */
1362
1363 /*
1364  * Since this .. hard-resets things, it's time to actually
1365  * mark the first vap (if any) as having no mac context.
1366  * It's annoying, but since the driver is potentially being
1367  * stop/start'ed whilst active (thanks openbsd port!) we
1368  * have to correctly track this.
1369  */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, qid;
	uint32_t mask = 0;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->is_uploaded = 0;
	}

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	/* Disable the TX scheduler first. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	if (iwm_nic_lock(sc)) {
		/* Stop each Tx DMA channel */
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			/* collect the per-channel idle bits to poll below */
			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
		}

		/* Wait for DMA channels to be idle */
		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
		    5000)) {
			/* not fatal; log and continue the shutdown */
			device_printf(sc->sc_dev,
			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
		}
		iwm_nic_unlock(sc);
	}
	iwm_pcie_rx_stop(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		/* Power-down device's busmaster DMA clocks */
		iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
		    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
		DELAY(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);
	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
1452
1453 /* iwlwifi: mvm/ops.c */
/* iwlwifi: mvm/ops.c */
/*
 * Program IWM_CSR_HW_IF_CONFIG_REG from the MAC revision (from
 * sc_hw_rev) and the radio type/step/dash extracted from the firmware
 * PHY configuration, then apply the 7000-family PCIe power-off
 * workaround.
 */
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;
	uint32_t phy_config = iwm_mvm_get_phy_config(sc);

	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}
1496
/*
 * Bring up the RX DMA engine: stop RX DMA, reset the ring pointers,
 * point the hardware at the RX descriptor ring and status page, then
 * enable the RX channel.  Returns 0 on success or EBUSY when the NIC
 * cannot be locked for register access.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
        /*
         * Initialize RX ring.  This is from the iwn driver.
         */
        memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

        /* Stop Rx DMA */
        iwm_pcie_rx_stop(sc);

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* reset and flush pointers */
        IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
        IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
        IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
        IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

        /* Set physical address of RX ring (256-byte aligned). */
        IWM_WRITE(sc,
            IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

        /* Set physical address of RX status (16-byte aligned). */
        IWM_WRITE(sc,
            IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

        /* Enable RX. */
        IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
            IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
            IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |  /* HW bug */
            IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
            IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK        |
            (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
            IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
            IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

        IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

        /* W/A for interrupt coalescing bug in 7260 and 3160 */
        if (sc->cfg->host_interrupt_operation_mode)
                IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

        /*
         * Thus sayeth el jefe (iwlwifi) via a comment:
         *
         * This value should initially be 0 (before preparing any
         * RBs), should be 8 after preparing the first 8 RBs (for example)
         */
        IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

        iwm_nic_unlock(sc);

        return 0;
}
1553
/*
 * Bring up the TX side: deactivate the scheduler, program the
 * "keep warm" page and the per-queue descriptor ring base addresses,
 * then put the scheduler in auto-active mode.  Returns 0 on success
 * or EBUSY when the NIC cannot be locked.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
        int qid;

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* Deactivate TX scheduler. */
        iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

        /* Set physical address of "keep warm" page (16-byte aligned). */
        IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

        /* Initialize TX rings. */
        for (qid = 0; qid < nitems(sc->txq); qid++) {
                struct iwm_tx_ring *txq = &sc->txq[qid];

                /* Set physical address of TX ring (256-byte aligned). */
                IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
                    txq->desc_dma.paddr >> 8);
                IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
                    "%s: loading ring %d descriptors (%p) at %lx\n",
                    __func__,
                    qid, txq->desc,
                    (unsigned long) (txq->desc_dma.paddr >> 8));
        }

        iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

        iwm_nic_unlock(sc);

        return 0;
}
1588
/*
 * Full NIC bring-up: APM init, power configuration (7000 family),
 * HW interface configuration, then RX and TX DMA initialization.
 * Returns 0 on success or the first failing sub-step's errno.
 */
static int
iwm_nic_init(struct iwm_softc *sc)
{
        int error;

        iwm_apm_init(sc);
        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
                iwm_set_pwr(sc);

        iwm_mvm_nic_config(sc);

        if ((error = iwm_nic_rx_init(sc)) != 0)
                return error;

        /*
         * Ditto for TX, from iwn
         */
        if ((error = iwm_nic_tx_init(sc)) != 0)
                return error;

        IWM_DPRINTF(sc, IWM_DEBUG_RESET,
            "%s: shadow registers enabled\n", __func__);
        /* Enable shadow register access for CSR reads. */
        IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

        return 0;
}
1615
/*
 * TX FIFO assignment per access category (VO, VI, BE, BK order here);
 * presumably indexed by the net80211 WME access category — confirm the
 * index convention at the call sites.
 */
const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
        IWM_MVM_TX_FIFO_VO,
        IWM_MVM_TX_FIFO_VI,
        IWM_MVM_TX_FIFO_BE,
        IWM_MVM_TX_FIFO_BK,
};
1622
/*
 * Activate TX queue 'qid' and bind it to hardware FIFO 'fifo' for
 * station 'sta_id'.  The command queue is configured directly through
 * the scheduler PRPH registers; every other queue is configured by the
 * firmware via an IWM_SCD_QUEUE_CFG command.  Returns 0 on success or
 * an errno.
 */
static int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
        if (!iwm_nic_lock(sc)) {
                device_printf(sc->sc_dev,
                    "%s: cannot enable txq %d\n",
                    __func__,
                    qid);
                return EBUSY;
        }

        /* Reset the queue's write pointer. */
        IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

        if (qid == IWM_MVM_CMD_QUEUE) {
                /* Deactivate the queue before (re)configuring it. */
                iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
                    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
                    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

                iwm_nic_unlock(sc);

                /* Disable aggregation for this queue. */
                iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

                if (!iwm_nic_lock(sc)) {
                        device_printf(sc->sc_dev,
                            "%s: cannot enable txq %d\n", __func__, qid);
                        return EBUSY;
                }
                iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
                iwm_nic_unlock(sc);

                /* Clear the queue's scheduler context in SRAM. */
                iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
                /* Set scheduler window size and frame limit. */
                iwm_write_mem32(sc,
                    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
                    sizeof(uint32_t),
                    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
                    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
                    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
                    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

                if (!iwm_nic_lock(sc)) {
                        device_printf(sc->sc_dev,
                            "%s: cannot enable txq %d\n", __func__, qid);
                        return EBUSY;
                }
                /* Mark the queue active and attach it to the FIFO. */
                iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
                    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
                    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
                    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
                    IWM_SCD_QUEUE_STTS_REG_MSK);
        } else {
                struct iwm_scd_txq_cfg_cmd cmd;
                int error;

                /* Firmware-managed queue: drop the lock for the command. */
                iwm_nic_unlock(sc);

                memset(&cmd, 0, sizeof(cmd));
                cmd.scd_queue = qid;
                cmd.enable = 1;
                cmd.sta_id = sta_id;
                cmd.tx_fifo = fifo;
                cmd.aggregate = 0;
                cmd.window = IWM_FRAME_LIMIT;

                error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
                    sizeof(cmd), &cmd);
                if (error) {
                        device_printf(sc->sc_dev,
                            "cannot enable txq %d\n", qid);
                        return error;
                }

                if (!iwm_nic_lock(sc))
                        return EBUSY;
        }

        /*
         * NOTE(review): this ORs the queue number itself into
         * IWM_SCD_EN_CTRL rather than a (1 << qid) bitmask; other iwm
         * ports use a bitmask here — confirm against iwlwifi before
         * relying on this register's contents.
         */
        iwm_write_prph(sc, IWM_SCD_EN_CTRL,
            iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

        iwm_nic_unlock(sc);

        IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
            __func__, qid, fifo);

        return 0;
}
1710
/*
 * Post-"alive" firmware bring-up: reset the ICT table, sanity-check the
 * scheduler SRAM base address reported by the firmware against the PRPH
 * value, clear the scheduler context memory, program the scheduler DRAM
 * base, enable the command queue and the TX DMA channels, and re-enable
 * L1-Active on pre-8000 parts.  Returns 0 on success or an errno.
 */
static int
iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
{
        int error, chnl;

        /* Number of 32-bit words between context memory and the
         * upper bound of the translation tables, all of which get
         * zeroed below. */
        int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
            IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);

        if (!iwm_nic_lock(sc))
                return EBUSY;

        iwm_ict_reset(sc);

        iwm_nic_unlock(sc);

        sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
        if (scd_base_addr != 0 &&
            scd_base_addr != sc->scd_base_addr) {
                device_printf(sc->sc_dev,
                    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
                    __func__, sc->scd_base_addr, scd_base_addr);
        }

        /* reset context data, TX status and translation data */
        error = iwm_write_mem(sc,
            sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
            NULL, clear_dwords);
        if (error)
                /* NOTE(review): reports EBUSY regardless of the actual
                 * iwm_write_mem() error code. */
                return EBUSY;

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* Set physical address of TX scheduler rings (1KB aligned). */
        iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

        iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

        iwm_nic_unlock(sc);

        /* enable command channel */
        error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
        if (error)
                return error;

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* Activate all TX scheduler FIFOs. */
        iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

        /* Enable DMA channels. */
        for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
                IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
                    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
                    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
        }

        IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
            IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

        iwm_nic_unlock(sc);

        /* Enable L1-Active */
        if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
                iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
                    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
        }

        return error;
}
1781
1782 /*
1783  * NVM read access and content parsing.  We do not support
1784  * external NVM or writing NVM.
1785  * iwlwifi/mvm/nvm.c
1786  */
1787
/* Default NVM size to read per NVM_ACCESS command */
#define IWM_NVM_DEFAULT_CHUNK_SIZE      (2*1024)

/* Opcodes for iwm_nvm_access_cmd.op_code */
#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0

/* Status codes carried in the NVM_ACCESS command response */
enum {
        IWM_READ_NVM_CHUNK_SUCCEED = 0,
        IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
};
1799
/*
 * Read one chunk of at most 'length' bytes from NVM section 'section'
 * at byte offset 'offset' via the IWM_NVM_ACCESS_CMD firmware command.
 * On success the payload is copied to data + offset and *len is set to
 * the byte count actually returned (*len == 0 when the section ended
 * exactly on a 2K boundary).  Returns 0 on success or an errno.
 */
static int
iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
        uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
{
        struct iwm_nvm_access_cmd nvm_access_cmd = {
                .offset = htole16(offset),
                .length = htole16(length),
                .type = htole16(section),
                .op_code = IWM_NVM_READ_OPCODE,
        };
        struct iwm_nvm_access_resp *nvm_resp;
        struct iwm_rx_packet *pkt;
        struct iwm_host_cmd cmd = {
                .id = IWM_NVM_ACCESS_CMD,
                .flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
                .data = { &nvm_access_cmd, },
        };
        int ret, bytes_read, offset_read;
        uint8_t *resp_data;

        cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);

        ret = iwm_send_cmd(sc, &cmd);
        if (ret) {
                device_printf(sc->sc_dev,
                    "Could not send NVM_ACCESS command (error=%d)\n", ret);
                return ret;
        }

        pkt = cmd.resp_pkt;

        /* Extract NVM response */
        nvm_resp = (void *)pkt->data;
        ret = le16toh(nvm_resp->status);
        bytes_read = le16toh(nvm_resp->length);
        offset_read = le16toh(nvm_resp->offset);
        resp_data = nvm_resp->data;
        if (ret) {
                if ((offset != 0) &&
                    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
                        /*
                         * meaning of NOT_VALID_ADDRESS:
                         * driver try to read chunk from address that is
                         * multiple of 2K and got an error since addr is empty.
                         * meaning of (offset != 0): driver already
                         * read valid data from another chunk so this case
                         * is not an error.
                         */
                        IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
                                    "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
                                    offset);
                        *len = 0;
                        ret = 0;
                } else {
                        IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
                                    "NVM access command failed with status %d\n", ret);
                        ret = EIO;
                }
                goto exit;
        }

        /* Firmware must echo back the offset we asked for. */
        if (offset_read != offset) {
                device_printf(sc->sc_dev,
                    "NVM ACCESS response with invalid offset %d\n",
                    offset_read);
                ret = EINVAL;
                goto exit;
        }

        /* Guard against the firmware overflowing our buffer. */
        if (bytes_read > length) {
                device_printf(sc->sc_dev,
                    "NVM ACCESS response with too much data "
                    "(%d bytes requested, %d bytes received)\n",
                    length, bytes_read);
                ret = EINVAL;
                goto exit;
        }

        /* Write data to NVM */
        memcpy(data + offset, resp_data, bytes_read);
        *len = bytes_read;

 exit:
        iwm_free_resp(sc, &cmd);
        return ret;
}
1886
1887 /*
1888  * Reads an NVM section completely.
1889  * NICs prior to 7000 family don't have a real NVM, but just read
1890  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1891  * by uCode, we need to manually check in this case that we don't
1892  * overflow and try to read more than the EEPROM size.
1893  * For 7000 family NICs, we supply the maximal size we can read, and
1894  * the uCode fills the response with as much data as we can,
1895  * without overflowing, so no check is needed.
1896  */
/*
 * Read NVM section 'section' into 'data' chunk by chunk until a short
 * read signals the end of the section.  'size_read' is the byte count
 * already consumed by previously read sections, used to bound the total
 * against the configured EEPROM size.  On success *len is set to the
 * section length in bytes.  Returns 0 or an errno.
 */
static int
iwm_nvm_read_section(struct iwm_softc *sc,
        uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
{
        uint16_t seglen, length, offset = 0;
        int ret;

        /* Set nvm section read length */
        length = IWM_NVM_DEFAULT_CHUNK_SIZE;

        seglen = length;

        /* Read the NVM until exhausted (reading less than requested) */
        while (seglen == length) {
                /* Check no memory assumptions fail and cause an overflow */
                if ((size_read + offset + length) >
                    sc->cfg->eeprom_size) {
                        device_printf(sc->sc_dev,
                            "EEPROM size is too small for NVM\n");
                        return ENOBUFS;
                }

                ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
                if (ret) {
                        IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
                                    "Cannot read NVM from section %d offset %d, length %d\n",
                                    section, offset, length);
                        return ret;
                }
                offset += seglen;
        }

        IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
                    "NVM section %d read completed\n", section);
        *len = offset;
        return 0;
}
1934
1935 /*
1936  * BEGIN IWM_NVM_PARSE
1937  */
1938
1939 /* iwlwifi/iwl-nvm-parse.c */
1940
/*
 * NVM offsets (in 16-bit words) for pre-8000 parts.  Offsets following
 * a *_SECTION constant are relative to that section's start.
 */
enum iwm_nvm_offsets {
        /* NVM HW-Section offset (in words) definitions */
        IWM_HW_ADDR = 0x15,

        /* NVM SW-Section offset (in words) definitions */
        IWM_NVM_SW_SECTION = 0x1C0,
        IWM_NVM_VERSION = 0,
        IWM_RADIO_CFG = 1,
        IWM_SKU = 2,
        IWM_N_HW_ADDRS = 3,
        IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

        /* NVM calibration section offset (in words) definitions */
        IWM_NVM_CALIB_SECTION = 0x2B8,
        IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
1958
/* NVM offsets (in 16-bit words) for family-8000 parts. */
enum iwm_8000_nvm_offsets {
        /* NVM HW-Section offset (in words) definitions */
        IWM_HW_ADDR0_WFPM_8000 = 0x12,
        IWM_HW_ADDR1_WFPM_8000 = 0x16,
        IWM_HW_ADDR0_PCIE_8000 = 0x8A,
        IWM_HW_ADDR1_PCIE_8000 = 0x8E,
        IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,

        /* NVM SW-Section offset (in words) definitions */
        IWM_NVM_SW_SECTION_8000 = 0x1C0,
        IWM_NVM_VERSION_8000 = 0,
        IWM_RADIO_CFG_8000 = 0,
        IWM_SKU_8000 = 2,
        IWM_N_HW_ADDRS_8000 = 3,

        /* NVM REGULATORY -Section offset (in words) definitions */
        IWM_NVM_CHANNELS_8000 = 0,
        IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
        IWM_NVM_LAR_OFFSET_8000 = 0x507,
        IWM_NVM_LAR_ENABLED_8000 = 0x7,

        /* NVM calibration section offset (in words) definitions */
        IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
        IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
};
1984
/* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
        IWM_NVM_SKU_CAP_BAND_24GHZ      = (1 << 0),     /* 2.4 GHz supported */
        IWM_NVM_SKU_CAP_BAND_52GHZ      = (1 << 1),     /* 5 GHz supported */
        IWM_NVM_SKU_CAP_11N_ENABLE      = (1 << 2),     /* 802.11n allowed */
        IWM_NVM_SKU_CAP_11AC_ENABLE     = (1 << 3),     /* 802.11ac allowed */
};
1992
/* radio config bits (actual values from NVM definition) */
#define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
#define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
#define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
#define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */

/* radio config field extractors for the family-8000 layout */
#define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)       (x & 0xF)
#define IWM_NVM_RF_CFG_DASH_MSK_8000(x)         ((x >> 4) & 0xF)
#define IWM_NVM_RF_CFG_STEP_MSK_8000(x)         ((x >> 8) & 0xF)
#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)         ((x >> 12) & 0xFFF)
#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)       ((x >> 24) & 0xF)
#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)       ((x >> 28) & 0xF)

/* Default maximum TX power; presumably dBm — confirm against iwlwifi. */
#define DEFAULT_MAX_TX_POWER 16
2009
/**
 * enum iwm_nvm_channel_flags - channel flags in NVM
 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
 * @IWM_NVM_CHANNEL_RADAR: radar detection required
 * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
 *
 * Note that bit 2 is unused here; the values below match the NVM
 * layout, so the gap is deliberate.
 */
enum iwm_nvm_channel_flags {
        IWM_NVM_CHANNEL_VALID = (1 << 0),
        IWM_NVM_CHANNEL_IBSS = (1 << 1),
        IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
        IWM_NVM_CHANNEL_RADAR = (1 << 4),
        IWM_NVM_CHANNEL_DFS = (1 << 7),
        IWM_NVM_CHANNEL_WIDE = (1 << 8),
        IWM_NVM_CHANNEL_40MHZ = (1 << 9),
        IWM_NVM_CHANNEL_80MHZ = (1 << 10),
        IWM_NVM_CHANNEL_160MHZ = (1 << 11),
};
2034
2035 /*
2036  * Translate EEPROM flags to net80211.
2037  */
2038 static uint32_t
2039 iwm_eeprom_channel_flags(uint16_t ch_flags)
2040 {
2041         uint32_t nflags;
2042
2043         nflags = 0;
2044         if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
2045                 nflags |= IEEE80211_CHAN_PASSIVE;
2046         if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
2047                 nflags |= IEEE80211_CHAN_NOADHOC;
2048         if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
2049                 nflags |= IEEE80211_CHAN_DFS;
2050                 /* Just in case. */
2051                 nflags |= IEEE80211_CHAN_NOADHOC;
2052         }
2053
2054         return (nflags);
2055 }
2056
/*
 * Add the NVM channels with indices [ch_idx, ch_num) to the net80211
 * channel array 'chans' (up to 'maxchans' entries, count in *nchans),
 * enabling the modes set in 'bands'.  Channels without the VALID flag
 * are skipped; the loop stops early once ieee80211_add_channel()
 * returns an error (e.g. the array is full).
 */
static void
iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
    int maxchans, int *nchans, int ch_idx, size_t ch_num,
    const uint8_t bands[])
{
        const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
        uint32_t nflags;
        uint16_t ch_flags;
        uint8_t ieee;
        int error;

        for (; ch_idx < ch_num; ch_idx++) {
                ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
                /* Map the NVM channel index to an IEEE channel number. */
                if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
                        ieee = iwm_nvm_channels[ch_idx];
                else
                        ieee = iwm_nvm_channels_8000[ch_idx];

                if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
                        IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
                            "Ch. %d Flags %x [%sGHz] - No traffic\n",
                            ieee, ch_flags,
                            (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
                            "5.2" : "2.4");
                        continue;
                }

                nflags = iwm_eeprom_channel_flags(ch_flags);
                error = ieee80211_add_channel(chans, maxchans, nchans,
                    ieee, 0, 0, nflags, bands);
                if (error != 0)
                        break;

                IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
                    "Ch. %d Flags %x [%sGHz] - Added\n",
                    ieee, ch_flags,
                    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
                    "5.2" : "2.4");
        }
}
2097
/*
 * net80211 ic_getradiocaps callback: build the channel list from the
 * NVM channel data — 2 GHz channels 1-13 as 11b/g, channel 14 as 11b
 * only, and the 5 GHz channels as 11a when the SKU enables that band.
 */
static void
iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
    struct ieee80211_channel chans[])
{
        struct iwm_softc *sc = ic->ic_softc;
        struct iwm_nvm_data *data = sc->nvm_data;
        uint8_t bands[IEEE80211_MODE_BYTES];
        size_t ch_num;

        memset(bands, 0, sizeof(bands));
        /* 1-13: 11b/g channels. */
        setbit(bands, IEEE80211_MODE_11B);
        setbit(bands, IEEE80211_MODE_11G);
        iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
            IWM_NUM_2GHZ_CHANNELS - 1, bands);

        /* 14: 11b channel only. */
        clrbit(bands, IEEE80211_MODE_11G);
        iwm_add_channel_band(sc, chans, maxchans, nchans,
            IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);

        if (data->sku_cap_band_52GHz_enable) {
                /* Channel table length differs per device family. */
                if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
                        ch_num = nitems(iwm_nvm_channels);
                else
                        ch_num = nitems(iwm_nvm_channels_8000);
                memset(bands, 0, sizeof(bands));
                setbit(bands, IEEE80211_MODE_11A);
                iwm_add_channel_band(sc, chans, maxchans, nchans,
                    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
        }
}
2130
/*
 * Derive the station MAC address on family-8000 parts: prefer the
 * MAC-address-override NVM section; if that is absent, reserved, or
 * invalid, fall back to the address stored in the WFMP PRPH registers;
 * as a last resort zero the address and log an error.
 */
static void
iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
        const uint16_t *mac_override, const uint16_t *nvm_hw)
{
        const uint8_t *hw_addr;

        if (mac_override) {
                /* Placeholder address some NVMs carry instead of a real one. */
                static const uint8_t reserved_mac[] = {
                        0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
                };

                hw_addr = (const uint8_t *)(mac_override +
                                 IWM_MAC_ADDRESS_OVERRIDE_8000);

                /*
                 * Store the MAC address from MAO section.
                 * No byte swapping is required in MAO section
                 */
                IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);

                /*
                 * Force the use of the OTP MAC address in case of reserved MAC
                 * address in the NVM, or if address is given but invalid.
                 */
                if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
                    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
                    iwm_is_valid_ether_addr(data->hw_addr) &&
                    !IEEE80211_IS_MULTICAST(data->hw_addr))
                        return;

                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                    "%s: mac address from nvm override section invalid\n",
                    __func__);
        }

        if (nvm_hw) {
                /* read the mac address from WFMP registers */
                uint32_t mac_addr0 =
                    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
                uint32_t mac_addr1 =
                    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

                /* The registers hold the address with reversed byte order. */
                hw_addr = (const uint8_t *)&mac_addr0;
                data->hw_addr[0] = hw_addr[3];
                data->hw_addr[1] = hw_addr[2];
                data->hw_addr[2] = hw_addr[1];
                data->hw_addr[3] = hw_addr[0];

                hw_addr = (const uint8_t *)&mac_addr1;
                data->hw_addr[4] = hw_addr[1];
                data->hw_addr[5] = hw_addr[0];

                return;
        }

        device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
        memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
2189
2190 static int
2191 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2192             const uint16_t *phy_sku)
2193 {
2194         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2195                 return le16_to_cpup(nvm_sw + IWM_SKU);
2196
2197         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2198 }
2199
2200 static int
2201 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2202 {
2203         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2204                 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2205         else
2206                 return le32_to_cpup((const uint32_t *)(nvm_sw +
2207                                                 IWM_NVM_VERSION_8000));
2208 }
2209
2210 static int
2211 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2212                   const uint16_t *phy_sku)
2213 {
2214         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2215                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2216
2217         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2218 }
2219
2220 static int
2221 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2222 {
2223         int n_hw_addr;
2224
2225         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2226                 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2227
2228         n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2229
2230         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2231 }
2232
/*
 * Decode the NVM radio configuration word into the parsed-NVM struct.
 * The pre-8000 and family-8000 layouts differ; the 8000 layout also
 * carries the valid TX/RX antenna masks.
 */
static void
iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
                  uint32_t radio_cfg)
{
        if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
                data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
                data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
                data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
                data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
                return;
        }

        /* set the radio configuration for family 8000 */
        data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
        data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
        data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
        /* note: pnum is filled from the "flavor" field on 8000 */
        data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
        data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
        data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
}
2253
/*
 * Fill data->hw_addr from the NVM: pre-8000 parts store it byte-swapped
 * in the HW section; family-8000 parts use the override/WFMP scheme in
 * iwm_set_hw_address_family_8000().  Returns 0, or EINVAL when no valid
 * address could be found.
 */
static int
iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
                   const uint16_t *nvm_hw, const uint16_t *mac_override)
{
#ifdef notyet /* for FAMILY 9000 */
        /* NOTE(review): 'cfg' is undeclared in this dead branch; it would
         * need to be sc->cfg if this is ever enabled. */
        if (cfg->mac_addr_from_csr) {
                iwm_set_hw_address_from_csr(sc, data);
        } else
#endif
        if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
                const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);

                /* The byte order is little endian 16 bit, meaning 214365 */
                data->hw_addr[0] = hw_addr[1];
                data->hw_addr[1] = hw_addr[0];
                data->hw_addr[2] = hw_addr[3];
                data->hw_addr[3] = hw_addr[2];
                data->hw_addr[4] = hw_addr[5];
                data->hw_addr[5] = hw_addr[4];
        } else {
                iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
        }

        if (!iwm_is_valid_ether_addr(data->hw_addr)) {
                device_printf(sc->sc_dev, "no valid mac address was found\n");
                return EINVAL;
        }

        return 0;
}
2284
2285 static struct iwm_nvm_data *
2286 iwm_parse_nvm_data(struct iwm_softc *sc,
2287                    const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2288                    const uint16_t *nvm_calib, const uint16_t *mac_override,
2289                    const uint16_t *phy_sku, const uint16_t *regulatory)
2290 {
2291         struct iwm_nvm_data *data;
2292         uint32_t sku, radio_cfg;
2293
2294         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2295                 data = malloc(sizeof(*data) +
2296                     IWM_NUM_CHANNELS * sizeof(uint16_t),
2297                     M_DEVBUF, M_NOWAIT | M_ZERO);
2298         } else {
2299                 data = malloc(sizeof(*data) +
2300                     IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2301                     M_DEVBUF, M_NOWAIT | M_ZERO);
2302         }
2303         if (!data)
2304                 return NULL;
2305
2306         data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2307
2308         radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2309         iwm_set_radio_cfg(sc, data, radio_cfg);
2310
2311         sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2312         data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2313         data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2314         data->sku_cap_11n_enable = 0;
2315
2316         data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2317
2318         /* If no valid mac address was found - bail out */
2319         if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2320                 free(data, M_DEVBUF);
2321                 return NULL;
2322         }
2323
2324         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2325                 memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2326                     IWM_NUM_CHANNELS * sizeof(uint16_t));
2327         } else {
2328                 memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2329                     IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2330         }
2331
2332         return data;
2333 }
2334
2335 static void
2336 iwm_free_nvm_data(struct iwm_nvm_data *data)
2337 {
2338         if (data != NULL)
2339                 free(data, M_DEVBUF);
2340 }
2341
2342 static struct iwm_nvm_data *
2343 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2344 {
2345         const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2346
2347         /* Checking for required sections */
2348         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2349                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2350                     !sections[sc->cfg->nvm_hw_section_num].data) {
2351                         device_printf(sc->sc_dev,
2352                             "Can't parse empty OTP/NVM sections\n");
2353                         return NULL;
2354                 }
2355         } else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2356                 /* SW and REGULATORY sections are mandatory */
2357                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2358                     !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2359                         device_printf(sc->sc_dev,
2360                             "Can't parse empty OTP/NVM sections\n");
2361                         return NULL;
2362                 }
2363                 /* MAC_OVERRIDE or at least HW section must exist */
2364                 if (!sections[sc->cfg->nvm_hw_section_num].data &&
2365                     !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2366                         device_printf(sc->sc_dev,
2367                             "Can't parse mac_address, empty sections\n");
2368                         return NULL;
2369                 }
2370
2371                 /* PHY_SKU section is mandatory in B0 */
2372                 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2373                         device_printf(sc->sc_dev,
2374                             "Can't parse phy_sku in B0, empty sections\n");
2375                         return NULL;
2376                 }
2377         } else {
2378                 panic("unknown device family %d\n", sc->cfg->device_family);
2379         }
2380
2381         hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2382         sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2383         calib = (const uint16_t *)
2384             sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2385         regulatory = (const uint16_t *)
2386             sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2387         mac_override = (const uint16_t *)
2388             sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2389         phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2390
2391         return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2392             phy_sku, regulatory);
2393 }
2394
2395 static int
2396 iwm_nvm_init(struct iwm_softc *sc)
2397 {
2398         struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2399         int i, ret, section;
2400         uint32_t size_read = 0;
2401         uint8_t *nvm_buffer, *temp;
2402         uint16_t len;
2403
2404         memset(nvm_sections, 0, sizeof(nvm_sections));
2405
2406         if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2407                 return EINVAL;
2408
2409         /* load NVM values from nic */
2410         /* Read From FW NVM */
2411         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2412
2413         nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2414         if (!nvm_buffer)
2415                 return ENOMEM;
2416         for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2417                 /* we override the constness for initial read */
2418                 ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2419                                            &len, size_read);
2420                 if (ret)
2421                         continue;
2422                 size_read += len;
2423                 temp = malloc(len, M_DEVBUF, M_NOWAIT);
2424                 if (!temp) {
2425                         ret = ENOMEM;
2426                         break;
2427                 }
2428                 memcpy(temp, nvm_buffer, len);
2429
2430                 nvm_sections[section].data = temp;
2431                 nvm_sections[section].length = len;
2432         }
2433         if (!size_read)
2434                 device_printf(sc->sc_dev, "OTP is blank\n");
2435         free(nvm_buffer, M_DEVBUF);
2436
2437         sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2438         if (!sc->nvm_data)
2439                 return EINVAL;
2440         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2441                     "nvm version = %x\n", sc->nvm_data->nvm_version);
2442
2443         for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2444                 if (nvm_sections[i].data != NULL)
2445                         free(nvm_sections[i].data, M_DEVBUF);
2446         }
2447
2448         return 0;
2449 }
2450
/*
 * Copy one firmware section into device memory.
 *
 * The section is streamed through the pre-allocated firmware DMA
 * bounce buffer (sc->fw_dma) in chunks of at most
 * IWM_FH_MEM_TB_MAX_LENGTH bytes; each chunk is handed to the FH
 * service channel by iwm_pcie_load_firmware_chunk().  Destinations
 * inside the extended SRAM window additionally require
 * IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE for the duration of the chunk.
 *
 * Returns 0 on success, or an errno from the chunk loader.
 */
static int
iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
	const struct iwm_fw_desc *section)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	uint8_t *v_addr;
	bus_addr_t p_addr;
	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: [%d] uCode section being loaded...\n",
		    __func__, section_num);

	v_addr = dma->vaddr;
	p_addr = dma->paddr;

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		uint32_t copy_size, dst_addr;
		/* Does this chunk land in the extended SRAM window? */
		int extended_addr = FALSE;

		copy_size = MIN(chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
			extended_addr = TRUE;

		if (extended_addr)
			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		/* Stage the chunk in the bounce buffer, then DMA it out. */
		memcpy(v_addr, (const uint8_t *)section->data + offset,
		    copy_size);
		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
						   copy_size);

		/* Restore normal addressing before acting on any error. */
		if (extended_addr)
			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			device_printf(sc->sc_dev,
			    "%s: Could not load the [%d] uCode section\n",
			    __func__, section_num);
			break;
		}
	}

	return ret;
}
2503
/*
 * ucode
 */

/*
 * DMA one chunk of firmware into device SRAM via the FH service
 * channel and wait for the "chunk done" interrupt.
 *
 * Called with the softc lock held (we msleep() on sc_mtx).  Programs
 * the service channel registers -- SRAM destination, host DRAM source
 * address (split into low/high parts), byte count -- then enables the
 * channel and sleeps until the interrupt handler sets
 * sc_fw_chunk_done.
 *
 * Returns 0 on success, EBUSY if the NIC could not be locked, or
 * ETIMEDOUT if the completion interrupt never arrived.
 */
static int
iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
                             bus_addr_t phy_addr, uint32_t byte_cnt)
{
	int ret;

	sc->sc_fw_chunk_done = 0;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Pause the channel while it is being reprogrammed. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	/* Destination address in device SRAM. */
	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);

	/* Source address in host DRAM: low 32 bits ... */
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	/* ... and high bits, combined with the transfer byte count. */
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(phy_addr)
	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	/* Single TB transfer, buffer valid. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	/* Re-enable the channel; the transfer starts here. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/*
	 * Wait for the interrupt handler to flag completion.  msleep()
	 * itself times out after hz ticks (roughly one second), at
	 * which point we give up with ETIMEDOUT.
	 */
	ret = 0;
	while (!sc->sc_fw_chunk_done) {
		ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
		if (ret)
			break;
	}

	if (ret != 0) {
		device_printf(sc->sc_dev,
		    "fw chunk addr 0x%x len %d failed to load\n",
		    dst_addr, byte_cnt);
		return ETIMEDOUT;
	}

	return 0;
}
2560
/*
 * Load the firmware sections belonging to one CPU on 8000-series
 * devices using the secure-boot handshake: after each section the
 * section number is reported to the uCode through
 * IWM_FH_UCODE_LOAD_STATUS, and a final all-ones write signals that
 * this CPU's image is complete.
 *
 * *first_ucode_section is the index to start from and is updated to
 * the last section examined, so a subsequent call for CPU2 resumes
 * right after the separator section.
 *
 * Returns 0 on success, or an errno from iwm_pcie_load_section().
 */
static int
iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	uint32_t val, last_read_idx = 0;

	/* CPU1 status bits sit in the low 16 bits, CPU2 in the high. */
	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->fw_sect[i].data ||
		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
				    "Break since Data not valid or Empty section, sec = %d\n",
				    i);
			break;
		}
		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
		if (ret)
			return ret;

		/* Notify the ucode of the loaded section number and status */
		if (iwm_nic_lock(sc)) {
			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
			/* Next section sets one more low bit: 1, 3, 7, ... */
			sec_num = (sec_num << 1) | 0x1;
			iwm_nic_unlock(sc);
		}
	}

	*first_ucode_section = last_read_idx;

	/*
	 * NOTE(review): interrupts are enabled before signalling load
	 * completion below -- presumably so the ALIVE interrupt cannot
	 * be missed; confirm against the iwlwifi reference driver.
	 */
	iwm_enable_interrupts(sc);

	if (iwm_nic_lock(sc)) {
		/* Tell the uCode all of this CPU's sections are in place. */
		if (cpu == 1)
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
		else
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
		iwm_nic_unlock(sc);
	}

	return 0;
}
2622
/*
 * Load the firmware sections belonging to one CPU (non-secure-boot
 * path).  Sections are streamed in order until an empty slot or a
 * separator marker is found; on 8000-family devices completion is
 * additionally reported via IWM_CSR_UCODE_LOAD_STATUS_ADDR.
 *
 * *first_ucode_section is the index to start from and is updated to
 * the last section examined, so the CPU2 pass resumes right after
 * the separator.
 *
 * Returns 0 on success, or an errno from iwm_pcie_load_section().
 */
static int
iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0;
	uint32_t last_read_idx = 0;

	/* CPU1 status bits sit in the low 16 bits, CPU2 in the high. */
	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->fw_sect[i].data ||
		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
				    "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
		if (ret)
			return ret;
	}

	/* Report this CPU's load status to the uCode (8000 family only). */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		iwm_set_bits_prph(sc,
				  IWM_CSR_UCODE_LOAD_STATUS_ADDR,
				  (IWM_LMPM_CPU_UCODE_LOADING_COMPLETED |
				   IWM_LMPM_CPU_HDRS_LOADING_COMPLETED |
				   IWM_LMPM_CPU_UCODE_LOADING_STARTED) <<
					shift_param);

	*first_ucode_section = last_read_idx;

	return 0;

}
2675
2676 static int
2677 iwm_pcie_load_given_ucode(struct iwm_softc *sc,
2678         const struct iwm_fw_sects *image)
2679 {
2680         int ret = 0;
2681         int first_ucode_section;
2682
2683         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2684                      image->is_dual_cpus ? "Dual" : "Single");
2685
2686         /* load to FW the binary non secured sections of CPU1 */
2687         ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2688         if (ret)
2689                 return ret;
2690
2691         if (image->is_dual_cpus) {
2692                 /* set CPU2 header address */
2693                 iwm_write_prph(sc,
2694                                IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2695                                IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2696
2697                 /* load to FW the binary sections of CPU2 */
2698                 ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2699                                                  &first_ucode_section);
2700                 if (ret)
2701                         return ret;
2702         }
2703
2704         iwm_enable_interrupts(sc);
2705
2706         /* release CPU reset */
2707         IWM_WRITE(sc, IWM_CSR_RESET, 0);
2708
2709         return 0;
2710 }
2711
2712 int
2713 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2714         const struct iwm_fw_sects *image)
2715 {
2716         int ret = 0;
2717         int first_ucode_section;
2718
2719         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2720                     image->is_dual_cpus ? "Dual" : "Single");
2721
2722         /* configure the ucode to be ready to get the secured image */
2723         /* release CPU reset */
2724         iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
2725
2726         /* load to FW the binary Secured sections of CPU1 */
2727         ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2728             &first_ucode_section);
2729         if (ret)
2730                 return ret;
2731
2732         /* load to FW the binary sections of CPU2 */
2733         return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2734             &first_ucode_section);
2735 }
2736
/* XXX Get rid of this definition */
/*
 * Mask all interrupts except FH_TX, which is required to drive the
 * chunked firmware download (see iwm_start_fw()).
 */
static inline void
iwm_enable_fw_load_int(struct iwm_softc *sc)
{
	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
2745
/* XXX Add proper rfkill support code */
/*
 * Bring the NIC up and download the firmware image 'fw'.
 *
 * Prepares the card (may fail if ME/AMT owns it), initializes the
 * NIC, masks all interrupts except FH_TX (needed for the chunked
 * firmware download), clears the rfkill handshake bits, and then
 * loads the image with the family-specific loader.
 *
 * Returns 0 on success, or an errno.
 */
static int
iwm_start_fw(struct iwm_softc *sc,
	const struct iwm_fw_sects *fw)
{
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwm_prepare_card_hw(sc)) {
		device_printf(sc->sc_dev,
		    "%s: Exit HW not ready\n", __func__);
		ret = EIO;
		goto out;
	}

	/* Ack any pending interrupts before masking. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

	iwm_disable_interrupts(sc);

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

	ret = iwm_nic_init(sc);
	if (ret) {
		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
		goto out;
	}

	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all the interrupt besides the
	 * FH_TX interrupt which is needed to load the firmware). If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
	iwm_enable_fw_load_int(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
	else
		ret = iwm_pcie_load_given_ucode(sc, fw);

	/* XXX re-check RF-Kill state */

out:
	return ret;
}
2804
2805 static int
2806 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2807 {
2808         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2809                 .valid = htole32(valid_tx_ant),
2810         };
2811
2812         return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2813             IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2814 }
2815
/* iwlwifi: mvm/fw.c */
/*
 * Send the PHY configuration command, which kicks off the uCode's
 * internal calibrations.  phy_cfg and the per-ucode-type calibration
 * triggers come from the driver's defaults for the current image.
 *
 * NOTE(review): phy_cfg_cmd is not zero-initialized; this assumes
 * every field of struct iwm_phy_cfg_cmd is assigned below -- verify
 * against the structure definition if fields are ever added.
 */
static int
iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
{
	struct iwm_phy_cfg_cmd phy_cfg_cmd;
	enum iwm_ucode_type ucode_type = sc->cur_ucode;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
	phy_cfg_cmd.calib_control.event_trigger =
	    sc->sc_default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
	    sc->sc_default_calib[ucode_type].flow_trigger;

	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
2835
/*
 * Notification-wait callback for the ALIVE message the uCode sends
 * once it has booted.  Three response layouts exist (ver1, ver2 and
 * the current one), distinguished purely by payload size.  Each
 * branch extracts the error/log event table pointers and the
 * scheduler base address; ver2+ additionally carries the UMAC error
 * table pointer.  alive_data->valid records whether the firmware
 * reported IWM_ALIVE_STATUS_OK.
 *
 * Always returns TRUE to end the notification wait.
 */
static int
iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
{
	struct iwm_mvm_alive_data *alive_data = data;
	struct iwm_mvm_alive_resp_ver1 *palive1;
	struct iwm_mvm_alive_resp_ver2 *palive2;
	struct iwm_mvm_alive_resp *palive;

	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
		/* Version 1: no UMAC, so no UMAC error table/log. */
		palive1 = (void *)pkt->data;

		sc->support_umac_log = FALSE;
		sc->error_event_table =
			le32toh(palive1->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive1->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);

		alive_data->valid = le16toh(palive1->status) ==
				    IWM_ALIVE_STATUS_OK;
		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16toh(palive1->status), palive1->ver_type,
			     palive1->ver_subtype, palive1->flags);
	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
		/* Version 2: adds the UMAC error-info address. */
		palive2 = (void *)pkt->data;
		sc->error_event_table =
			le32toh(palive2->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive2->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
		sc->umac_error_event_table =
			le32toh(palive2->error_info_addr);

		alive_data->valid = le16toh(palive2->status) ==
				    IWM_ALIVE_STATUS_OK;
		/* A non-zero table address implies UMAC logging support. */
		if (sc->umac_error_event_table)
			sc->support_umac_log = TRUE;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive2->status), palive2->ver_type,
			    palive2->ver_subtype, palive2->flags);

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			    palive2->umac_major, palive2->umac_minor);
	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		/* Current version: same fields, 32-bit UMAC version. */
		palive = (void *)pkt->data;

		sc->error_event_table =
			le32toh(palive->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
		sc->umac_error_event_table =
			le32toh(palive->error_info_addr);

		alive_data->valid = le16toh(palive->status) ==
				    IWM_ALIVE_STATUS_OK;
		if (sc->umac_error_event_table)
			sc->support_umac_log = TRUE;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive->status), palive->ver_type,
			    palive->ver_subtype, palive->flags);

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			    le32toh(palive->umac_major),
			    le32toh(palive->umac_minor));
	}

	return TRUE;
}
2912
2913 static int
2914 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2915         struct iwm_rx_packet *pkt, void *data)
2916 {
2917         struct iwm_phy_db *phy_db = data;
2918
2919         if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2920                 if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2921                         device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2922                             __func__, pkt->hdr.code);
2923                 }
2924                 return TRUE;
2925         }
2926
2927         if (iwm_phy_db_set_section(phy_db, pkt)) {
2928                 device_printf(sc->sc_dev,
2929                     "%s: iwm_phy_db_set_section failed\n", __func__);
2930         }
2931
2932         return FALSE;
2933 }
2934
/*
 * Load the requested ucode image and wait for its ALIVE notification.
 *
 * Reads the firmware image if necessary, installs an ALIVE
 * notification waiter, starts the firmware, then drops the softc
 * lock while sleeping for the ALIVE (the RX path needs the lock to
 * deliver the notification).  On any failure sc->cur_ucode is rolled
 * back to the previously loaded image type.
 *
 * Returns 0 on success; an errno on failure (EIO if the firmware
 * came up but reported a bad status).
 */
static int
iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
	enum iwm_ucode_type ucode_type)
{
	struct iwm_notification_wait alive_wait;
	struct iwm_mvm_alive_data alive_data;
	const struct iwm_fw_sects *fw;
	enum iwm_ucode_type old_type = sc->cur_ucode;
	int error;
	static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };

	if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
		device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
			error);
		return error;
	}
	fw = &sc->sc_fw.fw_sects[ucode_type];
	sc->cur_ucode = ucode_type;
	sc->ucode_loaded = FALSE;

	/* Register the ALIVE waiter before starting the firmware. */
	memset(&alive_data, 0, sizeof(alive_data));
	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
				   alive_cmd, nitems(alive_cmd),
				   iwm_alive_fn, &alive_data);

	error = iwm_start_fw(sc, fw);
	if (error) {
		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
		sc->cur_ucode = old_type;
		/* Unregister the waiter we will no longer service. */
		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
		return error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	IWM_UNLOCK(sc);
	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
				      IWM_MVM_UCODE_ALIVE_TIMEOUT);
	IWM_LOCK(sc);
	if (error) {
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
			/* Dump secure-boot status to aid diagnosis. */
			device_printf(sc->sc_dev,
			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
			    iwm_read_prph(sc, IWM_SB_CPU_1_STATUS),
			    iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
		}
		sc->cur_ucode = old_type;
		return error;
	}

	if (!alive_data.valid) {
		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
		    __func__);
		sc->cur_ucode = old_type;
		return EIO;
	}

	/* Hand the scheduler base address to the PCIe transport layer. */
	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);

	/*
	 * configure and operate fw paging mechanism.
	 * driver configures the paging flow only once, CPU2 paging image
	 * included in the IWM_UCODE_INIT image.
	 */
	if (fw->paging_mem_size) {
		/* XXX implement FW paging */
		device_printf(sc->sc_dev,
		    "%s: XXX FW paging not implemented yet\n", __func__);
	}

	if (!error)
		sc->ucode_loaded = TRUE;
	return error;
}
3011
3012 /*
3013  * mvm misc bits
3014  */
3015
3016 /*
3017  * follows iwlwifi/fw.c
3018  */
3019 static int
3020 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
3021 {
3022         struct iwm_notification_wait calib_wait;
3023         static const uint16_t init_complete[] = {
3024                 IWM_INIT_COMPLETE_NOTIF,
3025                 IWM_CALIB_RES_NOTIF_PHY_DB
3026         };
3027         int ret;
3028
3029         /* do not operate with rfkill switch turned on */
3030         if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
3031                 device_printf(sc->sc_dev,
3032                     "radio is disabled by hardware switch\n");
3033                 return EPERM;
3034         }
3035
3036         iwm_init_notification_wait(sc->sc_notif_wait,
3037                                    &calib_wait,
3038                                    init_complete,
3039                                    nitems(init_complete),
3040                                    iwm_wait_phy_db_entry,
3041                                    sc->sc_phy_db);
3042
3043         /* Will also start the device */
3044         ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
3045         if (ret) {
3046                 device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
3047                     ret);
3048                 goto error;
3049         }
3050
3051         if (justnvm) {
3052                 /* Read nvm */
3053                 ret = iwm_nvm_init(sc);
3054                 if (ret) {
3055                         device_printf(sc->sc_dev, "failed to read nvm\n");
3056                         goto error;
3057                 }
3058                 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
3059                 goto error;
3060         }
3061
3062         ret = iwm_send_bt_init_conf(sc);
3063         if (ret) {
3064                 device_printf(sc->sc_dev,
3065                     "failed to send bt coex configuration: %d\n", ret);
3066                 goto error;
3067         }
3068
3069         /* Init Smart FIFO. */
3070         ret = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
3071         if (ret)
3072                 goto error;
3073
3074         /* Send TX valid antennas before triggering calibrations */
3075         ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
3076         if (ret) {
3077                 device_printf(sc->sc_dev,
3078                     "failed to send antennas before calibration: %d\n", ret);
3079                 goto error;
3080         }
3081
3082         /*
3083          * Send phy configurations command to init uCode
3084          * to start the 16.0 uCode init image internal calibrations.
3085          */
3086         ret = iwm_send_phy_cfg_cmd(sc);
3087         if (ret) {
3088                 device_printf(sc->sc_dev,
3089                     "%s: Failed to run INIT calibrations: %d\n",
3090                     __func__, ret);
3091                 goto error;
3092         }
3093
3094         /*
3095          * Nothing to do but wait for the init complete notification
3096          * from the firmware.
3097          */
3098         IWM_UNLOCK(sc);
3099         ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
3100             IWM_MVM_UCODE_CALIB_TIMEOUT);
3101         IWM_LOCK(sc);
3102
3103
3104         goto out;
3105
3106 error:
3107         iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
3108 out:
3109         return ret;
3110 }
3111
3112 /*
3113  * receive side
3114  */
3115
/*
 * (re)stock rx ring, called at init-time and at runtime.
 *
 * Allocate a jumbo cluster mbuf, DMA-map it and install it in RX ring
 * slot 'idx', pointing the hardware descriptor at its bus address.
 * Returns 0 on success or an errno; on failure the slot keeps its
 * previous mbuf/mapping, so the ring stays consistent.
 */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	bus_dmamap_t dmamap = NULL;
	bus_dma_segment_t seg;
	int nsegs, error;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
	if (m == NULL)
		return ENOBUFS;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	/*
	 * Load the new mbuf into the ring's spare map first: if the load
	 * fails, slot idx still holds its old, valid mapping.
	 */
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
	    &seg, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: can't map mbuf, error %d\n", __func__, error);
		goto fail;
	}

	/* Release the old buffer's mapping (the mbuf was consumed upstream). */
	if (data->m != NULL)
		bus_dmamap_unload(ring->data_dmat, data->map);

	/* Swap ring->spare_map with data->map */
	dmamap = data->map;
	data->map = ring->spare_map;
	ring->spare_map = dmamap;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
	data->m = m;

	/* Update RX descriptor. */
	/* The hardware wants a 256-byte-aligned address, shifted right 8. */
	KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
	ring->desc[idx] = htole32(seg.ds_addr >> 8);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	return 0;
fail:
	m_freem(m);
	return error;
}
3162
3163 /* iwlwifi: mvm/rx.c */
3164 #define IWM_RSSI_OFFSET 50
3165 static int
3166 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3167 {
3168         int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
3169         uint32_t agc_a, agc_b;
3170         uint32_t val;
3171
3172         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
3173         agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
3174         agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
3175
3176         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
3177         rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
3178         rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
3179
3180         /*
3181          * dBm = rssi dB - agc dB - constant.
3182          * Higher AGC (higher radio gain) means lower signal.
3183          */
3184         rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
3185         rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
3186         max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
3187
3188         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3189             "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
3190             rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
3191
3192         return max_rssi_dbm;
3193 }
3194
3195 /* iwlwifi: mvm/rx.c */
3196 /*
3197  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3198  * values are reported by the fw as positive values - need to negate
3199  * to obtain their dBM.  Account for missing antennas by replacing 0
3200  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3201  */
3202 static int
3203 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3204 {
3205         int energy_a, energy_b, energy_c, max_energy;
3206         uint32_t val;
3207
3208         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3209         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3210             IWM_RX_INFO_ENERGY_ANT_A_POS;
3211         energy_a = energy_a ? -energy_a : -256;
3212         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3213             IWM_RX_INFO_ENERGY_ANT_B_POS;
3214         energy_b = energy_b ? -energy_b : -256;
3215         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3216             IWM_RX_INFO_ENERGY_ANT_C_POS;
3217         energy_c = energy_c ? -energy_c : -256;
3218         max_energy = MAX(energy_a, energy_b);
3219         max_energy = MAX(max_energy, energy_c);
3220
3221         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3222             "energy In A %d B %d C %d , and max %d\n",
3223             energy_a, energy_b, energy_c, max_energy);
3224
3225         return max_energy;
3226 }
3227
3228 static void
3229 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
3230         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3231 {
3232         struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3233
3234         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3235         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3236
3237         memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3238 }
3239
3240 /*
3241  * Retrieve the average noise (in dBm) among receivers.
3242  */
3243 static int
3244 iwm_get_noise(struct iwm_softc *sc,
3245     const struct iwm_mvm_statistics_rx_non_phy *stats)
3246 {
3247         int i, total, nbant, noise;
3248
3249         total = nbant = noise = 0;
3250         for (i = 0; i < 3; i++) {
3251                 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3252                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3253                     __func__,
3254                     i,
3255                     noise);
3256
3257                 if (noise) {
3258                         total += noise;
3259                         nbant++;
3260                 }
3261         }
3262
3263         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3264             __func__, nbant, total);
3265 #if 0
3266         /* There should be at least one antenna but check anyway. */
3267         return (nbant == 0) ? -127 : (total / nbant) - 107;
3268 #else
3269         /* For now, just hard-code it to -96 to be safe */
3270         return (-96);
3271 #endif
3272 }
3273
3274 /*
3275  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3276  *
3277  * Handles the actual data of the Rx packet from the fw
3278  */
3279 static void
3280 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
3281         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3282 {
3283         struct ieee80211com *ic = &sc->sc_ic;
3284         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3285         struct ieee80211_frame *wh;
3286         struct ieee80211_node *ni;
3287         struct ieee80211_rx_stats rxs;
3288         struct mbuf *m;
3289         struct iwm_rx_phy_info *phy_info;
3290         struct iwm_rx_mpdu_res_start *rx_res;
3291         uint32_t len;
3292         uint32_t rx_pkt_status;
3293         int rssi;
3294
3295         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3296
3297         phy_info = &sc->sc_last_phy_info;
3298         rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3299         wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3300         len = le16toh(rx_res->byte_count);
3301         rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3302
3303         m = data->m;
3304         m->m_data = pkt->data + sizeof(*rx_res);
3305         m->m_pkthdr.len = m->m_len = len;
3306
3307         if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3308                 device_printf(sc->sc_dev,
3309                     "dsp size out of range [0,20]: %d\n",
3310                     phy_info->cfg_phy_cnt);
3311                 goto fail;
3312         }
3313
3314         if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3315             !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3316                 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3317                     "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3318                 goto fail;
3319         }
3320
3321         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
3322                 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3323         } else {
3324                 rssi = iwm_mvm_calc_rssi(sc, phy_info);
3325         }
3326
3327         /* Note: RSSI is absolute (ie a -ve value) */
3328         if (rssi < IWM_MIN_DBM)
3329                 rssi = IWM_MIN_DBM;
3330         else if (rssi > IWM_MAX_DBM)
3331                 rssi = IWM_MAX_DBM;
3332
3333         /* Map it to relative value */
3334         rssi = rssi - sc->sc_noise;
3335
3336         /* replenish ring for the buffer we're going to feed to the sharks */
3337         if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3338                 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3339                     __func__);
3340                 goto fail;
3341         }
3342
3343         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3344             "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3345
3346         ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3347
3348         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3349             "%s: phy_info: channel=%d, flags=0x%08x\n",
3350             __func__,
3351             le16toh(phy_info->channel),
3352             le16toh(phy_info->phy_flags));
3353
3354         /*
3355          * Populate an RX state struct with the provided information.
3356          */
3357         bzero(&rxs, sizeof(rxs));
3358         rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3359         rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3360         rxs.c_ieee = le16toh(phy_info->channel);
3361         if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3362                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3363         } else {
3364                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3365         }
3366
3367         /* rssi is in 1/2db units */
3368         rxs.c_rssi = rssi * 2;
3369         rxs.c_nf = sc->sc_noise;
3370         if (ieee80211_add_rx_params(m, &rxs) == 0) {
3371                 if (ni)
3372                         ieee80211_free_node(ni);
3373                 goto fail;
3374         }
3375
3376         if (ieee80211_radiotap_active_vap(vap)) {
3377                 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3378
3379                 tap->wr_flags = 0;
3380                 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3381                         tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3382                 tap->wr_chan_freq = htole16(rxs.c_freq);
3383                 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3384                 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3385                 tap->wr_dbm_antsignal = (int8_t)rssi;
3386                 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3387                 tap->wr_tsft = phy_info->system_timestamp;
3388                 switch (phy_info->rate) {
3389                 /* CCK rates. */
3390                 case  10: tap->wr_rate =   2; break;
3391                 case  20: tap->wr_rate =   4; break;
3392                 case  55: tap->wr_rate =  11; break;
3393                 case 110: tap->wr_rate =  22; break;
3394                 /* OFDM rates. */
3395                 case 0xd: tap->wr_rate =  12; break;
3396                 case 0xf: tap->wr_rate =  18; break;
3397                 case 0x5: tap->wr_rate =  24; break;
3398                 case 0x7: tap->wr_rate =  36; break;
3399                 case 0x9: tap->wr_rate =  48; break;
3400                 case 0xb: tap->wr_rate =  72; break;
3401                 case 0x1: tap->wr_rate =  96; break;
3402                 case 0x3: tap->wr_rate = 108; break;
3403                 /* Unknown rate: should not happen. */
3404                 default:  tap->wr_rate =   0;
3405                 }
3406         }
3407
3408         IWM_UNLOCK(sc);
3409         if (ni != NULL) {
3410                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3411                 ieee80211_input_mimo(ni, m);
3412                 ieee80211_free_node(ni);
3413         } else {
3414                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3415                 ieee80211_input_mimo_all(ic, m);
3416         }
3417         IWM_LOCK(sc);
3418
3419         return;
3420
3421 fail:
3422         counter_u64_add(ic->ic_ierrors, 1);
3423 }
3424
3425 static int
3426 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3427         struct iwm_node *in)
3428 {
3429         struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3430         struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
3431         struct ieee80211_node *ni = &in->in_ni;
3432         int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3433
3434         KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3435
3436         /* Update rate control statistics. */
3437         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3438             __func__,
3439             (int) le16toh(tx_resp->status.status),
3440             (int) le16toh(tx_resp->status.sequence),
3441             tx_resp->frame_count,
3442             tx_resp->bt_kill_count,
3443             tx_resp->failure_rts,
3444             tx_resp->failure_frame,
3445             le32toh(tx_resp->initial_rate),
3446             (int) le16toh(tx_resp->wireless_media_time));
3447
3448         txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
3449                      IEEE80211_RATECTL_STATUS_LONG_RETRY;
3450         txs->short_retries = tx_resp->failure_rts;
3451         txs->long_retries = tx_resp->failure_frame;
3452         if (status != IWM_TX_STATUS_SUCCESS &&
3453             status != IWM_TX_STATUS_DIRECT_DONE) {
3454                 switch (status) {
3455                 case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
3456                         txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
3457                         break;
3458                 case IWM_TX_STATUS_FAIL_LONG_LIMIT:
3459                         txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
3460                         break;
3461                 case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
3462                         txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
3463                         break;
3464                 default:
3465                         txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
3466                         break;
3467                 }
3468         } else {
3469                 txs->status = IEEE80211_RATECTL_TX_SUCCESS;
3470         }
3471         ieee80211_ratectl_tx_complete(ni, txs);
3472
3473         return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
3474 }
3475
/*
 * Handle a TX command response: complete the matching TX ring entry,
 * release its DMA mapping and mbuf, and restart transmission if the
 * queue drops below the low watermark.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;
	struct mbuf *m = txd->m;
	int status;

	KASSERT(txd->done == 0, ("txd not done"));
	KASSERT(txd->in != NULL, ("txd without node"));
	KASSERT(txd->m != NULL, ("txd without mbuf"));

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* The firmware responded, so the watchdog can be reset. */
	sc->sc_tx_timer = 0;

	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "free txd %p, in %p\n", txd, txd->in);
	/* Clear the slot before completion, which may recurse into TX. */
	txd->done = 1;
	txd->m = NULL;
	txd->in = NULL;

	/* Hands the mbuf (and the node reference) back to net80211. */
	ieee80211_tx_complete(&in->in_ni, m, status);

	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		/* Resume transmitting once no queue is full any more. */
		if (sc->qfullmsk == 0) {
			iwm_start(sc);
		}
	}
}
3518
3519 /*
3520  * transmit side
3521  */
3522
3523 /*
3524  * Process a "command done" firmware notification.  This is where we wakeup
3525  * processes waiting for a synchronous command completion.
3526  * from if_iwn
3527  */
3528 static void
3529 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3530 {
3531         struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3532         struct iwm_tx_data *data;
3533
3534         if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3535                 return; /* Not a command ack. */
3536         }
3537
3538         /* XXX wide commands? */
3539         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3540             "cmd notification type 0x%x qid %d idx %d\n",
3541             pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3542
3543         data = &ring->data[pkt->hdr.idx];
3544
3545         /* If the command was mapped in an mbuf, free it. */
3546         if (data->m != NULL) {
3547                 bus_dmamap_sync(ring->data_dmat, data->map,
3548                     BUS_DMASYNC_POSTWRITE);
3549                 bus_dmamap_unload(ring->data_dmat, data->map);
3550                 m_freem(data->m);
3551                 data->m = NULL;
3552         }
3553         wakeup(&ring->desc[pkt->hdr.idx]);
3554
3555         if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3556                 device_printf(sc->sc_dev,
3557                     "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3558                     __func__, pkt->hdr.idx, ring->queued, ring->cur);
3559                 /* XXX call iwm_force_nmi() */
3560         }
3561
3562         KASSERT(ring->queued > 0, ("ring->queued is empty?"));
3563         ring->queued--;
3564         if (ring->queued == 0)
3565                 iwm_pcie_clear_cmd_in_flight(sc);
3566 }
3567
#if 0
/*
 * necessary only for block ack mode
 *
 * Write the byte count for TX queue 'qid' slot 'idx' into the TX
 * scheduler's byte-count table so the firmware scheduler knows how
 * much data the TFD carries.  Currently compiled out.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	/* NOTE(review): +8 adjustment inherited from iwlwifi; exact
	 * meaning unconfirmed ("magic numbers came naturally from paris"). */
	len += 8;
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	/* Entry format: station id in the top nibble, length below. */
	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * NOTE(review): low entries appear to be mirrored past the ring
	 * end, presumably so the scheduler can read beyond wrap-around
	 * without bounds checks -- confirm against iwlwifi.
	 */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
}
#endif
3600
3601 /*
3602  * Take an 802.11 (non-n) rate, find the relevant rate
3603  * table entry.  return the index into in_ridx[].
3604  *
3605  * The caller then uses that index back into in_ridx
3606  * to figure out the rate index programmed /into/
3607  * the firmware for this given node.
3608  */
3609 static int
3610 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3611     uint8_t rate)
3612 {
3613         int i;
3614         uint8_t r;
3615
3616         for (i = 0; i < nitems(in->in_ridx); i++) {
3617                 r = iwm_rates[in->in_ridx[i]].rate;
3618                 if (rate == r)
3619                         return (i);
3620         }
3621
3622         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3623             "%s: couldn't find an entry for rate=%d\n",
3624             __func__,
3625             rate);
3626
3627         /* XXX Return the first */
3628         /* XXX TODO: have it return the /lowest/ */
3629         return (0);
3630 }
3631
3632 static int
3633 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3634 {
3635         int i;
3636
3637         for (i = 0; i < nitems(iwm_rates); i++) {
3638                 if (iwm_rates[i].rate == rate)
3639                         return (i);
3640         }
3641         /* XXX error? */
3642         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3643             "%s: couldn't find an entry for rate=%d\n",
3644             __func__,
3645             rate);
3646         return (0);
3647 }
3648
3649 /*
3650  * Fill in the rate related information for a transmit command.
3651  */
3652 static const struct iwm_rate *
3653 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3654         struct mbuf *m, struct iwm_tx_cmd *tx)
3655 {
3656         struct ieee80211_node *ni = &in->in_ni;
3657         struct ieee80211_frame *wh;
3658         const struct ieee80211_txparam *tp = ni->ni_txparms;
3659         const struct iwm_rate *rinfo;
3660         int type;
3661         int ridx, rate_flags;
3662
3663         wh = mtod(m, struct ieee80211_frame *);
3664         type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3665
3666         tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3667         tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3668
3669         if (type == IEEE80211_FC0_TYPE_MGT) {
3670                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3671                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3672                     "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3673         } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3674                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3675                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3676                     "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3677         } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3678                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3679                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3680                     "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3681         } else if (m->m_flags & M_EAPOL) {
3682                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3683                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3684                     "%s: EAPOL\n", __func__);
3685         } else if (type == IEEE80211_FC0_TYPE_DATA) {
3686                 int i;
3687
3688                 /* for data frames, use RS table */
3689                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3690                 /* XXX pass pktlen */
3691                 (void) ieee80211_ratectl_rate(ni, NULL, 0);
3692                 i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3693                 ridx = in->in_ridx[i];
3694
3695                 /* This is the index into the programmed table */
3696                 tx->initial_rate_index = i;
3697                 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3698
3699                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3700                     "%s: start with i=%d, txrate %d\n",
3701                     __func__, i, iwm_rates[ridx].rate);
3702         } else {
3703                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3704                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DEFAULT (%d)\n",
3705                     __func__, tp->mgmtrate);
3706         }
3707
3708         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3709             "%s: frame type=%d txrate %d\n",
3710                 __func__, type, iwm_rates[ridx].rate);
3711
3712         rinfo = &iwm_rates[ridx];
3713
3714         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3715             __func__, ridx,
3716             rinfo->rate,
3717             !! (IWM_RIDX_IS_CCK(ridx))
3718             );
3719
3720         /* XXX TODO: hard-coded TX antenna? */
3721         rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3722         if (IWM_RIDX_IS_CCK(ridx))
3723                 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3724         tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3725
3726         return rinfo;
3727 }
3728
3729 #define TB0_SIZE 16
3730 static int
3731 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3732 {
3733         struct ieee80211com *ic = &sc->sc_ic;
3734         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3735         struct iwm_node *in = IWM_NODE(ni);
3736         struct iwm_tx_ring *ring;
3737         struct iwm_tx_data *data;
3738         struct iwm_tfd *desc;
3739         struct iwm_device_cmd *cmd;
3740         struct iwm_tx_cmd *tx;
3741         struct ieee80211_frame *wh;
3742         struct ieee80211_key *k = NULL;
3743         struct mbuf *m1;
3744         const struct iwm_rate *rinfo;
3745         uint32_t flags;
3746         u_int hdrlen;
3747         bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3748         int nsegs;
3749         uint8_t tid, type;
3750         int i, totlen, error, pad;
3751
3752         wh = mtod(m, struct ieee80211_frame *);
3753         hdrlen = ieee80211_anyhdrsize(wh);
3754         type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3755         tid = 0;
3756         ring = &sc->txq[ac];
3757         desc = &ring->desc[ring->cur];
3758         memset(desc, 0, sizeof(*desc));
3759         data = &ring->data[ring->cur];
3760
3761         /* Fill out iwm_tx_cmd to send to the firmware */
3762         cmd = &ring->cmd[ring->cur];
3763         cmd->hdr.code = IWM_TX_CMD;
3764         cmd->hdr.flags = 0;
3765         cmd->hdr.qid = ring->qid;
3766         cmd->hdr.idx = ring->cur;
3767
3768         tx = (void *)cmd->data;
3769         memset(tx, 0, sizeof(*tx));
3770
3771         rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
3772
3773         /* Encrypt the frame if need be. */
3774         if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3775                 /* Retrieve key for TX && do software encryption. */
3776                 k = ieee80211_crypto_encap(ni, m);
3777                 if (k == NULL) {
3778                         m_freem(m);
3779                         return (ENOBUFS);
3780                 }
3781                 /* 802.11 header may have moved. */
3782                 wh = mtod(m, struct ieee80211_frame *);
3783         }
3784
3785         if (ieee80211_radiotap_active_vap(vap)) {
3786                 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3787
3788                 tap->wt_flags = 0;
3789                 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3790                 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3791                 tap->wt_rate = rinfo->rate;
3792                 if (k != NULL)
3793                         tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3794                 ieee80211_radiotap_tx(vap, m);
3795         }
3796
3797
3798         totlen = m->m_pkthdr.len;
3799
3800         flags = 0;
3801         if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3802                 flags |= IWM_TX_CMD_FLG_ACK;
3803         }
3804
3805         if (type == IEEE80211_FC0_TYPE_DATA
3806             && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3807             && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3808                 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3809         }
3810
3811         if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3812             type != IEEE80211_FC0_TYPE_DATA)
3813                 tx->sta_id = sc->sc_aux_sta.sta_id;
3814         else
3815                 tx->sta_id = IWM_STATION_ID;
3816
3817         if (type == IEEE80211_FC0_TYPE_MGT) {
3818                 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3819
3820                 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3821                     subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3822                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3823                 } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3824                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3825                 } else {
3826                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3827                 }
3828         } else {
3829                 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3830         }
3831
3832         if (hdrlen & 3) {
3833                 /* First segment length must be a multiple of 4. */
3834                 flags |= IWM_TX_CMD_FLG_MH_PAD;
3835                 pad = 4 - (hdrlen & 3);
3836         } else
3837                 pad = 0;
3838
3839         tx->driver_txop = 0;
3840         tx->next_frame_len = 0;
3841
3842         tx->len = htole16(totlen);
3843         tx->tid_tspec = tid;
3844         tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3845
3846         /* Set physical address of "scratch area". */
3847         tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3848         tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3849
3850         /* Copy 802.11 header in TX command. */
3851         memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3852
3853         flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3854
3855         tx->sec_ctl = 0;
3856         tx->tx_flags |= htole32(flags);
3857
3858         /* Trim 802.11 header. */
3859         m_adj(m, hdrlen);
3860         error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3861             segs, &nsegs, BUS_DMA_NOWAIT);
3862         if (error != 0) {
3863                 if (error != EFBIG) {
3864                         device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3865                             error);
3866                         m_freem(m);
3867                         return error;
3868                 }
3869                 /* Too many DMA segments, linearize mbuf. */
3870                 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3871                 if (m1 == NULL) {
3872                         device_printf(sc->sc_dev,
3873                             "%s: could not defrag mbuf\n", __func__);
3874                         m_freem(m);
3875                         return (ENOBUFS);
3876                 }
3877                 m = m1;
3878
3879                 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3880                     segs, &nsegs, BUS_DMA_NOWAIT);
3881                 if (error != 0) {
3882                         device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3883                             error);
3884                         m_freem(m);
3885                         return error;
3886                 }
3887         }
3888         data->m = m;
3889         data->in = in;
3890         data->done = 0;
3891
3892         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3893             "sending txd %p, in %p\n", data, data->in);
3894         KASSERT(data->in != NULL, ("node is NULL"));
3895
3896         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3897             "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3898             ring->qid, ring->cur, totlen, nsegs,
3899             le32toh(tx->tx_flags),
3900             le32toh(tx->rate_n_flags),
3901             tx->initial_rate_index
3902             );
3903
3904         /* Fill TX descriptor. */
3905         desc->num_tbs = 2 + nsegs;
3906
3907         desc->tbs[0].lo = htole32(data->cmd_paddr);
3908         desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3909             (TB0_SIZE << 4);
3910         desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3911         desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3912             ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3913               + hdrlen + pad - TB0_SIZE) << 4);
3914
3915         /* Other DMA segments are for data payload. */
3916         for (i = 0; i < nsegs; i++) {
3917                 seg = &segs[i];
3918                 desc->tbs[i+2].lo = htole32(seg->ds_addr);
3919                 desc->tbs[i+2].hi_n_len = \
3920                     htole16(iwm_get_dma_hi_addr(seg->ds_addr))
3921                     | ((seg->ds_len) << 4);
3922         }
3923
3924         bus_dmamap_sync(ring->data_dmat, data->map,
3925             BUS_DMASYNC_PREWRITE);
3926         bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3927             BUS_DMASYNC_PREWRITE);
3928         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3929             BUS_DMASYNC_PREWRITE);
3930
3931 #if 0
3932         iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3933 #endif
3934
3935         /* Kick TX ring. */
3936         ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3937         IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3938
3939         /* Mark TX ring as full if we reach a certain threshold. */
3940         if (++ring->queued > IWM_TX_RING_HIMARK) {
3941                 sc->qfullmsk |= 1 << ring->qid;
3942         }
3943
3944         return 0;
3945 }
3946
3947 static int
3948 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3949     const struct ieee80211_bpf_params *params)
3950 {
3951         struct ieee80211com *ic = ni->ni_ic;
3952         struct iwm_softc *sc = ic->ic_softc;
3953         int error = 0;
3954
3955         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3956             "->%s begin\n", __func__);
3957
3958         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3959                 m_freem(m);
3960                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3961                     "<-%s not RUNNING\n", __func__);
3962                 return (ENETDOWN);
3963         }
3964
3965         IWM_LOCK(sc);
3966         /* XXX fix this */
3967         if (params == NULL) {
3968                 error = iwm_tx(sc, m, ni, 0);
3969         } else {
3970                 error = iwm_tx(sc, m, ni, 0);
3971         }
3972         sc->sc_tx_timer = 5;
3973         IWM_UNLOCK(sc);
3974
3975         return (error);
3976 }
3977
3978 /*
3979  * mvm/tx.c
3980  */
3981
3982 /*
3983  * Note that there are transports that buffer frames before they reach
3984  * the firmware. This means that after flush_tx_path is called, the
3985  * queue might not be empty. The race-free way to handle this is to:
3986  * 1) set the station as draining
3987  * 2) flush the Tx path
3988  * 3) wait for the transport queues to be empty
3989  */
3990 int
3991 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3992 {
3993         int ret;
3994         struct iwm_tx_path_flush_cmd flush_cmd = {
3995                 .queues_ctl = htole32(tfd_msk),
3996                 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3997         };
3998
3999         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
4000             sizeof(flush_cmd), &flush_cmd);
4001         if (ret)
4002                 device_printf(sc->sc_dev,
4003                     "Flushing tx queue failed: %d\n", ret);
4004         return ret;
4005 }
4006
4007 /*
4008  * BEGIN mvm/sta.c
4009  */
4010
4011 static int
4012 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
4013         struct iwm_mvm_add_sta_cmd_v7 *cmd, int *status)
4014 {
4015         return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
4016             cmd, status);
4017 }
4018
4019 /* send station add/update command to firmware */
4020 static int
4021 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
4022 {
4023         struct iwm_mvm_add_sta_cmd_v7 add_sta_cmd;
4024         int ret;
4025         uint32_t status;
4026
4027         memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
4028
4029         add_sta_cmd.sta_id = IWM_STATION_ID;
4030         add_sta_cmd.mac_id_n_color
4031             = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
4032                 IWM_DEFAULT_COLOR));
4033         if (!update) {
4034                 int ac;
4035                 for (ac = 0; ac < WME_NUM_AC; ac++) {
4036                         add_sta_cmd.tfd_queue_msk |=
4037                             htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
4038                 }
4039                 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
4040         }
4041         add_sta_cmd.add_modify = update ? 1 : 0;
4042         add_sta_cmd.station_flags_msk
4043             |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
4044         add_sta_cmd.tid_disable_tx = htole16(0xffff);
4045         if (update)
4046                 add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
4047
4048         status = IWM_ADD_STA_SUCCESS;
4049         ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
4050         if (ret)
4051                 return ret;
4052
4053         switch (status) {
4054         case IWM_ADD_STA_SUCCESS:
4055                 break;
4056         default:
4057                 ret = EIO;
4058                 device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
4059                 break;
4060         }
4061
4062         return ret;
4063 }
4064
/* Add the BSS station to the firmware's station table (update == 0). */
static int
iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_sta_send_to_fw(sc, in, 0);
}
4070
/* Modify an already-added BSS station entry (update == 1). */
static int
iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_sta_send_to_fw(sc, in, 1);
}
4076
4077 static int
4078 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
4079         const uint8_t *addr, uint16_t mac_id, uint16_t color)
4080 {
4081         struct iwm_mvm_add_sta_cmd_v7 cmd;
4082         int ret;
4083         uint32_t status;
4084
4085         memset(&cmd, 0, sizeof(cmd));
4086         cmd.sta_id = sta->sta_id;
4087         cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
4088
4089         cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
4090         cmd.tid_disable_tx = htole16(0xffff);
4091
4092         if (addr)
4093                 IEEE80211_ADDR_COPY(cmd.addr, addr);
4094
4095         ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
4096         if (ret)
4097                 return ret;
4098
4099         switch (status) {
4100         case IWM_ADD_STA_SUCCESS:
4101                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
4102                     "%s: Internal station added.\n", __func__);
4103                 return 0;
4104         default:
4105                 device_printf(sc->sc_dev,
4106                     "%s: Add internal station failed, status=0x%x\n",
4107                     __func__, status);
4108                 ret = EIO;
4109                 break;
4110         }
4111         return ret;
4112 }
4113
4114 static int
4115 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
4116 {
4117         int ret;
4118
4119         sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
4120         sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
4121
4122         ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
4123         if (ret)
4124                 return ret;
4125
4126         ret = iwm_mvm_add_int_sta_common(sc,
4127             &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
4128
4129         if (ret)
4130                 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
4131         return ret;
4132 }
4133
4134 /*
4135  * END mvm/sta.c
4136  */
4137
4138 /*
4139  * BEGIN mvm/quota.c
4140  */
4141
/*
 * Distribute the firmware's IWM_MVM_MAX_QUOTA scheduling fragments
 * equally across the active bindings and push the result with
 * IWM_TIME_QUOTA_CMD.  'in' may be NULL, in which case no binding is
 * active and all quota slots are sent as invalid/zero.
 */
static int
iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
{
	struct iwm_time_quota_cmd cmd;
	int i, idx, ret, num_active_macs, quota, quota_rem;
	/* colors[i] < 0 marks binding slot i as unused. */
	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
	int n_ifs[IWM_MAX_BINDINGS] = {0, };
	uint16_t id;

	memset(&cmd, 0, sizeof(cmd));

	/* currently, PHY ID == binding ID */
	if (in) {
		id = in->in_phyctxt->id;
		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
		colors[id] = in->in_phyctxt->color;

		/*
		 * NOTE(review): 'if (1)' kept verbatim — presumably a
		 * placeholder for a future per-vif activity check.
		 */
		if (1)
			n_ifs[id] = 1;
	}

	/*
	 * The FW's scheduling session consists of
	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
	 * equally between all the bindings that require quota
	 */
	num_active_macs = 0;
	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
		num_active_macs += n_ifs[i];
	}

	quota = 0;
	quota_rem = 0;
	if (num_active_macs) {
		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
	}

	/* Fill one command slot (idx) per active binding (i). */
	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
		if (colors[i] < 0)
			continue;

		cmd.quotas[idx].id_and_color =
			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));

		if (n_ifs[i] <= 0) {
			cmd.quotas[idx].quota = htole32(0);
			cmd.quotas[idx].max_duration = htole32(0);
		} else {
			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
			cmd.quotas[idx].max_duration = htole32(0);
		}
		idx++;
	}

	/* Give the remainder of the session to the first binding */
	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
	    sizeof(cmd), &cmd);
	if (ret)
		device_printf(sc->sc_dev,
		    "%s: Failed to send quota: %d\n", __func__, ret);
	return ret;
}
4208
4209 /*
4210  * END mvm/quota.c
4211  */
4212
4213 /*
4214  * ieee80211 routines
4215  */
4216
4217 /*
4218  * Change to AUTH state in 80211 state machine.  Roughly matches what
4219  * Linux does in bss_info_changed().
4220  */
4221 static int
4222 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
4223 {
4224         struct ieee80211_node *ni;
4225         struct iwm_node *in;
4226         struct iwm_vap *iv = IWM_VAP(vap);
4227         uint32_t duration;
4228         int error;
4229
4230         /*
4231          * XXX i have a feeling that the vap node is being
4232          * freed from underneath us. Grr.
4233          */
4234         ni = ieee80211_ref_node(vap->iv_bss);
4235         in = IWM_NODE(ni);
4236         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4237             "%s: called; vap=%p, bss ni=%p\n",
4238             __func__,
4239             vap,
4240             ni);
4241
4242         in->in_assoc = 0;
4243
4244         error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
4245         if (error != 0)
4246                 return error;
4247
4248         error = iwm_allow_mcast(vap, sc);
4249         if (error) {
4250                 device_printf(sc->sc_dev,
4251                     "%s: failed to set multicast\n", __func__);
4252                 goto out;
4253         }
4254
4255         /*
4256          * This is where it deviates from what Linux does.
4257          *
4258          * Linux iwlwifi doesn't reset the nic each time, nor does it
4259          * call ctxt_add() here.  Instead, it adds it during vap creation,
4260          * and always does a mac_ctx_changed().
4261          *
4262          * The openbsd port doesn't attempt to do that - it reset things
4263          * at odd states and does the add here.
4264          *
4265          * So, until the state handling is fixed (ie, we never reset
4266          * the NIC except for a firmware failure, which should drag
4267          * the NIC back to IDLE, re-setup and re-add all the mac/phy
4268          * contexts that are required), let's do a dirty hack here.
4269          */
4270         if (iv->is_uploaded) {
4271                 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4272                         device_printf(sc->sc_dev,
4273                             "%s: failed to update MAC\n", __func__);
4274                         goto out;
4275                 }
4276                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4277                     in->in_ni.ni_chan, 1, 1)) != 0) {
4278                         device_printf(sc->sc_dev,
4279                             "%s: failed update phy ctxt\n", __func__);
4280                         goto out;
4281                 }
4282                 in->in_phyctxt = &sc->sc_phyctxt[0];
4283
4284                 if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
4285                         device_printf(sc->sc_dev,
4286                             "%s: binding update cmd\n", __func__);
4287                         goto out;
4288                 }
4289                 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4290                         device_printf(sc->sc_dev,
4291                             "%s: failed to update sta\n", __func__);
4292                         goto out;
4293                 }
4294         } else {
4295                 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4296                         device_printf(sc->sc_dev,
4297                             "%s: failed to add MAC\n", __func__);
4298                         goto out;
4299                 }
4300                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4301                     in->in_ni.ni_chan, 1, 1)) != 0) {
4302                         device_printf(sc->sc_dev,
4303                             "%s: failed add phy ctxt!\n", __func__);
4304                         error = ETIMEDOUT;
4305                         goto out;
4306                 }
4307                 in->in_phyctxt = &sc->sc_phyctxt[0];
4308
4309                 if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
4310                         device_printf(sc->sc_dev,
4311                             "%s: binding add cmd\n", __func__);
4312                         goto out;
4313                 }
4314                 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4315                         device_printf(sc->sc_dev,
4316                             "%s: failed to add sta\n", __func__);
4317                         goto out;
4318                 }
4319         }
4320
4321         /*
4322          * Prevent the FW from wandering off channel during association
4323          * by "protecting" the session with a time event.
4324          */
4325         /* XXX duration is in units of TU, not MS */
4326         duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4327         iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
4328         DELAY(100);
4329
4330         error = 0;
4331 out:
4332         ieee80211_free_node(ni);
4333         return (error);
4334 }
4335
4336 static int
4337 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
4338 {
4339         struct iwm_node *in = IWM_NODE(vap->iv_bss);
4340         int error;
4341
4342         if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4343                 device_printf(sc->sc_dev,
4344                     "%s: failed to update STA\n", __func__);
4345                 return error;
4346         }
4347
4348         in->in_assoc = 1;
4349         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4350                 device_printf(sc->sc_dev,
4351                     "%s: failed to update MAC\n", __func__);
4352                 return error;
4353         }
4354
4355         return 0;
4356 }
4357
/*
 * Leave RUN state by flushing the tx path and fully resetting the
 * device.  'in' may be NULL.  Always returns 0; the #if 0 block
 * preserves the "proper" piecewise teardown for future debugging.
 */
static int
iwm_release(struct iwm_softc *sc, struct iwm_node *in)
{
	uint32_t tfd_msk;

	/*
	 * Ok, so *technically* the proper set of calls for going
	 * from RUN back to SCAN is:
	 *
	 * iwm_mvm_power_mac_disable(sc, in);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_rm_sta(sc, in);
	 * iwm_mvm_update_quotas(sc, NULL);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_binding_remove_vif(sc, in);
	 * iwm_mvm_mac_ctxt_remove(sc, in);
	 *
	 * However, that freezes the device no matter which permutations
	 * and modifications are attempted.  Obviously, this driver is missing
	 * something since it works in the Linux driver, but figuring out what
	 * is missing is a little more complicated.  Now, since we're going
	 * back to nothing anyway, we'll just do a complete device reset.
	 */
	/*
	 * Just using 0xf for the queues mask is fine as long as we only
	 * get here from RUN state.
	 */
	tfd_msk = 0xf;
	mbufq_drain(&sc->sc_snd);
	iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
	/*
	 * We seem to get away with just synchronously sending the
	 * IWM_TXPATH_FLUSH command.
	 */
//	iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
	iwm_stop_device(sc);
	iwm_init_hw(sc);
	if (in)
		in->in_assoc = 0;
	return 0;

#if 0
	int error;

	iwm_mvm_power_mac_disable(sc, in);

	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
		return error;
	}

	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
		return error;
	}
	error = iwm_mvm_rm_sta(sc, in);
	in->in_assoc = 0;
	iwm_mvm_update_quotas(sc, NULL);
	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
		return error;
	}
	iwm_mvm_binding_remove_vif(sc, in);

	iwm_mvm_mac_ctxt_remove(sc, in);

	return error;
#endif
}
4428
4429 static struct ieee80211_node *
4430 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4431 {
4432         return malloc(sizeof (struct iwm_node), M_80211_NODE,
4433             M_NOWAIT | M_ZERO);
4434 }
4435
/*
 * Build the link-quality (rate selection) command for a node: map the
 * node's 802.11 legacy rates to hardware rate indices (in_ridx[]) and
 * fill in->in_lq.rs_table from highest to lowest rate.  The command
 * itself is sent by the caller (iwm_newstate RUN path).
 */
static void
iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
//	int txant = 0;

	if (nrates > nitems(lq->rs_table)) {
		device_printf(sc->sc_dev,
		    "%s: node supports %d rates, driver handles "
		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
		return;
	}
	if (nrates == 0) {
		device_printf(sc->sc_dev,
		    "%s: node supports 0 rates, odd!\n", __func__);
		return;
	}

	/*
	 * XXX .. and most of iwm_node is not initialised explicitly;
	 * it's all just 0x0 passed to the firmware.
	 */

	/* first figure out which rates we should support */
	/* XXX TODO: this isn't 11n aware /at all/ */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
	    "%s: nrates=%d\n", __func__, nrates);

	/*
	 * Loop over nrates and populate in_ridx from the highest
	 * rate to the lowest rate.  Remember, in_ridx[] has
	 * IEEE80211_RATE_MAXSIZE entries!
	 */
	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX) {
			/*
			 * NOTE(review): on this path in_ridx[i] stays -1
			 * (from the memset above) yet is still used as an
			 * index into iwm_rates[] in the table-build loop
			 * below — looks out-of-bounds; verify whether an
			 * unmapped rate can actually occur here.
			 */
			device_printf(sc->sc_dev,
			    "%s: WARNING: device rate for %d not found!\n",
			    __func__, rate);
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
			    __func__,
			    i,
			    rate,
			    ridx);
			in->in_ridx[i] = ridx;
		}
	}

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/* For HT, always enable RTS/CTS to avoid excessive retries. */
	if (ni->ni_flags & IEEE80211_NODE_HT)
		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	/*
	 * XXX TODO: this should be looping over the min of nrates
	 * and LQ_MAX_RETRY_NUM.  Sigh.
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

#if 0
		if (txant == 0)
			txant = iwm_mvm_get_valid_tx_ant(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;
#else
		nextant = iwm_mvm_get_valid_tx_ant(sc);
#endif
		/*
		 * Map the rate id into a rate index into
		 * our hardware table containing the
		 * configuration to use for this rate.
		 */
		ridx = in->in_ridx[i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "station rate i=%d, rate=%d, hw=%x\n",
		    i, iwm_rates[ridx].rate, tab);
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	for (i = nrates; i < nitems(lq->rs_table); i++) {
		KASSERT(tab != 0, ("invalid tab"));
		lq->rs_table[i] = htole32(tab);
	}
}
4554
4555 static int
4556 iwm_media_change(struct ifnet *ifp)
4557 {
4558         struct ieee80211vap *vap = ifp->if_softc;
4559         struct ieee80211com *ic = vap->iv_ic;
4560         struct iwm_softc *sc = ic->ic_softc;
4561         int error;
4562
4563         error = ieee80211_media_change(ifp);
4564         if (error != ENETRESET)
4565                 return error;
4566
4567         IWM_LOCK(sc);
4568         if (ic->ic_nrunning > 0) {
4569                 iwm_stop(sc);
4570                 iwm_init(sc);
4571         }
4572         IWM_UNLOCK(sc);
4573         return error;
4574 }
4575
4576
/*
 * net80211 state-machine hook.  Entered with the 802.11 comlock held;
 * we drop it and take the softc lock (lock order: IEEE80211 -> IWM is
 * never held simultaneously here — each transition swaps one for the
 * other), do the driver-side work, then chain to the saved
 * net80211 handler.
 */
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_node *in;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
	    "switching state %s -> %s\n",
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate]);
	IEEE80211_UNLOCK(ic);
	IWM_LOCK(sc);

	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
		iwm_led_blink_stop(sc);

	/* disable beacon filtering if we're hopping out of RUN */
	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
		iwm_mvm_disable_beacon_filter(sc);

		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
			in->in_assoc = 0;

		if (nstate == IEEE80211_S_INIT) {
			/*
			 * Run the net80211 handler first (under the
			 * comlock), then reset the device via
			 * iwm_release() under the softc lock.
			 */
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			error = ivp->iv_newstate(vap, nstate, arg);
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
			iwm_release(sc, NULL);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return error;
		}

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 *
		 * XXX TODO: fix this for FreeBSD!
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Force transition to INIT; MGT=%d\n", arg);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			/* Always pass arg as -1 since we can't Tx right now. */
			/*
			 * XXX arg is just ignored anyway when transitioning
			 *     to IEEE80211_S_INIT.
			 */
			vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Going INIT->SCAN\n");
			nstate = IEEE80211_S_SCAN;
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state: %d\n",
			    __func__, error);
			break;
		}
		break;

	case IEEE80211_S_ASSOC:
		if ((error = iwm_assoc(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to associate: %d\n", __func__,
			    error);
			break;
		}
		break;

	case IEEE80211_S_RUN:
	{
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		/* Update the association state, now we have it all */
		/* (eg associd comes in at this point */
		error = iwm_assoc(vap, sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update association state: %d\n",
			    __func__,
			    error);
			break;
		}

		/* Power, beacon filtering, quotas, and the rate table. */
		in = IWM_NODE(vap->iv_bss);
		iwm_mvm_power_mac_update_mode(sc, in);
		iwm_mvm_enable_beacon_filter(sc, in);
		iwm_mvm_update_quotas(sc, in);
		iwm_setrates(sc, in);

		/* Push the LQ command that iwm_setrates() filled in. */
		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: IWM_LQ_CMD failed\n", __func__);
		}

		iwm_mvm_led_enable(sc);
		break;
	}

	default:
		break;
	}
	IWM_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	return (ivp->iv_newstate(vap, nstate, arg));
}
4714
4715 void
4716 iwm_endscan_cb(void *arg, int pending)
4717 {
4718         struct iwm_softc *sc = arg;
4719         struct ieee80211com *ic = &sc->sc_ic;
4720
4721         IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4722             "%s: scan ended\n",
4723             __func__);
4724
4725         ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4726 }
4727
4728 /*
4729  * Aging and idle timeouts for the different possible scenarios
4730  * in default configuration
4731  */
4732 static const uint32_t
4733 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4734         {
4735                 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
4736                 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
4737         },
4738         {
4739                 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
4740                 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
4741         },
4742         {
4743                 htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
4744                 htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
4745         },
4746         {
4747                 htole32(IWM_SF_BA_AGING_TIMER_DEF),
4748                 htole32(IWM_SF_BA_IDLE_TIMER_DEF)
4749         },
4750         {
4751                 htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
4752                 htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
4753         },
4754 };
4755
4756 /*
4757  * Aging and idle timeouts for the different possible scenarios
4758  * in single BSS MAC configuration.
4759  */
4760 static const uint32_t
4761 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4762         {
4763                 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
4764                 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
4765         },
4766         {
4767                 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
4768                 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
4769         },
4770         {
4771                 htole32(IWM_SF_MCAST_AGING_TIMER),
4772                 htole32(IWM_SF_MCAST_IDLE_TIMER)
4773         },
4774         {
4775                 htole32(IWM_SF_BA_AGING_TIMER),
4776                 htole32(IWM_SF_BA_IDLE_TIMER)
4777         },
4778         {
4779                 htole32(IWM_SF_TX_RE_AGING_TIMER),
4780                 htole32(IWM_SF_TX_RE_IDLE_TIMER)
4781         },
4782 };
4783
4784 static void
4785 iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
4786     struct ieee80211_node *ni)
4787 {
4788         int i, j, watermark;
4789
4790         sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
4791
4792         /*
4793          * If we are in association flow - check antenna configuration
4794          * capabilities of the AP station, and choose the watermark accordingly.
4795          */
4796         if (ni) {
4797                 if (ni->ni_flags & IEEE80211_NODE_HT) {
4798 #ifdef notyet
4799                         if (ni->ni_rxmcs[2] != 0)
4800                                 watermark = IWM_SF_W_MARK_MIMO3;
4801                         else if (ni->ni_rxmcs[1] != 0)
4802                                 watermark = IWM_SF_W_MARK_MIMO2;
4803                         else
4804 #endif
4805                                 watermark = IWM_SF_W_MARK_SISO;
4806                 } else {
4807                         watermark = IWM_SF_W_MARK_LEGACY;
4808                 }
4809         /* default watermark value for unassociated mode. */
4810         } else {
4811                 watermark = IWM_SF_W_MARK_MIMO2;
4812         }
4813         sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
4814
4815         for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
4816                 for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
4817                         sf_cmd->long_delay_timeouts[i][j] =
4818                                         htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
4819                 }
4820         }
4821
4822         if (ni) {
4823                 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
4824                        sizeof(iwm_sf_full_timeout));
4825         } else {
4826                 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
4827                        sizeof(iwm_sf_full_timeout_def));
4828         }
4829 }
4830
4831 static int
4832 iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4833 {
4834         struct ieee80211com *ic = &sc->sc_ic;
4835         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4836         struct iwm_sf_cfg_cmd sf_cmd = {
4837                 .state = htole32(IWM_SF_FULL_ON),
4838         };
4839         int ret = 0;
4840
4841         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4842                 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4843
4844         switch (new_state) {
4845         case IWM_SF_UNINIT:
4846         case IWM_SF_INIT_OFF:
4847                 iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4848                 break;
4849         case IWM_SF_FULL_ON:
4850                 iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4851                 break;
4852         default:
4853                 IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4854                     "Invalid state: %d. not sending Smart Fifo cmd\n",
4855                           new_state);
4856                 return EINVAL;
4857         }
4858
4859         ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4860                                    sizeof(sf_cmd), &sf_cmd);
4861         return ret;
4862 }
4863
4864 static int
4865 iwm_send_bt_init_conf(struct iwm_softc *sc)
4866 {
4867         struct iwm_bt_coex_cmd bt_cmd;
4868
4869         bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4870         bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4871
4872         return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4873             &bt_cmd);
4874 }
4875
/*
 * Ask the firmware to switch its regulatory domain (MCC, "mobile
 * country code") to the two-letter country code in alpha2.  Sends
 * IWM_MCC_UPDATE_CMD synchronously and, under IWM_DEBUG, decodes and
 * prints the firmware's response.  Returns 0 on success or the
 * command-send error.
 */
static int
iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
{
	struct iwm_mcc_update_cmd mcc_cmd;
	struct iwm_host_cmd hcmd = {
		.id = IWM_MCC_UPDATE_CMD,
		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
		.data = { &mcc_cmd },
	};
	int ret;
#ifdef IWM_DEBUG
	struct iwm_rx_packet *pkt;
	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
	struct iwm_mcc_update_resp *mcc_resp;
	int n_channels;
	uint16_t mcc;
#endif
	/* Firmware with LAR_SUPPORT_V2 answers with the v2 layout. */
	int resp_v2 = isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	/* Country code: two ASCII characters packed into 16 bits. */
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;

	/* Older firmware takes the shorter v1 command. */
	if (resp_v2)
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
	else
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);

	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
	    "send MCC update to FW with '%c%c' src = %d\n",
	    alpha2[0], alpha2[1], mcc_cmd.source_id);

	ret = iwm_send_cmd(sc, &hcmd);
	if (ret)
		return ret;

#ifdef IWM_DEBUG
	pkt = hcmd.resp_pkt;

	/* Extract MCC response */
	if (resp_v2) {
		mcc_resp = (void *)pkt->data;
		mcc = mcc_resp->mcc;
		n_channels =  le32toh(mcc_resp->n_channels);
	} else {
		mcc_resp_v1 = (void *)pkt->data;
		mcc = mcc_resp_v1->mcc;
		n_channels =  le32toh(mcc_resp_v1->n_channels);
	}

	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
	if (mcc == 0)
		mcc = 0x3030;  /* "00" - world */

	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
	    "regulatory domain '%c%c' (%d channels available)\n",
	    mcc >> 8, mcc & 0xff, n_channels);
#endif
	/* IWM_CMD_WANT_SKB handed us the response buffer; release it. */
	iwm_free_resp(sc, &hcmd);

	return 0;
}
4943
4944 static void
4945 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4946 {
4947         struct iwm_host_cmd cmd = {
4948                 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4949                 .len = { sizeof(uint32_t), },
4950                 .data = { &backoff, },
4951         };
4952
4953         if (iwm_send_cmd(sc, &cmd) != 0) {
4954                 device_printf(sc->sc_dev,
4955                     "failed to change thermal tx backoff\n");
4956         }
4957 }
4958
/*
 * Full hardware + firmware bring-up: start the NIC, run the INIT
 * ucode image, restart, load the regular ucode, then perform the
 * post-alive configuration sequence (BT coex, antennas, PHY db,
 * aux station, PHY contexts, power, regulatory, scan config, TX
 * queues, beacon filter).  The step order follows the firmware's
 * expected init flow; do not reorder.  Returns 0 on success; on any
 * post-restart failure the device is stopped before returning the
 * error.
 */
static int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int error, i, ac;

	if ((error = iwm_start_hw(sc)) != 0) {
		printf("iwm_start_hw: failed %d\n", error);
		return error;
	}

	/* Run the INIT firmware image (calibration etc.) first. */
	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
		printf("iwm_run_init_mvm_ucode: failed %d\n", error);
		return error;
	}

	/*
	 * should stop and start HW since that INIT
	 * image just loaded
	 */
	iwm_stop_device(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* omstart, this time with the regular firmware */
	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
	if (error) {
		device_printf(sc->sc_dev, "could not load firmware\n");
		goto error;
	}

	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
		device_printf(sc->sc_dev, "bt init conf failed\n");
		goto error;
	}

	/* Tell the firmware which TX antenna chains are usable. */
	error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
	if (error != 0) {
		device_printf(sc->sc_dev, "antenna config failed\n");
		goto error;
	}

	/* Send phy db control command and then phy db calibration */
	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
		goto error;

	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
		goto error;
	}

	/* Add auxiliary station for scanning */
	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
		device_printf(sc->sc_dev, "add_aux_sta failed\n");
		goto error;
	}

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		if ((error = iwm_mvm_phy_ctxt_add(sc,
		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
			goto error;
	}

	/* Initialize tx backoffs to the minimum. */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_mvm_tt_tx_backoff(sc, 0);

	error = iwm_mvm_power_update_device(sc);
	if (error)
		goto error;

	/* LAR: start from the "ZZ" (world) regulatory domain. */
	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
		if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
			goto error;
	}

	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
			goto error;
	}

	/* Enable Tx queues. */
	for (ac = 0; ac < WME_NUM_AC; ac++) {
		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
		    iwm_mvm_ac_to_tx_fifo[ac]);
		if (error)
			goto error;
	}

	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
		goto error;
	}

	return 0;

 error:
	iwm_stop_device(sc);
	return error;
}
5066
5067 /* Allow multicast from our BSSID. */
5068 static int
5069 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
5070 {
5071         struct ieee80211_node *ni = vap->iv_bss;
5072         struct iwm_mcast_filter_cmd *cmd;
5073         size_t size;
5074         int error;
5075
5076         size = roundup(sizeof(*cmd), 4);
5077         cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
5078         if (cmd == NULL)
5079                 return ENOMEM;
5080         cmd->filter_own = 1;
5081         cmd->port_id = 0;
5082         cmd->count = 0;
5083         cmd->pass_all = 1;
5084         IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
5085
5086         error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
5087             IWM_CMD_SYNC, size, cmd);
5088         free(cmd, M_DEVBUF);
5089
5090         return (error);
5091 }
5092
5093 /*
5094  * ifnet interfaces
5095  */
5096
5097 static void
5098 iwm_init(struct iwm_softc *sc)
5099 {
5100         int error;
5101
5102         if (sc->sc_flags & IWM_FLAG_HW_INITED) {
5103                 return;
5104         }
5105         sc->sc_generation++;
5106         sc->sc_flags &= ~IWM_FLAG_STOPPED;
5107
5108         if ((error = iwm_init_hw(sc)) != 0) {
5109                 printf("iwm_init_hw failed %d\n", error);
5110                 iwm_stop(sc);
5111                 return;
5112         }
5113
5114         /*
5115          * Ok, firmware loaded and we are jogging
5116          */
5117         sc->sc_flags |= IWM_FLAG_HW_INITED;
5118         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
5119 }
5120
5121 static int
5122 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
5123 {
5124         struct iwm_softc *sc;
5125         int error;
5126
5127         sc = ic->ic_softc;
5128
5129         IWM_LOCK(sc);
5130         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
5131                 IWM_UNLOCK(sc);
5132                 return (ENXIO);
5133         }
5134         error = mbufq_enqueue(&sc->sc_snd, m);
5135         if (error) {
5136                 IWM_UNLOCK(sc);
5137                 return (error);
5138         }
5139         iwm_start(sc);
5140         IWM_UNLOCK(sc);
5141         return (0);
5142 }
5143
5144 /*
5145  * Dequeue packets from sendq and call send.
5146  */
5147 static void
5148 iwm_start(struct iwm_softc *sc)
5149 {
5150         struct ieee80211_node *ni;
5151         struct mbuf *m;
5152         int ac = 0;
5153
5154         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
5155         while (sc->qfullmsk == 0 &&
5156                 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
5157                 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
5158                 if (iwm_tx(sc, m, ni, ac) != 0) {
5159                         if_inc_counter(ni->ni_vap->iv_ifp,
5160                             IFCOUNTER_OERRORS, 1);
5161                         ieee80211_free_node(ni);
5162                         continue;
5163                 }
5164                 sc->sc_tx_timer = 15;
5165         }
5166         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
5167 }
5168
/*
 * Bring the interface down and halt the device.  Callers in this
 * file invoke it with the IWM lock held.  Bumping sc_generation
 * lets in-flight work from the previous "up" period be recognized
 * as stale.
 */
static void
iwm_stop(struct iwm_softc *sc)
{

	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	sc->sc_flags |= IWM_FLAG_STOPPED;
	sc->sc_generation++;
	iwm_led_blink_stop(sc);
	sc->sc_tx_timer = 0;	/* disarm the TX watchdog */
	iwm_stop_device(sc);
	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
}
5181
5182 static void
5183 iwm_watchdog(void *arg)
5184 {
5185         struct iwm_softc *sc = arg;
5186         struct ieee80211com *ic = &sc->sc_ic;
5187
5188         if (sc->sc_tx_timer > 0) {
5189                 if (--sc->sc_tx_timer == 0) {
5190                         device_printf(sc->sc_dev, "device timeout\n");
5191 #ifdef IWM_DEBUG
5192                         iwm_nic_error(sc);
5193 #endif
5194                         ieee80211_restart_all(ic);
5195                         counter_u64_add(sc->sc_ic.ic_oerrors, 1);
5196                         return;
5197                 }
5198         }
5199         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
5200 }
5201
5202 static void
5203 iwm_parent(struct ieee80211com *ic)
5204 {
5205         struct iwm_softc *sc = ic->ic_softc;
5206         int startall = 0;
5207
5208         IWM_LOCK(sc);
5209         if (ic->ic_nrunning > 0) {
5210                 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5211                         iwm_init(sc);
5212                         startall = 1;
5213                 }
5214         } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5215                 iwm_stop(sc);
5216         IWM_UNLOCK(sc);
5217         if (startall)
5218                 ieee80211_start_all(ic);
5219 }
5220
5221 /*
5222  * The interrupt side of things
5223  */
5224
5225 /*
5226  * error dumping routines are from iwlwifi/mvm/utils.c
5227  */
5228
5229 /*
5230  * Note: This structure is read from the device with IO accesses,
5231  * and the reading already does the endian conversion. As it is
5232  * read with uint32_t-sized accesses, any members with a different size
5233  * need to be ordered correctly though!
5234  */
5235 struct iwm_error_event_table {
5236         uint32_t valid;         /* (nonzero) valid, (0) log is empty */
5237         uint32_t error_id;              /* type of error */
5238         uint32_t trm_hw_status0;        /* TRM HW status */
5239         uint32_t trm_hw_status1;        /* TRM HW status */
5240         uint32_t blink2;                /* branch link */
5241         uint32_t ilink1;                /* interrupt link */
5242         uint32_t ilink2;                /* interrupt link */
5243         uint32_t data1;         /* error-specific data */
5244         uint32_t data2;         /* error-specific data */
5245         uint32_t data3;         /* error-specific data */
5246         uint32_t bcon_time;             /* beacon timer */
5247         uint32_t tsf_low;               /* network timestamp function timer */
5248         uint32_t tsf_hi;                /* network timestamp function timer */
5249         uint32_t gp1;           /* GP1 timer register */
5250         uint32_t gp2;           /* GP2 timer register */
5251         uint32_t fw_rev_type;   /* firmware revision type */
5252         uint32_t major;         /* uCode version major */
5253         uint32_t minor;         /* uCode version minor */
5254         uint32_t hw_ver;                /* HW Silicon version */
5255         uint32_t brd_ver;               /* HW board version */
5256         uint32_t log_pc;                /* log program counter */
5257         uint32_t frame_ptr;             /* frame pointer */
5258         uint32_t stack_ptr;             /* stack pointer */
5259         uint32_t hcmd;          /* last host command header */
5260         uint32_t isr0;          /* isr status register LMPM_NIC_ISR0:
5261                                  * rxtx_flag */
5262         uint32_t isr1;          /* isr status register LMPM_NIC_ISR1:
5263                                  * host_flag */
5264         uint32_t isr2;          /* isr status register LMPM_NIC_ISR2:
5265                                  * enc_flag */
5266         uint32_t isr3;          /* isr status register LMPM_NIC_ISR3:
5267                                  * time_flag */
5268         uint32_t isr4;          /* isr status register LMPM_NIC_ISR4:
5269                                  * wico interrupt */
5270         uint32_t last_cmd_id;   /* last HCMD id handled by the firmware */
5271         uint32_t wait_event;            /* wait event() caller address */
5272         uint32_t l2p_control;   /* L2pControlField */
5273         uint32_t l2p_duration;  /* L2pDurationField */
5274         uint32_t l2p_mhvalid;   /* L2pMhValidBits */
5275         uint32_t l2p_addr_match;        /* L2pAddrMatchStat */
5276         uint32_t lmpm_pmg_sel;  /* indicate which clocks are turned on
5277                                  * (LMPM_PMG_SEL) */
5278         uint32_t u_timestamp;   /* indicate when the date and time of the
5279                                  * compilation */
5280         uint32_t flow_handler;  /* FH read/write pointers, RX credit */
5281 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5282
5283 /*
5284  * UMAC error struct - relevant starting from family 8000 chip.
5285  * Note: This structure is read from the device with IO accesses,
5286  * and the reading already does the endian conversion. As it is
5287  * read with u32-sized accesses, any members with a different size
5288  * need to be ordered correctly though!
5289  */
5290 struct iwm_umac_error_event_table {
5291         uint32_t valid;         /* (nonzero) valid, (0) log is empty */
5292         uint32_t error_id;      /* type of error */
5293         uint32_t blink1;        /* branch link */
5294         uint32_t blink2;        /* branch link */
5295         uint32_t ilink1;        /* interrupt link */
5296         uint32_t ilink2;        /* interrupt link */
5297         uint32_t data1;         /* error-specific data */
5298         uint32_t data2;         /* error-specific data */
5299         uint32_t data3;         /* error-specific data */
5300         uint32_t umac_major;
5301         uint32_t umac_minor;
5302         uint32_t frame_pointer; /* core register 27*/
5303         uint32_t stack_pointer; /* core register 28 */
5304         uint32_t cmd_header;    /* latest host cmd sent to UMAC */
5305         uint32_t nic_isr_pref;  /* ISR status register */
5306 } __packed;
5307
/*
 * Error-log layout constants, used when sanity-checking table.valid
 * in iwm_nic_error()/iwm_nic_umac_error(): one 32-bit header word,
 * then 7-word entries (presumably matching the firmware's log
 * format -- TODO confirm against the firmware API definitions).
 */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5310
5311 #ifdef IWM_DEBUG
/*
 * Map of firmware error_id values to human-readable names, used by
 * iwm_desc_lookup().  The final ADVANCED_SYSASSERT entry is the
 * catch-all for unknown ids.  Declared static const: the table is
 * private to this file and never modified, and the unqualified
 * original leaked the generic name "advanced_lookup" into the
 * kernel-wide namespace as a writable symbol.
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
5333
5334 static const char *
5335 iwm_desc_lookup(uint32_t num)
5336 {
5337         int i;
5338
5339         for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5340                 if (advanced_lookup[i].num == num)
5341                         return advanced_lookup[i].name;
5342
5343         /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5344         return advanced_lookup[i].name;
5345 }
5346
/*
 * Dump the UMAC error log (family 8000+) from device memory.  The
 * table address was captured from the firmware's "alive" response
 * into sc->umac_error_event_table; reads below 0x800000 are treated
 * as an invalid pointer.
 */
static void
iwm_nic_umac_error(struct iwm_softc *sc)
{
	struct iwm_umac_error_event_table table;
	uint32_t base;

	base = sc->umac_error_event_table;

	if (base < 0x800000) {
		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
		    base);
		return;
	}

	/* iwm_read_mem() takes a count of 32-bit words, not bytes. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
		iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
	    table.ilink1);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
	    table.ilink2);
	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
	    table.frame_pointer);
	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
	    table.stack_pointer);
	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
	    table.nic_isr_pref);
}
5393
5394 /*
5395  * Support for dumping the error log seemed like a good idea ...
5396  * but it's mostly hex junk and the only sensible thing is the
5397  * hw/ucode revision (which we know anyway).  Since it's here,
5398  * I'll just leave it in, just in case e.g. the Intel guys want to
5399  * help us decipher some "ADVANCED_SYSASSERT" later.
5400  */
5401 static void
5402 iwm_nic_error(struct iwm_softc *sc)
5403 {
5404         struct iwm_error_event_table table;
5405         uint32_t base;
5406
5407         device_printf(sc->sc_dev, "dumping device error log\n");
5408         base = sc->error_event_table;
5409         if (base < 0x800000) {
5410                 device_printf(sc->sc_dev,
5411                     "Invalid error log pointer 0x%08x\n", base);
5412                 return;
5413         }
5414
5415         if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5416                 device_printf(sc->sc_dev, "reading errlog failed\n");
5417                 return;
5418         }
5419
5420         if (!table.valid) {
5421                 device_printf(sc->sc_dev, "errlog not found, skipping\n");
5422                 return;
5423         }
5424
5425         if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5426                 device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5427                 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5428                     sc->sc_flags, table.valid);
5429         }
5430
5431         device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5432             iwm_desc_lookup(table.error_id));
5433         device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5434             table.trm_hw_status0);
5435         device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5436             table.trm_hw_status1);
5437         device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5438         device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5439         device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5440         device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5441         device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5442         device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5443         device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5444         device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5445         device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5446         device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5447         device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5448         device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5449             table.fw_rev_type);
5450         device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5451         device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5452         device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5453         device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5454         device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5455         device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5456         device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5457         device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5458         device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5459         device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5460         device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5461         device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5462         device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5463         device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5464         device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5465         device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5466         device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5467         device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5468         device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5469
5470         if (sc->umac_error_event_table)
5471                 iwm_nic_umac_error(sc);
5472 }
5473 #endif
5474
/*
 * Advance the RX ring read index, wrapping at IWM_RX_RING_COUNT.
 * Fixed: the original definition ended with a stray semicolon
 * (so "ADVANCE_RXQ(sc);" expanded to a double statement, breaking
 * un-braced if/else use) and left the sc argument unparenthesized.
 */
#define ADVANCE_RXQ(sc) ((sc)->rxq.cur = ((sc)->rxq.cur + 1) % IWM_RX_RING_COUNT)
5476
5477 /*
5478  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5479  * Basic structure from if_iwn
5480  */
5481 static void
5482 iwm_notif_intr(struct iwm_softc *sc)
5483 {
5484         struct ieee80211com *ic = &sc->sc_ic;
5485         uint16_t hw;
5486
5487         bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5488             BUS_DMASYNC_POSTREAD);
5489
5490         hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5491
5492         /*
5493          * Process responses
5494          */
5495         while (sc->rxq.cur != hw) {
5496                 struct iwm_rx_ring *ring = &sc->rxq;
5497                 struct iwm_rx_data *data = &ring->data[ring->cur];
5498                 struct iwm_rx_packet *pkt;
5499                 struct iwm_cmd_response *cresp;
5500                 int qid, idx, code;
5501
5502                 bus_dmamap_sync(ring->data_dmat, data->map,
5503                     BUS_DMASYNC_POSTREAD);
5504                 pkt = mtod(data->m, struct iwm_rx_packet *);
5505
5506                 qid = pkt->hdr.qid & ~0x80;
5507                 idx = pkt->hdr.idx;
5508
5509                 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5510                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5511                     "rx packet qid=%d idx=%d type=%x %d %d\n",
5512                     pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, ring->cur, hw);
5513
5514                 /*
5515                  * randomly get these from the firmware, no idea why.
5516                  * they at least seem harmless, so just ignore them for now
5517                  */
5518                 if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
5519                     || pkt->len_n_flags == htole32(0x55550000))) {
5520                         ADVANCE_RXQ(sc);
5521                         continue;
5522                 }
5523
5524                 iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5525
5526                 switch (code) {
5527                 case IWM_REPLY_RX_PHY_CMD:
5528                         iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
5529                         break;
5530
5531                 case IWM_REPLY_RX_MPDU_CMD:
5532                         iwm_mvm_rx_rx_mpdu(sc, pkt, data);
5533                         break;
5534
5535                 case IWM_TX_CMD:
5536                         iwm_mvm_rx_tx_cmd(sc, pkt, data);
5537                         break;
5538
5539                 case IWM_MISSED_BEACONS_NOTIFICATION: {
5540                         struct iwm_missed_beacons_notif *resp;
5541                         int missed;
5542
5543                         /* XXX look at mac_id to determine interface ID */
5544                         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5545
5546                         resp = (void *)pkt->data;
5547                         missed = le32toh(resp->consec_missed_beacons);
5548
5549                         IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5550                             "%s: MISSED_BEACON: mac_id=%d, "
5551                             "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5552                             "num_rx=%d\n",
5553                             __func__,
5554                             le32toh(resp->mac_id),
5555                             le32toh(resp->consec_missed_beacons_since_last_rx),
5556                             le32toh(resp->consec_missed_beacons),
5557                             le32toh(resp->num_expected_beacons),
5558                             le32toh(resp->num_recvd_beacons));
5559
5560                         /* Be paranoid */
5561                         if (vap == NULL)
5562                                 break;
5563
5564                         /* XXX no net80211 locking? */
5565                         if (vap->iv_state == IEEE80211_S_RUN &&
5566                             (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5567                                 if (missed > vap->iv_bmissthreshold) {
5568                                         /* XXX bad locking; turn into task */
5569                                         IWM_UNLOCK(sc);
5570                                         ieee80211_beacon_miss(ic);
5571                                         IWM_LOCK(sc);
5572                                 }
5573                         }
5574
5575                         break; }
5576
5577                 case IWM_MFUART_LOAD_NOTIFICATION:
5578                         break;
5579
5580                 case IWM_MVM_ALIVE:
5581                         break;
5582
5583                 case IWM_CALIB_RES_NOTIF_PHY_DB:
5584                         break;
5585
5586                 case IWM_STATISTICS_NOTIFICATION: {
5587                         struct iwm_notif_statistics *stats;
5588                         stats = (void *)pkt->data;
5589                         memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5590                         sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
5591                         break; }
5592
5593                 case IWM_NVM_ACCESS_CMD:
5594                 case IWM_MCC_UPDATE_CMD:
5595                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
5596                                 memcpy(sc->sc_cmd_resp,
5597                                     pkt, sizeof(sc->sc_cmd_resp));
5598                         }
5599                         break;
5600
5601                 case IWM_MCC_CHUB_UPDATE_CMD: {
5602                         struct iwm_mcc_chub_notif *notif;
5603                         notif = (void *)pkt->data;
5604
5605                         sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5606                         sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5607                         sc->sc_fw_mcc[2] = '\0';
5608                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
5609                             "fw source %d sent CC '%s'\n",
5610                             notif->source_id, sc->sc_fw_mcc);
5611                         break; }
5612
5613                 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5614                 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5615                                  IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5616                         struct iwm_dts_measurement_notif_v1 *notif;
5617
5618                         if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5619                                 device_printf(sc->sc_dev,
5620                                     "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5621                                 break;
5622                         }
5623                         notif = (void *)pkt->data;
5624                         IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5625                             "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5626                             notif->temp);
5627                         break;
5628                 }
5629
5630                 case IWM_PHY_CONFIGURATION_CMD:
5631                 case IWM_TX_ANT_CONFIGURATION_CMD:
5632                 case IWM_ADD_STA:
5633                 case IWM_MAC_CONTEXT_CMD:
5634                 case IWM_REPLY_SF_CFG_CMD:
5635                 case IWM_POWER_TABLE_CMD:
5636                 case IWM_PHY_CONTEXT_CMD:
5637                 case IWM_BINDING_CONTEXT_CMD:
5638                 case IWM_TIME_EVENT_CMD:
5639                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5640                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5641                 case IWM_SCAN_ABORT_UMAC:
5642                 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5643                 case IWM_SCAN_OFFLOAD_ABORT_CMD:
5644                 case IWM_REPLY_BEACON_FILTERING_CMD:
5645                 case IWM_MAC_PM_POWER_TABLE:
5646                 case IWM_TIME_QUOTA_CMD:
5647                 case IWM_REMOVE_STA:
5648                 case IWM_TXPATH_FLUSH:
5649                 case IWM_LQ_CMD:
5650                 case IWM_BT_CONFIG:
5651                 case IWM_REPLY_THERMAL_MNG_BACKOFF:
5652                         cresp = (void *)pkt->data;
5653                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
5654                                 memcpy(sc->sc_cmd_resp,
5655                                     pkt, sizeof(*pkt)+sizeof(*cresp));
5656                         }
5657                         break;
5658
5659                 /* ignore */
5660                 case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
5661                         break;
5662
5663                 case IWM_INIT_COMPLETE_NOTIF:
5664                         break;
5665
5666                 case IWM_SCAN_OFFLOAD_COMPLETE: {
5667                         struct iwm_periodic_scan_complete *notif;
5668                         notif = (void *)pkt->data;
5669                         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5670                                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5671                                 ieee80211_runtask(ic, &sc->sc_es_task);
5672                         }
5673                         break;
5674                 }
5675
5676                 case IWM_SCAN_ITERATION_COMPLETE: {
5677                         struct iwm_lmac_scan_complete_notif *notif;
5678                         notif = (void *)pkt->data;
5679                         ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5680                         break;
5681                 }
5682  
5683                 case IWM_SCAN_COMPLETE_UMAC: {
5684                         struct iwm_umac_scan_complete *notif;
5685                         notif = (void *)pkt->data;
5686
5687                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
5688                             "UMAC scan complete, status=0x%x\n",
5689                             notif->status);
5690                         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5691                                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5692                                 ieee80211_runtask(ic, &sc->sc_es_task);
5693                         }
5694                         break;
5695                 }
5696
5697                 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5698                         struct iwm_umac_scan_iter_complete_notif *notif;
5699                         notif = (void *)pkt->data;
5700
5701                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5702                             "complete, status=0x%x, %d channels scanned\n",
5703                             notif->status, notif->scanned_channels);
5704                         ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5705                         break;
5706                 }
5707
5708                 case IWM_REPLY_ERROR: {
5709                         struct iwm_error_resp *resp;
5710                         resp = (void *)pkt->data;
5711
5712                         device_printf(sc->sc_dev,
5713                             "firmware error 0x%x, cmd 0x%x\n",
5714                             le32toh(resp->error_type),
5715                             resp->cmd_id);
5716                         break;
5717                 }
5718
5719                 case IWM_TIME_EVENT_NOTIFICATION: {
5720                         struct iwm_time_event_notif *notif;
5721                         notif = (void *)pkt->data;
5722
5723                         IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5724                             "TE notif status = 0x%x action = 0x%x\n",
5725                             notif->status, notif->action);
5726                         break;
5727                 }
5728
5729                 case IWM_MCAST_FILTER_CMD:
5730                         break;
5731
5732                 case IWM_SCD_QUEUE_CFG: {
5733                         struct iwm_scd_txq_cfg_rsp *rsp;
5734                         rsp = (void *)pkt->data;
5735
5736                         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5737                             "queue cfg token=0x%x sta_id=%d "
5738                             "tid=%d scd_queue=%d\n",
5739                             rsp->token, rsp->sta_id, rsp->tid,
5740                             rsp->scd_queue);
5741                         break;
5742                 }
5743
5744                 default:
5745                         device_printf(sc->sc_dev,
5746                             "frame %d/%d %x UNHANDLED (this should "
5747                             "not happen)\n", qid, idx,
5748                             pkt->len_n_flags);
5749                         break;
5750                 }
5751
5752                 /*
5753                  * Why test bit 0x80?  The Linux driver:
5754                  *
5755                  * There is one exception:  uCode sets bit 15 when it
5756                  * originates the response/notification, i.e. when the
5757                  * response/notification is not a direct response to a
5758                  * command sent by the driver.  For example, uCode issues
5759                  * IWM_REPLY_RX when it sends a received frame to the driver;
5760                  * it is not a direct response to any driver command.
5761                  *
5762                  * Ok, so since when is 7 == 15?  Well, the Linux driver
5763                  * uses a slightly different format for pkt->hdr, and "qid"
5764                  * is actually the upper byte of a two-byte field.
5765                  */
5766                 if (!(pkt->hdr.qid & (1 << 7))) {
5767                         iwm_cmd_done(sc, pkt);
5768                 }
5769
5770                 ADVANCE_RXQ(sc);
5771         }
5772
5773         /*
5774          * Tell the firmware what we have processed.
5775          * Seems like the hardware gets upset unless we align
5776          * the write by 8??
5777          */
5778         hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5779         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
5780 }
5781
/*
 * Primary interrupt handler.
 *
 * Collects the pending interrupt causes (via the ICT table when
 * IWM_FLAG_USE_ICT is set, otherwise directly from the CSR registers),
 * acknowledges them, and services each cause: firmware SW errors (VAP
 * restart), HW errors and rfkill (device stop), firmware-load chunk
 * completion (wakeup), and RX (iwm_notif_intr).
 */
static void
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	int handled = 0;
	int r1, r2, rv = 0;
	int isperiodic = 0;

	IWM_LOCK(sc);
	/* Disable interrupts while we service the current batch. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		/*
		 * NOTE(review): htole32() where le32toh() is meant; the two
		 * are the same byte-swap on every endianness, so behavior is
		 * identical, but le32toh() would read more accurately.
		 */
		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			/* Consume the ICT slot and advance. */
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;
	}

	/* Write the causes back to acknowledge them. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* Safely ignore these bits for debug checks below */
	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
		int i;
		struct ieee80211com *ic = &sc->sc_ic;
		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

#ifdef IWM_DEBUG
		iwm_nic_error(sc);
#endif
		/* Dump driver status (TX and RX rings) while we're here. */
		device_printf(sc->sc_dev, "driver status:\n");
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			device_printf(sc->sc_dev,
			    "  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued);
		}
		device_printf(sc->sc_dev,
		    "  rx ring: cur=%d\n", sc->rxq.cur);
		device_printf(sc->sc_dev,
		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);

		/* Don't stop the device; just do a VAP restart */
		IWM_UNLOCK(sc);

		if (vap == NULL) {
			printf("%s: null vap\n", __func__);
			return;
		}

		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
		    "restarting\n", __func__, vap->iv_state);

		/* XXX TODO: turn this into a callout/taskqueue */
		ieee80211_restart_all(ic);
		/* Note: returns with the lock dropped and interrupts masked. */
		return;
	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		device_printf(sc->sc_dev, "hardware error, stopping device\n");
		iwm_stop(sc);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;
		sc->sc_fw_chunk_done = 1;
		/* Wake the thread sleeping in the firmware-load path. */
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		if (iwm_check_rfkill(sc)) {
			device_printf(sc->sc_dev,
			    "%s: rfkill switch, disabling interface\n",
			    __func__);
			iwm_stop(sc);
		}
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "%s: unhandled interrupts: %x\n", __func__, r1);
	rv = 1;

 out_ena:
	/* Re-enable the interrupts masked at entry. */
	iwm_restore_interrupts(sc);
 out:
	IWM_UNLOCK(sc);
	return;
}
5936
5937 /*
5938  * Autoconf glue-sniffing
5939  */
5940 #define PCI_VENDOR_INTEL                0x8086
5941 #define PCI_PRODUCT_INTEL_WL_3160_1     0x08b3
5942 #define PCI_PRODUCT_INTEL_WL_3160_2     0x08b4
5943 #define PCI_PRODUCT_INTEL_WL_3165_1     0x3165
5944 #define PCI_PRODUCT_INTEL_WL_3165_2     0x3166
5945 #define PCI_PRODUCT_INTEL_WL_7260_1     0x08b1
5946 #define PCI_PRODUCT_INTEL_WL_7260_2     0x08b2
5947 #define PCI_PRODUCT_INTEL_WL_7265_1     0x095a
5948 #define PCI_PRODUCT_INTEL_WL_7265_2     0x095b
5949 #define PCI_PRODUCT_INTEL_WL_8260_1     0x24f3
5950 #define PCI_PRODUCT_INTEL_WL_8260_2     0x24f4
5951
/* Table mapping supported PCI device IDs to probe description strings. */
static const struct iwm_devices {
	uint16_t	device;		/* PCI device ID */
	const char	*name;		/* description set by iwm_probe() */
} iwm_devices[] = {
	{ PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
	{ PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
	{ PCI_PRODUCT_INTEL_WL_3165_1, "Intel Dual Band Wireless AC 3165" },
	{ PCI_PRODUCT_INTEL_WL_3165_2, "Intel Dual Band Wireless AC 3165" },
	{ PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
	{ PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
	{ PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
	{ PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
	{ PCI_PRODUCT_INTEL_WL_8260_1, "Intel Dual Band Wireless AC 8260" },
	{ PCI_PRODUCT_INTEL_WL_8260_2, "Intel Dual Band Wireless AC 8260" },
};
5967
5968 static int
5969 iwm_probe(device_t dev)
5970 {
5971         int i;
5972
5973         for (i = 0; i < nitems(iwm_devices); i++) {
5974                 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5975                     pci_get_device(dev) == iwm_devices[i].device) {
5976                         device_set_desc(dev, iwm_devices[i].name);
5977                         return (BUS_PROBE_DEFAULT);
5978                 }
5979         }
5980
5981         return (ENXIO);
5982 }
5983
5984 static int
5985 iwm_dev_check(device_t dev)
5986 {
5987         struct iwm_softc *sc;
5988
5989         sc = device_get_softc(dev);
5990
5991         switch (pci_get_device(dev)) {
5992         case PCI_PRODUCT_INTEL_WL_3160_1:
5993         case PCI_PRODUCT_INTEL_WL_3160_2:
5994                 sc->cfg = &iwm3160_cfg;
5995                 return (0);
5996         case PCI_PRODUCT_INTEL_WL_3165_1:
5997         case PCI_PRODUCT_INTEL_WL_3165_2:
5998                 sc->cfg = &iwm3165_cfg;
5999                 return (0);
6000         case PCI_PRODUCT_INTEL_WL_7260_1:
6001         case PCI_PRODUCT_INTEL_WL_7260_2:
6002                 sc->cfg = &iwm7260_cfg;
6003                 return (0);
6004         case PCI_PRODUCT_INTEL_WL_7265_1:
6005         case PCI_PRODUCT_INTEL_WL_7265_2:
6006                 sc->cfg = &iwm7265_cfg;
6007                 return (0);
6008         case PCI_PRODUCT_INTEL_WL_8260_1:
6009         case PCI_PRODUCT_INTEL_WL_8260_2:
6010                 sc->cfg = &iwm8260_cfg;
6011                 return (0);
6012         default:
6013                 device_printf(dev, "unknown adapter type\n");
6014                 return ENXIO;
6015         }
6016 }
6017
/* PCI registers */
/* Retry-timeout config register; cleared in iwm_pci_attach(). */
#define PCI_CFG_RETRY_TIMEOUT	0x041
6020
6021 static int
6022 iwm_pci_attach(device_t dev)
6023 {
6024         struct iwm_softc *sc;
6025         int count, error, rid;
6026         uint16_t reg;
6027
6028         sc = device_get_softc(dev);
6029
6030         /* We disable the RETRY_TIMEOUT register (0x41) to keep
6031          * PCI Tx retries from interfering with C3 CPU state */
6032         pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6033
6034         /* Enable bus-mastering and hardware bug workaround. */
6035         pci_enable_busmaster(dev);
6036         reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
6037         /* if !MSI */
6038         if (reg & PCIM_STATUS_INTxSTATE) {
6039                 reg &= ~PCIM_STATUS_INTxSTATE;
6040         }
6041         pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
6042
6043         rid = PCIR_BAR(0);
6044         sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
6045             RF_ACTIVE);
6046         if (sc->sc_mem == NULL) {
6047                 device_printf(sc->sc_dev, "can't map mem space\n");
6048                 return (ENXIO);
6049         }
6050         sc->sc_st = rman_get_bustag(sc->sc_mem);
6051         sc->sc_sh = rman_get_bushandle(sc->sc_mem);
6052
6053         /* Install interrupt handler. */
6054         count = 1;
6055         rid = 0;
6056         if (pci_alloc_msi(dev, &count) == 0)
6057                 rid = 1;
6058         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
6059             (rid != 0 ? 0 : RF_SHAREABLE));
6060         if (sc->sc_irq == NULL) {
6061                 device_printf(dev, "can't map interrupt\n");
6062                         return (ENXIO);
6063         }
6064         error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
6065             NULL, iwm_intr, sc, &sc->sc_ih);
6066         if (sc->sc_ih == NULL) {
6067                 device_printf(dev, "can't establish interrupt");
6068                         return (ENXIO);
6069         }
6070         sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
6071
6072         return (0);
6073 }
6074
6075 static void
6076 iwm_pci_detach(device_t dev)
6077 {
6078         struct iwm_softc *sc = device_get_softc(dev);
6079
6080         if (sc->sc_irq != NULL) {
6081                 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
6082                 bus_release_resource(dev, SYS_RES_IRQ,
6083                     rman_get_rid(sc->sc_irq), sc->sc_irq);
6084                 pci_release_msi(dev);
6085         }
6086         if (sc->sc_mem != NULL)
6087                 bus_release_resource(dev, SYS_RES_MEMORY,
6088                     rman_get_rid(sc->sc_mem), sc->sc_mem);
6089 }
6090
6091
6092
6093 static int
6094 iwm_attach(device_t dev)
6095 {
6096         struct iwm_softc *sc = device_get_softc(dev);
6097         struct ieee80211com *ic = &sc->sc_ic;
6098         int error;
6099         int txq_i, i;
6100
6101         sc->sc_dev = dev;
6102         sc->sc_attached = 1;
6103         IWM_LOCK_INIT(sc);
6104         mbufq_init(&sc->sc_snd, ifqmaxlen);
6105         callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
6106         callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
6107         TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
6108
6109         sc->sc_notif_wait = iwm_notification_wait_init(sc);
6110         if (sc->sc_notif_wait == NULL) {
6111                 device_printf(dev, "failed to init notification wait struct\n");
6112                 goto fail;
6113         }
6114
6115         /* Init phy db */
6116         sc->sc_phy_db = iwm_phy_db_init(sc);
6117         if (!sc->sc_phy_db) {
6118                 device_printf(dev, "Cannot init phy_db\n");
6119                 goto fail;
6120         }
6121
6122         /* PCI attach */
6123         error = iwm_pci_attach(dev);
6124         if (error != 0)
6125                 goto fail;
6126
6127         sc->sc_wantresp = -1;
6128
6129         /* Check device type */
6130         error = iwm_dev_check(dev);
6131         if (error != 0)
6132                 goto fail;
6133
6134         sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
6135         /*
6136          * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
6137          * changed, and now the revision step also includes bit 0-1 (no more
6138          * "dash" value). To keep hw_rev backwards compatible - we'll store it
6139          * in the old format.
6140          */
6141         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
6142                 sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
6143                                 (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
6144
6145         if (iwm_prepare_card_hw(sc) != 0) {
6146                 device_printf(dev, "could not initialize hardware\n");
6147                 goto fail;
6148         }
6149
6150         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
6151                 int ret;
6152                 uint32_t hw_step;
6153
6154                 /*
6155                  * In order to recognize C step the driver should read the
6156                  * chip version id located at the AUX bus MISC address.
6157                  */
6158                 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
6159                             IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
6160                 DELAY(2);
6161
6162                 ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
6163                                    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6164                                    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6165                                    25000);
6166                 if (!ret) {
6167                         device_printf(sc->sc_dev,
6168                             "Failed to wake up the nic\n");
6169                         goto fail;
6170                 }
6171
6172                 if (iwm_nic_lock(sc)) {
6173                         hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
6174                         hw_step |= IWM_ENABLE_WFPM;
6175                         iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
6176                         hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
6177                         hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
6178                         if (hw_step == 0x3)
6179                                 sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
6180                                                 (IWM_SILICON_C_STEP << 2);
6181                         iwm_nic_unlock(sc);
6182                 } else {
6183                         device_printf(sc->sc_dev, "Failed to lock the nic\n");
6184                         goto fail;
6185                 }
6186         }
6187
6188         /* special-case 7265D, it has the same PCI IDs. */
6189         if (sc->cfg == &iwm7265_cfg &&
6190             (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
6191                 sc->cfg = &iwm7265d_cfg;
6192         }
6193
6194         /* Allocate DMA memory for firmware transfers. */
6195         if ((error = iwm_alloc_fwmem(sc)) != 0) {
6196                 device_printf(dev, "could not allocate memory for firmware\n");
6197                 goto fail;
6198         }
6199
6200         /* Allocate "Keep Warm" page. */
6201         if ((error = iwm_alloc_kw(sc)) != 0) {
6202                 device_printf(dev, "could not allocate keep warm page\n");
6203                 goto fail;
6204         }
6205
6206         /* We use ICT interrupts */
6207         if ((error = iwm_alloc_ict(sc)) != 0) {
6208                 device_printf(dev, "could not allocate ICT table\n");
6209                 goto fail;
6210         }
6211
6212         /* Allocate TX scheduler "rings". */
6213         if ((error = iwm_alloc_sched(sc)) != 0) {
6214                 device_printf(dev, "could not allocate TX scheduler rings\n");
6215                 goto fail;
6216         }
6217
6218         /* Allocate TX rings */
6219         for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
6220                 if ((error = iwm_alloc_tx_ring(sc,
6221                     &sc->txq[txq_i], txq_i)) != 0) {
6222                         device_printf(dev,
6223                             "could not allocate TX ring %d\n",
6224                             txq_i);
6225                         goto fail;
6226                 }
6227         }
6228
6229         /* Allocate RX ring. */
6230         if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
6231                 device_printf(dev, "could not allocate RX ring\n");
6232                 goto fail;
6233         }
6234
6235         /* Clear pending interrupts. */
6236         IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
6237
6238         ic->ic_softc = sc;
6239         ic->ic_name = device_get_nameunit(sc->sc_dev);
6240         ic->ic_phytype = IEEE80211_T_OFDM;      /* not only, but not used */
6241         ic->ic_opmode = IEEE80211_M_STA;        /* default to BSS mode */
6242
6243         /* Set device capabilities. */
6244         ic->ic_caps =
6245             IEEE80211_C_STA |
6246             IEEE80211_C_WPA |           /* WPA/RSN */
6247             IEEE80211_C_WME |
6248             IEEE80211_C_SHSLOT |        /* short slot time supported */
6249             IEEE80211_C_SHPREAMBLE      /* short preamble supported */
6250 //          IEEE80211_C_BGSCAN          /* capable of bg scanning */
6251             ;
6252         /* Advertise full-offload scanning */
6253         ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
6254         for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
6255                 sc->sc_phyctxt[i].id = i;
6256                 sc->sc_phyctxt[i].color = 0;
6257                 sc->sc_phyctxt[i].ref = 0;
6258                 sc->sc_phyctxt[i].channel = NULL;
6259         }
6260
6261         /* Default noise floor */
6262         sc->sc_noise = -96;
6263
6264         /* Max RSSI */
6265         sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
6266
6267         sc->sc_preinit_hook.ich_func = iwm_preinit;
6268         sc->sc_preinit_hook.ich_arg = sc;
6269         if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6270                 device_printf(dev, "config_intrhook_establish failed\n");
6271                 goto fail;
6272         }
6273
6274 #ifdef IWM_DEBUG
6275         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6276             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6277             CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6278 #endif
6279
6280         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6281             "<-%s\n", __func__);
6282
6283         return 0;
6284
6285         /* Free allocated memory if something failed during attachment. */
6286 fail:
6287         iwm_detach_local(sc, 0);
6288
6289         return ENXIO;
6290 }
6291
6292 static int
6293 iwm_is_valid_ether_addr(uint8_t *addr)
6294 {
6295         char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6296
6297         if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6298                 return (FALSE);
6299
6300         return (TRUE);
6301 }
6302
6303 static int
6304 iwm_update_edca(struct ieee80211com *ic)
6305 {
6306         struct iwm_softc *sc = ic->ic_softc;
6307
6308         device_printf(sc->sc_dev, "%s: called\n", __func__);
6309         return (0);
6310 }
6311
/*
 * Deferred attach hook, run via config_intrhook once interrupts are
 * available.  Brings the hardware up, runs the init firmware once to
 * obtain NVM data (hw address, band capabilities), then completes
 * net80211 attachment.  On failure the driver tears itself down.
 */
static void
iwm_preinit(void *arg)
{
	struct iwm_softc *sc = arg;
	device_t dev = sc->sc_dev;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s\n", __func__);

	IWM_LOCK(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		IWM_UNLOCK(sc);
		goto fail;
	}

	/* Run the init ucode once to read NVM data, then halt the device. */
	error = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error) {
		IWM_UNLOCK(sc);
		goto fail;
	}
	device_printf(dev,
	    "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));

	/* not all hardware can do 5GHz band */
	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
	IWM_UNLOCK(sc);

	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/*
	 * At this point we've committed - if we fail to do setup,
	 * we now also have to tear down the net80211 state.
	 */
	ieee80211_ifattach(ic);
	/* Override net80211 methods with driver implementations. */
	ic->ic_vap_create = iwm_vap_create;
	ic->ic_vap_delete = iwm_vap_delete;
	ic->ic_raw_xmit = iwm_raw_xmit;
	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_scan_start = iwm_scan_start;
	ic->ic_scan_end = iwm_scan_end;
	ic->ic_update_mcast = iwm_update_mcast;
	ic->ic_getradiocaps = iwm_init_channel_map;
	ic->ic_set_channel = iwm_set_channel;
	ic->ic_scan_curchan = iwm_scan_curchan;
	ic->ic_scan_mindwell = iwm_scan_mindwell;
	ic->ic_wme.wme_update = iwm_update_edca;
	ic->ic_parent = iwm_parent;
	ic->ic_transmit = iwm_transmit;
	iwm_radiotap_attach(sc);
	if (bootverbose)
		ieee80211_announce(ic);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);
	config_intrhook_disestablish(&sc->sc_preinit_hook);

	return;
fail:
	/* The hook must be disestablished even on the failure path. */
	config_intrhook_disestablish(&sc->sc_preinit_hook);
	iwm_detach_local(sc, 0);
}
6382
6383 /*
6384  * Attach the interface to 802.11 radiotap.
6385  */
6386 static void
6387 iwm_radiotap_attach(struct iwm_softc *sc)
6388 {
6389         struct ieee80211com *ic = &sc->sc_ic;
6390
6391         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6392             "->%s begin\n", __func__);
6393         ieee80211_radiotap_attach(ic,
6394             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6395                 IWM_TX_RADIOTAP_PRESENT,
6396             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6397                 IWM_RX_RADIOTAP_PRESENT);
6398         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6399             "->%s end\n", __func__);
6400 }
6401
6402 static struct ieee80211vap *
6403 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6404     enum ieee80211_opmode opmode, int flags,
6405     const uint8_t bssid[IEEE80211_ADDR_LEN],
6406     const uint8_t mac[IEEE80211_ADDR_LEN])
6407 {
6408         struct iwm_vap *ivp;
6409         struct ieee80211vap *vap;
6410
6411         if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6412                 return NULL;
6413         ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6414         vap = &ivp->iv_vap;
6415         ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6416         vap->iv_bmissthreshold = 10;            /* override default */
6417         /* Override with driver methods. */
6418         ivp->iv_newstate = vap->iv_newstate;
6419         vap->iv_newstate = iwm_newstate;
6420
6421         ieee80211_ratectl_init(vap);
6422         /* Complete setup. */
6423         ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6424             mac);
6425         ic->ic_opmode = opmode;
6426
6427         return vap;
6428 }
6429
6430 static void
6431 iwm_vap_delete(struct ieee80211vap *vap)
6432 {
6433         struct iwm_vap *ivp = IWM_VAP(vap);
6434
6435         ieee80211_ratectl_deinit(vap);
6436         ieee80211_vap_detach(vap);
6437         free(ivp, M_80211_VAP);
6438 }
6439
6440 static void
6441 iwm_scan_start(struct ieee80211com *ic)
6442 {
6443         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6444         struct iwm_softc *sc = ic->ic_softc;
6445         int error;
6446
6447         IWM_LOCK(sc);
6448         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6449                 /* This should not be possible */
6450                 device_printf(sc->sc_dev,
6451                     "%s: Previous scan not completed yet\n", __func__);
6452         }
6453         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6454                 error = iwm_mvm_umac_scan(sc);
6455         else
6456                 error = iwm_mvm_lmac_scan(sc);
6457         if (error != 0) {
6458                 device_printf(sc->sc_dev, "could not initiate scan\n");
6459                 IWM_UNLOCK(sc);
6460                 ieee80211_cancel_scan(vap);
6461         } else {
6462                 sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6463                 iwm_led_blink_start(sc);
6464                 IWM_UNLOCK(sc);
6465         }
6466 }
6467
/*
 * net80211 scan_end method: restore LED state and make sure the
 * firmware scan has really stopped before another one may start.
 */
static void
iwm_scan_end(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;

	IWM_LOCK(sc);
	iwm_led_blink_stop(sc);
	/* Keep the LED lit while we remain associated. */
	if (vap->iv_state == IEEE80211_S_RUN)
		iwm_mvm_led_enable(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/*
		 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
		 * taskqueue.
		 */
		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
		iwm_mvm_scan_stop_wait(sc);
	}
	IWM_UNLOCK(sc);

	/*
	 * Make sure we don't race, if sc_es_task is still enqueued here.
	 * This is to make sure that it won't call ieee80211_scan_done
	 * when we have already started the next scan.
	 */
	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
}
6496
/*
 * net80211 update_mcast method: intentionally a no-op; the driver
 * does not program multicast filters.
 */
static void
iwm_update_mcast(struct ieee80211com *ic)
{
}
6501
/*
 * net80211 set_channel method: intentionally a no-op; no direct
 * channel programming is done from this path.
 */
static void
iwm_set_channel(struct ieee80211com *ic)
{
}
6506
/*
 * net80211 scan_curchan method: intentionally a no-op; channel
 * stepping is handled by the offloaded firmware scan.
 */
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
6511
/*
 * net80211 scan_mindwell method: intentionally a no-op; dwell timing
 * is handled by the offloaded firmware scan.  (The redundant bare
 * "return;" is dropped for consistency with the other empty stubs.)
 */
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
6517
/*
 * (Re)initialization task.  Serialized against other init/stop paths
 * through the IWM_FLAG_BUSY soft lock: waiters sleep on sc_flags and
 * are woken when the flag is cleared.
 */
void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;

	IWM_LOCK(sc);
	/* Wait until any concurrent init/stop sequence has finished. */
	while (sc->sc_flags & IWM_FLAG_BUSY)
		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
	sc->sc_flags |= IWM_FLAG_BUSY;
	iwm_stop(sc);
	/* Only bring the device back up if an interface is running. */
	if (sc->sc_ic.ic_nrunning > 0)
		iwm_init(sc);
	sc->sc_flags &= ~IWM_FLAG_BUSY;
	wakeup(&sc->sc_flags);
	IWM_UNLOCK(sc);
}
6534
6535 static int
6536 iwm_resume(device_t dev)
6537 {
6538         struct iwm_softc *sc = device_get_softc(dev);
6539         int do_reinit = 0;
6540
6541         /*
6542          * We disable the RETRY_TIMEOUT register (0x41) to keep
6543          * PCI Tx retries from interfering with C3 CPU state.
6544          */
6545         pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6546         iwm_init_task(device_get_softc(dev));
6547
6548         IWM_LOCK(sc);
6549         if (sc->sc_flags & IWM_FLAG_SCANNING) {
6550                 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6551                 do_reinit = 1;
6552         }
6553         IWM_UNLOCK(sc);
6554
6555         if (do_reinit)
6556                 ieee80211_resume_all(&sc->sc_ic);
6557
6558         return 0;
6559 }
6560
6561 static int
6562 iwm_suspend(device_t dev)
6563 {
6564         int do_stop = 0;
6565         struct iwm_softc *sc = device_get_softc(dev);
6566
6567         do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6568
6569         ieee80211_suspend_all(&sc->sc_ic);
6570
6571         if (do_stop) {
6572                 IWM_LOCK(sc);
6573                 iwm_stop(sc);
6574                 sc->sc_flags |= IWM_FLAG_SCANNING;
6575                 IWM_UNLOCK(sc);
6576         }
6577
6578         return (0);
6579 }
6580
/*
 * Common teardown path, shared between attach-failure handling and
 * the newbus detach method.  'do_net80211' selects whether net80211
 * state (only attached late, in iwm_preinit) is torn down as well.
 * Idempotent: guarded by sc_attached.  Returns 0.
 */
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	device_t dev = sc->sc_dev;
	int i;

	/* Only tear down once. */
	if (!sc->sc_attached)
		return 0;
	sc->sc_attached = 0;

	if (do_net80211)
		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);

	/* Stop timers and the hardware before freeing any resources. */
	callout_drain(&sc->sc_led_blink_to);
	callout_drain(&sc->sc_watchdog_to);
	iwm_stop_device(sc);
	if (do_net80211) {
		ieee80211_ifdetach(&sc->sc_ic);
	}

	iwm_phy_db_free(sc->sc_phy_db);
	sc->sc_phy_db = NULL;

	iwm_free_nvm_data(sc->nvm_data);

	/* Free descriptor rings */
	iwm_free_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwm_free_tx_ring(sc, &sc->txq[i]);

	/* Free firmware */
	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/* Free scheduler */
	iwm_dma_contig_free(&sc->sched_dma);
	iwm_dma_contig_free(&sc->ict_dma);
	iwm_dma_contig_free(&sc->kw_dma);
	iwm_dma_contig_free(&sc->fw_dma);

	/* Finished with the hardware - detach things */
	iwm_pci_detach(dev);

	if (sc->sc_notif_wait != NULL) {
		iwm_notification_wait_free(sc->sc_notif_wait);
		sc->sc_notif_wait = NULL;
	}

	/* Drain queued mbufs and destroy the softc lock last. */
	mbufq_drain(&sc->sc_snd);
	IWM_LOCK_DESTROY(sc);

	return (0);
}
6635
6636 static int
6637 iwm_detach(device_t dev)
6638 {
6639         struct iwm_softc *sc = device_get_softc(dev);
6640
6641         return (iwm_detach_local(sc, 1));
6642 }
6643
/* Newbus device method table for the iwm PCI driver. */
static device_method_t iwm_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		iwm_probe),
	DEVMETHOD(device_attach,	iwm_attach),
	DEVMETHOD(device_detach,	iwm_detach),
	DEVMETHOD(device_suspend,	iwm_suspend),
	DEVMETHOD(device_resume,	iwm_resume),

	DEVMETHOD_END
};
6654
/* Driver description: name, methods, and per-instance softc size. */
static driver_t iwm_pci_driver = {
	"iwm",
	iwm_pci_methods,
	sizeof (struct iwm_softc)
};

static devclass_t iwm_devclass;

/* Register on the PCI bus and declare module dependencies. */
DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);