1 /*      $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $     */
2
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 #include <sys/cdefs.h>
106 __FBSDID("$FreeBSD$");
107
108 #include "opt_wlan.h"
109
110 #include <sys/param.h>
111 #include <sys/bus.h>
112 #include <sys/conf.h>
113 #include <sys/endian.h>
114 #include <sys/firmware.h>
115 #include <sys/kernel.h>
116 #include <sys/malloc.h>
117 #include <sys/mbuf.h>
118 #include <sys/mutex.h>
119 #include <sys/module.h>
120 #include <sys/proc.h>
121 #include <sys/rman.h>
122 #include <sys/socket.h>
123 #include <sys/sockio.h>
124 #include <sys/sysctl.h>
125 #include <sys/linker.h>
126
127 #include <machine/bus.h>
128 #include <machine/endian.h>
129 #include <machine/resource.h>
130
131 #include <dev/pci/pcivar.h>
132 #include <dev/pci/pcireg.h>
133
134 #include <net/bpf.h>
135
136 #include <net/if.h>
137 #include <net/if_var.h>
138 #include <net/if_arp.h>
139 #include <net/if_dl.h>
140 #include <net/if_media.h>
141 #include <net/if_types.h>
142
143 #include <netinet/in.h>
144 #include <netinet/in_systm.h>
145 #include <netinet/if_ether.h>
146 #include <netinet/ip.h>
147
148 #include <net80211/ieee80211_var.h>
149 #include <net80211/ieee80211_regdomain.h>
150 #include <net80211/ieee80211_ratectl.h>
151 #include <net80211/ieee80211_radiotap.h>
152
153 #include <dev/iwm/if_iwmreg.h>
154 #include <dev/iwm/if_iwmvar.h>
155 #include <dev/iwm/if_iwm_debug.h>
156 #include <dev/iwm/if_iwm_util.h>
157 #include <dev/iwm/if_iwm_binding.h>
158 #include <dev/iwm/if_iwm_phy_db.h>
159 #include <dev/iwm/if_iwm_mac_ctxt.h>
160 #include <dev/iwm/if_iwm_phy_ctxt.h>
161 #include <dev/iwm/if_iwm_time_event.h>
162 #include <dev/iwm/if_iwm_power.h>
163 #include <dev/iwm/if_iwm_scan.h>
164
165 #include <dev/iwm/if_iwm_pcie_trans.h>
166 #include <dev/iwm/if_iwm_led.h>
167
168 const uint8_t iwm_nvm_channels[] = {
169         /* 2.4 GHz */
170         1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
171         /* 5 GHz */
172         36, 40, 44, 48, 52, 56, 60, 64,
173         100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
174         149, 153, 157, 161, 165
175 };
176 _Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
177     "IWM_NUM_CHANNELS is too small");
178
179 const uint8_t iwm_nvm_channels_8000[] = {
180         /* 2.4 GHz */
181         1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
182         /* 5 GHz */
183         36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
184         96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
185         149, 153, 157, 161, 165, 169, 173, 177, 181
186 };
187 _Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
188     "IWM_NUM_CHANNELS_8000 is too small");
189
190 #define IWM_NUM_2GHZ_CHANNELS   14
191 #define IWM_N_HW_ADDR_MASK      0xF
192
193 /*
194  * XXX For now, there's simply a fixed set of rate table entries
195  * that are populated.
196  */
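/*
 * Note: "rate" is in 500 kb/s units, following the usual IEEE 802.11
 * convention (2 = 1 Mb/s CCK, ..., 108 = 54 Mb/s OFDM), and "plcp" is the
 * corresponding PLCP signal value handed to the firmware.
 */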
197 const struct iwm_rate {
198         uint8_t rate;
199         uint8_t plcp;
200 } iwm_rates[] = {
201         {   2,  IWM_RATE_1M_PLCP  },
202         {   4,  IWM_RATE_2M_PLCP  },
203         {  11,  IWM_RATE_5M_PLCP  },
204         {  22,  IWM_RATE_11M_PLCP },
205         {  12,  IWM_RATE_6M_PLCP  },
206         {  18,  IWM_RATE_9M_PLCP  },
207         {  24,  IWM_RATE_12M_PLCP },
208         {  36,  IWM_RATE_18M_PLCP },
209         {  48,  IWM_RATE_24M_PLCP },
210         {  72,  IWM_RATE_36M_PLCP },
211         {  96,  IWM_RATE_48M_PLCP },
212         { 108,  IWM_RATE_54M_PLCP },
213 };
214 #define IWM_RIDX_CCK    0
215 #define IWM_RIDX_OFDM   4
216 #define IWM_RIDX_MAX    (nitems(iwm_rates)-1)
217 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
218 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
219
220 struct iwm_nvm_section {
221         uint16_t length;
222         uint8_t *data;
223 };
224
225 static int      iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
226 static int      iwm_firmware_store_section(struct iwm_softc *,
227                                            enum iwm_ucode_type,
228                                            const uint8_t *, size_t);
229 static int      iwm_set_default_calib(struct iwm_softc *, const void *);
230 static void     iwm_fw_info_free(struct iwm_fw_info *);
231 static int      iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
232 static void     iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
233 static int      iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
234                                      bus_size_t, bus_size_t);
235 static void     iwm_dma_contig_free(struct iwm_dma_info *);
236 static int      iwm_alloc_fwmem(struct iwm_softc *);
237 static void     iwm_free_fwmem(struct iwm_softc *);
238 static int      iwm_alloc_sched(struct iwm_softc *);
239 static void     iwm_free_sched(struct iwm_softc *);
240 static int      iwm_alloc_kw(struct iwm_softc *);
241 static void     iwm_free_kw(struct iwm_softc *);
242 static int      iwm_alloc_ict(struct iwm_softc *);
243 static void     iwm_free_ict(struct iwm_softc *);
244 static int      iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
245 static void     iwm_disable_rx_dma(struct iwm_softc *);
246 static void     iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
247 static void     iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
248 static int      iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
249                                   int);
250 static void     iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
251 static void     iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
252 static void     iwm_enable_interrupts(struct iwm_softc *);
253 static void     iwm_restore_interrupts(struct iwm_softc *);
254 static void     iwm_disable_interrupts(struct iwm_softc *);
255 static void     iwm_ict_reset(struct iwm_softc *);
256 static int      iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
257 static void     iwm_stop_device(struct iwm_softc *);
258 static void     iwm_mvm_nic_config(struct iwm_softc *);
259 static int      iwm_nic_rx_init(struct iwm_softc *);
260 static int      iwm_nic_tx_init(struct iwm_softc *);
261 static int      iwm_nic_init(struct iwm_softc *);
262 static int      iwm_enable_txq(struct iwm_softc *, int, int, int);
263 static int      iwm_post_alive(struct iwm_softc *);
264 static int      iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
265                                    uint16_t, uint8_t *, uint16_t *);
266 static int      iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
267                                      uint16_t *, size_t);
268 static uint32_t iwm_eeprom_channel_flags(uint16_t);
269 static void     iwm_add_channel_band(struct iwm_softc *,
270                     struct ieee80211_channel[], int, int *, int, size_t,
271                     const uint8_t[]);
272 static void     iwm_init_channel_map(struct ieee80211com *, int, int *,
273                     struct ieee80211_channel[]);
274 static int      iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
275                                    const uint16_t *, const uint16_t *,
276                                    const uint16_t *, const uint16_t *,
277                                    const uint16_t *);
278 static void     iwm_set_hw_address_8000(struct iwm_softc *,
279                                         struct iwm_nvm_data *,
280                                         const uint16_t *, const uint16_t *);
281 static int      iwm_get_sku(const struct iwm_softc *, const uint16_t *,
282                             const uint16_t *);
283 static int      iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
284 static int      iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
285                                   const uint16_t *);
286 static int      iwm_get_n_hw_addrs(const struct iwm_softc *,
287                                    const uint16_t *);
288 static void     iwm_set_radio_cfg(const struct iwm_softc *,
289                                   struct iwm_nvm_data *, uint32_t);
290 static int      iwm_parse_nvm_sections(struct iwm_softc *,
291                                        struct iwm_nvm_section *);
292 static int      iwm_nvm_init(struct iwm_softc *);
293 static int      iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
294                                        const uint8_t *, uint32_t);
295 static int      iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
296                                         const uint8_t *, uint32_t);
297 static int      iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
298 static int      iwm_load_cpu_sections_8000(struct iwm_softc *,
299                                            struct iwm_fw_sects *, int , int *);
300 static int      iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
301 static int      iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
302 static int      iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
303 static int      iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
304 static int      iwm_send_phy_cfg_cmd(struct iwm_softc *);
305 static int      iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
306                                               enum iwm_ucode_type);
307 static int      iwm_run_init_mvm_ucode(struct iwm_softc *, int);
308 static int      iwm_rx_addbuf(struct iwm_softc *, int, int);
309 static int      iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
310 static int      iwm_mvm_get_signal_strength(struct iwm_softc *,
311                                             struct iwm_rx_phy_info *);
312 static void     iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
313                                       struct iwm_rx_packet *,
314                                       struct iwm_rx_data *);
315 static int      iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
316 static void     iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
317                                    struct iwm_rx_data *);
318 static int      iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
319                                          struct iwm_rx_packet *,
320                                          struct iwm_node *);
321 static void     iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
322                                   struct iwm_rx_data *);
323 static void     iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
324 #if 0
325 static void     iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
326                                  uint16_t);
327 #endif
328 static const struct iwm_rate *
329         iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
330                         struct ieee80211_frame *, struct iwm_tx_cmd *);
331 static int      iwm_tx(struct iwm_softc *, struct mbuf *,
332                        struct ieee80211_node *, int);
333 static int      iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
334                              const struct ieee80211_bpf_params *);
335 static int      iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
336                                                 struct iwm_mvm_add_sta_cmd_v7 *,
337                                                 int *);
338 static int      iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
339                                        int);
340 static int      iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
341 static int      iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
342 static int      iwm_mvm_add_int_sta_common(struct iwm_softc *,
343                                            struct iwm_int_sta *,
344                                            const uint8_t *, uint16_t, uint16_t);
345 static int      iwm_mvm_add_aux_sta(struct iwm_softc *);
346 static int      iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
347 static int      iwm_auth(struct ieee80211vap *, struct iwm_softc *);
348 static int      iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
349 static int      iwm_release(struct iwm_softc *, struct iwm_node *);
350 static struct ieee80211_node *
351                 iwm_node_alloc(struct ieee80211vap *,
352                                const uint8_t[IEEE80211_ADDR_LEN]);
353 static void     iwm_setrates(struct iwm_softc *, struct iwm_node *);
354 static int      iwm_media_change(struct ifnet *);
355 static int      iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
356 static void     iwm_endscan_cb(void *, int);
357 static void     iwm_mvm_fill_sf_command(struct iwm_softc *,
358                                         struct iwm_sf_cfg_cmd *,
359                                         struct ieee80211_node *);
360 static int      iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
361 static int      iwm_send_bt_init_conf(struct iwm_softc *);
362 static int      iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
363 static void     iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
364 static int      iwm_init_hw(struct iwm_softc *);
365 static void     iwm_init(struct iwm_softc *);
366 static void     iwm_start(struct iwm_softc *);
367 static void     iwm_stop(struct iwm_softc *);
368 static void     iwm_watchdog(void *);
369 static void     iwm_parent(struct ieee80211com *);
370 #ifdef IWM_DEBUG
371 static const char *
372                 iwm_desc_lookup(uint32_t);
373 static void     iwm_nic_error(struct iwm_softc *);
374 static void     iwm_nic_umac_error(struct iwm_softc *);
375 #endif
376 static void     iwm_notif_intr(struct iwm_softc *);
377 static void     iwm_intr(void *);
378 static int      iwm_attach(device_t);
379 static int      iwm_is_valid_ether_addr(uint8_t *);
380 static void     iwm_preinit(void *);
381 static int      iwm_detach_local(struct iwm_softc *sc, int);
382 static void     iwm_init_task(void *);
383 static void     iwm_radiotap_attach(struct iwm_softc *);
384 static struct ieee80211vap *
385                 iwm_vap_create(struct ieee80211com *,
386                                const char [IFNAMSIZ], int,
387                                enum ieee80211_opmode, int,
388                                const uint8_t [IEEE80211_ADDR_LEN],
389                                const uint8_t [IEEE80211_ADDR_LEN]);
390 static void     iwm_vap_delete(struct ieee80211vap *);
391 static void     iwm_scan_start(struct ieee80211com *);
392 static void     iwm_scan_end(struct ieee80211com *);
393 static void     iwm_update_mcast(struct ieee80211com *);
394 static void     iwm_set_channel(struct ieee80211com *);
395 static void     iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
396 static void     iwm_scan_mindwell(struct ieee80211_scan_state *);
397 static int      iwm_detach(device_t);
398
399 /*
400  * Firmware parser.
401  */
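/*
 * Layout of the TLV firmware image, as parsed by iwm_read_firmware() below:
 * a struct iwm_tlv_ucode_header (a leading zero word, the IWM_TLV_UCODE_MAGIC
 * value, a packed version word, then the payload) followed by a sequence of
 * TLV records.  Each record is a struct iwm_ucode_tlv carrying a type and a
 * length, followed by "length" bytes of payload, padded so that the next
 * record starts on a 4-byte boundary.
 */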
402
403 static int
404 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
405 {
406         const struct iwm_fw_cscheme_list *l = (const void *)data;
407
408         if (dlen < sizeof(*l) ||
409             dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
410                 return EINVAL;
411
412         /* we don't actually store anything for now, always use s/w crypto */
413
414         return 0;
415 }
416
417 static int
418 iwm_firmware_store_section(struct iwm_softc *sc,
419     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
420 {
421         struct iwm_fw_sects *fws;
422         struct iwm_fw_onesect *fwone;
423
424         if (type >= IWM_UCODE_TYPE_MAX)
425                 return EINVAL;
426         if (dlen < sizeof(uint32_t))
427                 return EINVAL;
428
429         fws = &sc->sc_fw.fw_sects[type];
430         if (fws->fw_count >= IWM_UCODE_SECT_MAX)
431                 return EINVAL;
432
433         fwone = &fws->fw_sect[fws->fw_count];
434
435         /* the first 32 bits are the device load offset */
436         memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
437
438         /* rest is data */
439         fwone->fws_data = data + sizeof(uint32_t);
440         fwone->fws_len = dlen - sizeof(uint32_t);
441
442         fws->fw_count++;
443         fws->fw_totlen += fwone->fws_len;
444
445         return 0;
446 }
447
448 /* iwlwifi: iwl-drv.c */
449 struct iwm_tlv_calib_data {
450         uint32_t ucode_type;
451         struct iwm_tlv_calib_ctrl calib;
452 } __packed;
453
454 static int
455 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
456 {
457         const struct iwm_tlv_calib_data *def_calib = data;
458         uint32_t ucode_type = le32toh(def_calib->ucode_type);
459
460         if (ucode_type >= IWM_UCODE_TYPE_MAX) {
461                 device_printf(sc->sc_dev,
462                     "Wrong ucode_type %u for default "
463                     "calibration.\n", ucode_type);
464                 return EINVAL;
465         }
466
467         sc->sc_default_calib[ucode_type].flow_trigger =
468             def_calib->calib.flow_trigger;
469         sc->sc_default_calib[ucode_type].event_trigger =
470             def_calib->calib.event_trigger;
471
472         return 0;
473 }
474
475 static void
476 iwm_fw_info_free(struct iwm_fw_info *fw)
477 {
478         firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
479         fw->fw_fp = NULL;
480         /* don't touch fw->fw_status */
481         memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
482 }
483
484 static int
485 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
486 {
487         struct iwm_fw_info *fw = &sc->sc_fw;
488         const struct iwm_tlv_ucode_header *uhdr;
489         struct iwm_ucode_tlv tlv;
490         enum iwm_ucode_tlv_type tlv_type;
491         const struct firmware *fwp;
492         const uint8_t *data;
493         int error = 0;
494         size_t len;
495
496         if (fw->fw_status == IWM_FW_STATUS_DONE &&
497             ucode_type != IWM_UCODE_TYPE_INIT)
498                 return 0;
499
500         while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
501                 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
502         fw->fw_status = IWM_FW_STATUS_INPROGRESS;
503
504         if (fw->fw_fp != NULL)
505                 iwm_fw_info_free(fw);
506
507         /*
508          * Load firmware into driver memory.
509          * fw_fp will be set.
510          */
511         IWM_UNLOCK(sc);
512         fwp = firmware_get(sc->sc_fwname);
513         IWM_LOCK(sc);
514         if (fwp == NULL) {
515                 device_printf(sc->sc_dev,
516                     "could not read firmware %s\n", sc->sc_fwname);
517                 error = ENOENT;
518                 goto out;
519         }
520         fw->fw_fp = fwp;
521
522         /* (Re-)Initialize default values. */
523         sc->sc_capaflags = 0;
524         sc->sc_capa_n_scan_channels = IWM_MAX_NUM_SCAN_CHANNELS;
525         memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
526         memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
527
528         /*
529          * Parse firmware contents
530          */
531
532         uhdr = (const void *)fw->fw_fp->data;
533         if (*(const uint32_t *)fw->fw_fp->data != 0
534             || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
535                 device_printf(sc->sc_dev, "invalid firmware %s\n",
536                     sc->sc_fwname);
537                 error = EINVAL;
538                 goto out;
539         }
540
541         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
542             IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
543             IWM_UCODE_MINOR(le32toh(uhdr->ver)),
544             IWM_UCODE_API(le32toh(uhdr->ver)));
545         data = uhdr->data;
546         len = fw->fw_fp->datasize - sizeof(*uhdr);
547
548         while (len >= sizeof(tlv)) {
549                 size_t tlv_len;
550                 const void *tlv_data;
551
552                 memcpy(&tlv, data, sizeof(tlv));
553                 tlv_len = le32toh(tlv.length);
554                 tlv_type = le32toh(tlv.type);
555
556                 len -= sizeof(tlv);
557                 data += sizeof(tlv);
558                 tlv_data = data;
559
560                 if (len < tlv_len) {
561                         device_printf(sc->sc_dev,
562                             "firmware too short: %zu bytes\n",
563                             len);
564                         error = EINVAL;
565                         goto parse_out;
566                 }
567
568                 switch ((int)tlv_type) {
569                 case IWM_UCODE_TLV_PROBE_MAX_LEN:
570                         if (tlv_len < sizeof(uint32_t)) {
571                                 device_printf(sc->sc_dev,
572                                     "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
573                                     __func__,
574                                     (int) tlv_len);
575                                 error = EINVAL;
576                                 goto parse_out;
577                         }
578                         sc->sc_capa_max_probe_len
579                             = le32toh(*(const uint32_t *)tlv_data);
580                         /* limit it to something sensible */
581                         if (sc->sc_capa_max_probe_len >
582                             IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
583                                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
584                                     "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
585                                     "ridiculous\n", __func__);
586                                 error = EINVAL;
587                                 goto parse_out;
588                         }
589                         break;
590                 case IWM_UCODE_TLV_PAN:
591                         if (tlv_len) {
592                                 device_printf(sc->sc_dev,
593                                     "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
594                                     __func__,
595                                     (int) tlv_len);
596                                 error = EINVAL;
597                                 goto parse_out;
598                         }
599                         sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
600                         break;
601                 case IWM_UCODE_TLV_FLAGS:
602                         if (tlv_len < sizeof(uint32_t)) {
603                                 device_printf(sc->sc_dev,
604                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
605                                     __func__,
606                                     (int) tlv_len);
607                                 error = EINVAL;
608                                 goto parse_out;
609                         }
610                         /*
611                          * Apparently there can be many flags, but Linux driver
612                          * parses only the first one, and so do we.
613                          *
614                          * XXX: why does this override IWM_UCODE_TLV_PAN?
615                          * Intentional or a bug?  Observations from
616                          * current firmware file:
617                          *  1) TLV_PAN is parsed first
618                          *  2) TLV_FLAGS contains TLV_FLAGS_PAN
619                          * ==> this resets TLV_PAN to itself... hnnnk
620                          */
621                         sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
622                         break;
623                 case IWM_UCODE_TLV_CSCHEME:
624                         if ((error = iwm_store_cscheme(sc,
625                             tlv_data, tlv_len)) != 0) {
626                                 device_printf(sc->sc_dev,
627                                     "%s: iwm_store_cscheme(): returned %d\n",
628                                     __func__,
629                                     error);
630                                 goto parse_out;
631                         }
632                         break;
633                 case IWM_UCODE_TLV_NUM_OF_CPU: {
634                         uint32_t num_cpu;
635                         if (tlv_len != sizeof(uint32_t)) {
636                                 device_printf(sc->sc_dev,
637                                     "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
638                                     __func__,
639                                     (int) tlv_len);
640                                 error = EINVAL;
641                                 goto parse_out;
642                         }
643                         num_cpu = le32toh(*(const uint32_t *)tlv_data);
644                         if (num_cpu < 1 || num_cpu > 2) {
645                                 device_printf(sc->sc_dev,
646                                     "%s: Driver supports only 1 or 2 CPUs\n",
647                                     __func__);
648                                 error = EINVAL;
649                                 goto parse_out;
650                         }
651                         break;
652                 }
653                 case IWM_UCODE_TLV_SEC_RT:
654                         if ((error = iwm_firmware_store_section(sc,
655                             IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0) {
656                                 device_printf(sc->sc_dev,
657                                     "%s: IWM_UCODE_TYPE_REGULAR: iwm_firmware_store_section() failed; %d\n",
658                                     __func__,
659                                     error);
660                                 goto parse_out;
661                         }
662                         break;
663                 case IWM_UCODE_TLV_SEC_INIT:
664                         if ((error = iwm_firmware_store_section(sc,
665                             IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0) {
666                                 device_printf(sc->sc_dev,
667                                     "%s: IWM_UCODE_TYPE_INIT: iwm_firmware_store_section() failed; %d\n",
668                                     __func__,
669                                     error);
670                                 goto parse_out;
671                         }
672                         break;
673                 case IWM_UCODE_TLV_SEC_WOWLAN:
674                         if ((error = iwm_firmware_store_section(sc,
675                             IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0) {
676                                 device_printf(sc->sc_dev,
677                                     "%s: IWM_UCODE_TYPE_WOW: iwm_firmware_store_section() failed; %d\n",
678                                     __func__,
679                                     error);
680                                 goto parse_out;
681                         }
682                         break;
683                 case IWM_UCODE_TLV_DEF_CALIB:
684                         if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
685                                 device_printf(sc->sc_dev,
686                                     "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%d) != sizeof(iwm_tlv_calib_data) (%d)\n",
687                                     __func__,
688                                     (int) tlv_len,
689                                     (int) sizeof(struct iwm_tlv_calib_data));
690                                 error = EINVAL;
691                                 goto parse_out;
692                         }
693                         if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
694                                 device_printf(sc->sc_dev,
695                                     "%s: iwm_set_default_calib() failed: %d\n",
696                                     __func__,
697                                     error);
698                                 goto parse_out;
699                         }
700                         break;
701                 case IWM_UCODE_TLV_PHY_SKU:
702                         if (tlv_len != sizeof(uint32_t)) {
703                                 error = EINVAL;
704                                 device_printf(sc->sc_dev,
705                                     "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) != sizeof(uint32_t)\n",
706                                     __func__,
707                                     (int) tlv_len);
708                                 goto parse_out;
709                         }
710                         sc->sc_fw_phy_config =
711                             le32toh(*(const uint32_t *)tlv_data);
712                         break;
713
714                 case IWM_UCODE_TLV_API_CHANGES_SET: {
715                         const struct iwm_ucode_api *api;
716                         if (tlv_len != sizeof(*api)) {
717                                 error = EINVAL;
718                                 goto parse_out;
719                         }
720                         api = (const struct iwm_ucode_api *)tlv_data;
721                         /* Flags may exceed 32 bits in future firmware. */
722                         if (le32toh(api->api_index) > 0) {
723                                 device_printf(sc->sc_dev,
724                                     "unsupported API index %d\n",
725                                     le32toh(api->api_index));
726                                 goto parse_out;
727                         }
728                         sc->sc_ucode_api = le32toh(api->api_flags);
729                         break;
730                 }
731
732                 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
733                         const struct iwm_ucode_capa *capa;
734                         int idx, i;
735                         if (tlv_len != sizeof(*capa)) {
736                                 error = EINVAL;
737                                 goto parse_out;
738                         }
739                         capa = (const struct iwm_ucode_capa *)tlv_data;
740                         idx = le32toh(capa->api_index);
741                         if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
742                                 device_printf(sc->sc_dev,
743                                     "unsupported API index %d\n", idx);
744                                 goto parse_out;
745                         }
746                         for (i = 0; i < 32; i++) {
747                                 if ((le32toh(capa->api_capa) & (1U << i)) == 0)
748                                         continue;
749                                 setbit(sc->sc_enabled_capa, i + (32 * idx));
750                         }
751                         break;
752                 }
753
754                 case 48: /* undocumented TLV */
755                 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
756                 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
757                         /* ignore, not used by current driver */
758                         break;
759
760                 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
761                         if ((error = iwm_firmware_store_section(sc,
762                             IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
763                             tlv_len)) != 0)
764                                 goto parse_out;
765                         break;
766
767                 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
768                         if (tlv_len != sizeof(uint32_t)) {
769                                 error = EINVAL;
770                                 goto parse_out;
771                         }
772                         sc->sc_capa_n_scan_channels =
773                           le32toh(*(const uint32_t *)tlv_data);
774                         break;
775
776                 case IWM_UCODE_TLV_FW_VERSION:
777                         if (tlv_len != sizeof(uint32_t) * 3) {
778                                 error = EINVAL;
779                                 goto parse_out;
780                         }
781                         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
782                             "%d.%d.%d",
783                             le32toh(((const uint32_t *)tlv_data)[0]),
784                             le32toh(((const uint32_t *)tlv_data)[1]),
785                             le32toh(((const uint32_t *)tlv_data)[2]));
786                         break;
787
788                 default:
789                         device_printf(sc->sc_dev,
790                             "%s: unknown firmware section %d, abort\n",
791                             __func__, tlv_type);
792                         error = EINVAL;
793                         goto parse_out;
794                 }
795
796                 len -= roundup(tlv_len, 4);
797                 data += roundup(tlv_len, 4);
798         }
799
800         KASSERT(error == 0, ("unhandled error"));
801
802  parse_out:
803         if (error) {
804                 device_printf(sc->sc_dev, "firmware parse error %d, "
805                     "section type %d\n", error, tlv_type);
806         }
807
808         if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
809                 device_printf(sc->sc_dev,
810                     "device uses unsupported power ops\n");
811                 error = ENOTSUP;
812         }
813
814  out:
815         if (error) {
816                 fw->fw_status = IWM_FW_STATUS_NONE;
817                 if (fw->fw_fp != NULL)
818                         iwm_fw_info_free(fw);
819         } else
820                 fw->fw_status = IWM_FW_STATUS_DONE;
821         wakeup(&sc->sc_fw);
822
823         return error;
824 }
825
826 /*
827  * DMA resource routines
828  */
829
830 static void
831 iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
832 {
833         if (error != 0)
834                 return;
835         KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
836         *(bus_addr_t *)arg = segs[0].ds_addr;
837 }
838
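/*
 * Allocate a single physically contiguous, 32-bit addressable DMA buffer of
 * "size" bytes with the requested alignment, map it, and record the kernel
 * virtual and bus addresses in dma->vaddr and dma->paddr.  On failure the
 * partially constructed state is torn down with iwm_dma_contig_free().
 */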
839 static int
840 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
841     bus_size_t size, bus_size_t alignment)
842 {
843         int error;
844
845         dma->tag = NULL;
846         dma->map = NULL;
847         dma->size = size;
848         dma->vaddr = NULL;
849
850         error = bus_dma_tag_create(tag, alignment,
851             0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
852             1, size, 0, NULL, NULL, &dma->tag);
853         if (error != 0)
854                 goto fail;
855
856         error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
857             BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
858         if (error != 0)
859                 goto fail;
860
861         error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
862             iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
863         if (error != 0) {
864                 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
865                 dma->vaddr = NULL;
866                 goto fail;
867         }
868
869         bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
870
871         return 0;
872
873 fail:
874         iwm_dma_contig_free(dma);
875
876         return error;
877 }
878
879 static void
880 iwm_dma_contig_free(struct iwm_dma_info *dma)
881 {
882         if (dma->vaddr != NULL) {
883                 bus_dmamap_sync(dma->tag, dma->map,
884                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
885                 bus_dmamap_unload(dma->tag, dma->map);
886                 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
887                 dma->vaddr = NULL;
888         }
889         if (dma->tag != NULL) {
890                 bus_dma_tag_destroy(dma->tag);
891                 dma->tag = NULL;
892         }
893 }
894
895 /* fwmem is used to load firmware onto the card */
896 static int
897 iwm_alloc_fwmem(struct iwm_softc *sc)
898 {
899         /* Must be aligned on a 16-byte boundary. */
900         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
901             sc->sc_fwdmasegsz, 16);
902 }
903
904 static void
905 iwm_free_fwmem(struct iwm_softc *sc)
906 {
907         iwm_dma_contig_free(&sc->fw_dma);
908 }
909
910 /* tx scheduler rings.  not used? */
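/*
 * The scheduler area holds one struct iwm_agn_scd_bc_tbl (the per-queue
 * byte count table used by the TX scheduler) for each entry of sc->txq;
 * the hardware requires the area to be aligned on a 1KB boundary.
 */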
911 static int
912 iwm_alloc_sched(struct iwm_softc *sc)
913 {
914         /* TX scheduler rings must be aligned on a 1KB boundary. */
915         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
916             nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
917 }
918
919 static void
920 iwm_free_sched(struct iwm_softc *sc)
921 {
922         iwm_dma_contig_free(&sc->sched_dma);
923 }
924
925 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
926 static int
927 iwm_alloc_kw(struct iwm_softc *sc)
928 {
929         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
930 }
931
932 static void
933 iwm_free_kw(struct iwm_softc *sc)
934 {
935         iwm_dma_contig_free(&sc->kw_dma);
936 }
937
938 /* interrupt cause table */
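/*
 * As in iwlwifi, the ICT is a DMA area the device writes interrupt cause
 * entries into, letting the interrupt handler read pending causes from host
 * memory instead of the INT CSR; see iwm_ict_reset() and iwm_intr().  Its
 * physical address must be aligned to 1 << IWM_ICT_PADDR_SHIFT, which is the
 * alignment passed to iwm_dma_contig_alloc() below.
 */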
939 static int
940 iwm_alloc_ict(struct iwm_softc *sc)
941 {
942         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
943             IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
944 }
945
946 static void
947 iwm_free_ict(struct iwm_softc *sc)
948 {
949         iwm_dma_contig_free(&sc->ict_dma);
950 }
951
952 static int
953 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
954 {
955         bus_size_t size;
956         int i, error;
957
958         ring->cur = 0;
959
960         /* Allocate RX descriptors (256-byte aligned). */
961         size = IWM_RX_RING_COUNT * sizeof(uint32_t);
962         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
963         if (error != 0) {
964                 device_printf(sc->sc_dev,
965                     "could not allocate RX ring DMA memory\n");
966                 goto fail;
967         }
968         ring->desc = ring->desc_dma.vaddr;
969
970         /* Allocate RX status area (16-byte aligned). */
971         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
972             sizeof(*ring->stat), 16);
973         if (error != 0) {
974                 device_printf(sc->sc_dev,
975                     "could not allocate RX status DMA memory\n");
976                 goto fail;
977         }
978         ring->stat = ring->stat_dma.vaddr;
979
980         /* Create RX buffer DMA tag. */
981         error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
982             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
983             IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
984         if (error != 0) {
985                 device_printf(sc->sc_dev,
986                     "%s: could not create RX buf DMA tag, error %d\n",
987                     __func__, error);
988                 goto fail;
989         }
990
991         /* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
992         error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
993         if (error != 0) {
994                 device_printf(sc->sc_dev,
995                     "%s: could not create RX buf DMA map, error %d\n",
996                     __func__, error);
997                 goto fail;
998         }
999         /*
1000          * Allocate and map RX buffers.
1001          */
1002         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1003                 struct iwm_rx_data *data = &ring->data[i];
1004                 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1005                 if (error != 0) {
1006                         device_printf(sc->sc_dev,
1007                             "%s: could not create RX buf DMA map, error %d\n",
1008                             __func__, error);
1009                         goto fail;
1010                 }
1011                 data->m = NULL;
1012
1013                 if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
1014                         goto fail;
1015                 }
1016         }
1017         return 0;
1018
1019 fail:   iwm_free_rx_ring(sc, ring);
1020         return error;
1021 }
1022
1023 static void
1024 iwm_disable_rx_dma(struct iwm_softc *sc)
1025 {
1026         /* XXX conditional nic locks are stupid */
1027         /* XXX print out if we can't lock the NIC? */
1028         if (iwm_nic_lock(sc)) {
1029                 /* XXX handle if RX stop doesn't finish? */
1030                 (void) iwm_pcie_rx_stop(sc);
1031                 iwm_nic_unlock(sc);
1032         }
1033 }
1034
1035 static void
1036 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1037 {
1038         /* Reset the ring state */
1039         ring->cur = 0;
1040
1041         /*
1042          * The hw rx ring index in shared memory must also be cleared,
1043          * otherwise the discrepancy can cause reprocessing chaos.
1044          */
1045         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1046 }
1047
1048 static void
1049 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1050 {
1051         int i;
1052
1053         iwm_dma_contig_free(&ring->desc_dma);
1054         iwm_dma_contig_free(&ring->stat_dma);
1055
1056         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1057                 struct iwm_rx_data *data = &ring->data[i];
1058
1059                 if (data->m != NULL) {
1060                         bus_dmamap_sync(ring->data_dmat, data->map,
1061                             BUS_DMASYNC_POSTREAD);
1062                         bus_dmamap_unload(ring->data_dmat, data->map);
1063                         m_freem(data->m);
1064                         data->m = NULL;
1065                 }
1066                 if (data->map != NULL) {
1067                         bus_dmamap_destroy(ring->data_dmat, data->map);
1068                         data->map = NULL;
1069                 }
1070         }
1071         if (ring->spare_map != NULL) {
1072                 bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1073                 ring->spare_map = NULL;
1074         }
1075         if (ring->data_dmat != NULL) {
1076                 bus_dma_tag_destroy(ring->data_dmat);
1077                 ring->data_dmat = NULL;
1078         }
1079 }
1080
1081 static int
1082 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1083 {
1084         bus_addr_t paddr;
1085         bus_size_t size;
1086         size_t maxsize;
1087         int nsegments;
1088         int i, error;
1089
1090         ring->qid = qid;
1091         ring->queued = 0;
1092         ring->cur = 0;
1093
1094         /* Allocate TX descriptors (256-byte aligned). */
1095         size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1096         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1097         if (error != 0) {
1098                 device_printf(sc->sc_dev,
1099                     "could not allocate TX ring DMA memory\n");
1100                 goto fail;
1101         }
1102         ring->desc = ring->desc_dma.vaddr;
1103
1104         /*
1105          * We only use rings 0 through 9 (4 EDCA + cmd), so there is no need
1106          * to allocate command space for other rings.
1107          */
1108         if (qid > IWM_MVM_CMD_QUEUE)
1109                 return 0;
1110
1111         size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1112         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1113         if (error != 0) {
1114                 device_printf(sc->sc_dev,
1115                     "could not allocate TX cmd DMA memory\n");
1116                 goto fail;
1117         }
1118         ring->cmd = ring->cmd_dma.vaddr;
1119
1120         /* FW commands may require more mapped space than packets. */
1121         if (qid == IWM_MVM_CMD_QUEUE) {
1122                 maxsize = IWM_RBUF_SIZE;
1123                 nsegments = 1;
1124         } else {
1125                 maxsize = MCLBYTES;
1126                 nsegments = IWM_MAX_SCATTER - 2;
1127         }
1128
1129         error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1130             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
1131             nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
1132         if (error != 0) {
1133                 device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
1134                 goto fail;
1135         }
1136
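        /*
         * Hand each TX slot the bus address of its struct iwm_device_cmd
         * within cmd_dma, plus the address of the scratch field inside the
         * embedded TX command; the KASSERT below checks that the walk ends
         * exactly at the end of the command area.
         */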
1137         paddr = ring->cmd_dma.paddr;
1138         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1139                 struct iwm_tx_data *data = &ring->data[i];
1140
1141                 data->cmd_paddr = paddr;
1142                 data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1143                     + offsetof(struct iwm_tx_cmd, scratch);
1144                 paddr += sizeof(struct iwm_device_cmd);
1145
1146                 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1147                 if (error != 0) {
1148                         device_printf(sc->sc_dev,
1149                             "could not create TX buf DMA map\n");
1150                         goto fail;
1151                 }
1152         }
1153         KASSERT(paddr == ring->cmd_dma.paddr + size,
1154             ("invalid physical address"));
1155         return 0;
1156
1157 fail:   iwm_free_tx_ring(sc, ring);
1158         return error;
1159 }
1160
1161 static void
1162 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1163 {
1164         int i;
1165
1166         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1167                 struct iwm_tx_data *data = &ring->data[i];
1168
1169                 if (data->m != NULL) {
1170                         bus_dmamap_sync(ring->data_dmat, data->map,
1171                             BUS_DMASYNC_POSTWRITE);
1172                         bus_dmamap_unload(ring->data_dmat, data->map);
1173                         m_freem(data->m);
1174                         data->m = NULL;
1175                 }
1176         }
1177         /* Clear TX descriptors. */
1178         memset(ring->desc, 0, ring->desc_dma.size);
1179         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1180             BUS_DMASYNC_PREWRITE);
1181         sc->qfullmsk &= ~(1 << ring->qid);
1182         ring->queued = 0;
1183         ring->cur = 0;
1184 }
1185
1186 static void
1187 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1188 {
1189         int i;
1190
1191         iwm_dma_contig_free(&ring->desc_dma);
1192         iwm_dma_contig_free(&ring->cmd_dma);
1193
1194         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1195                 struct iwm_tx_data *data = &ring->data[i];
1196
1197                 if (data->m != NULL) {
1198                         bus_dmamap_sync(ring->data_dmat, data->map,
1199                             BUS_DMASYNC_POSTWRITE);
1200                         bus_dmamap_unload(ring->data_dmat, data->map);
1201                         m_freem(data->m);
1202                         data->m = NULL;
1203                 }
1204                 if (data->map != NULL) {
1205                         bus_dmamap_destroy(ring->data_dmat, data->map);
1206                         data->map = NULL;
1207                 }
1208         }
1209         if (ring->data_dmat != NULL) {
1210                 bus_dma_tag_destroy(ring->data_dmat);
1211                 ring->data_dmat = NULL;
1212         }
1213 }
1214
1215 /*
1216  * High-level hardware frobbing routines
1217  */
1218
1219 static void
1220 iwm_enable_interrupts(struct iwm_softc *sc)
1221 {
1222         sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1223         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1224 }
1225
1226 static void
1227 iwm_restore_interrupts(struct iwm_softc *sc)
1228 {
1229         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1230 }
1231
1232 static void
1233 iwm_disable_interrupts(struct iwm_softc *sc)
1234 {
1235         /* disable interrupts */
1236         IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1237
1238         /* acknowledge all interrupts */
1239         IWM_WRITE(sc, IWM_CSR_INT, ~0);
1240         IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1241 }
1242
1243 static void
1244 iwm_ict_reset(struct iwm_softc *sc)
1245 {
1246         iwm_disable_interrupts(sc);
1247
1248         /* Reset ICT table. */
1249         memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1250         sc->ict_cur = 0;
1251
1252         /* Set physical address of ICT table (4KB aligned). */
1253         IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1254             IWM_CSR_DRAM_INT_TBL_ENABLE
1255             | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1256             | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1257             | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1258
1259         /* Switch to ICT interrupt mode in driver. */
1260         sc->sc_flags |= IWM_FLAG_USE_ICT;
1261
1262         /* Re-enable interrupts. */
1263         IWM_WRITE(sc, IWM_CSR_INT, ~0);
1264         iwm_enable_interrupts(sc);
1265 }
1266
1267 /* iwlwifi pcie/trans.c */
1268
1269 /*
1270  * Since this .. hard-resets things, it's time to actually
1271  * mark the first vap (if any) as having no mac context.
1272  * It's annoying, but since the driver is potentially being
1273  * stop/start'ed whilst active (thanks openbsd port!) we
1274  * have to correctly track this.
1275  */
1276 static void
1277 iwm_stop_device(struct iwm_softc *sc)
1278 {
1279         struct ieee80211com *ic = &sc->sc_ic;
1280         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1281         int chnl, qid;
1282         uint32_t mask = 0;
1283
1284         /* tell the device to stop sending interrupts */
1285         iwm_disable_interrupts(sc);
1286
1287         /*
1288          * FreeBSD-local: mark the first vap as not-uploaded,
1289          * so the next transition through auth/assoc
1290          * will correctly populate the MAC context.
1291          */
1292         if (vap) {
1293                 struct iwm_vap *iv = IWM_VAP(vap);
1294                 iv->is_uploaded = 0;
1295         }
1296
1297         /* device going down, Stop using ICT table */
1298         sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1299
1300         /* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
1301
1302         iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1303
1304         if (iwm_nic_lock(sc)) {
1305                 /* Stop each Tx DMA channel */
1306                 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1307                         IWM_WRITE(sc,
1308                             IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1309                         mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1310                 }
1311
1312                 /* Wait for DMA channels to be idle */
1313                 if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1314                     5000)) {
1315                         device_printf(sc->sc_dev,
1316                             "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1317                             IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1318                 }
1319                 iwm_nic_unlock(sc);
1320         }
1321         iwm_disable_rx_dma(sc);
1322
1323         /* Stop RX ring. */
1324         iwm_reset_rx_ring(sc, &sc->rxq);
1325
1326         /* Reset all TX rings. */
1327         for (qid = 0; qid < nitems(sc->txq); qid++)
1328                 iwm_reset_tx_ring(sc, &sc->txq[qid]);
1329
1330         /*
1331          * Power-down device's busmaster DMA clocks
1332          */
1333         iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1334         DELAY(5);
1335
1336         /* Make sure (redundant) we've released our request to stay awake */
1337         IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1338             IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1339
1340         /* Stop the device, and put it in low power state */
1341         iwm_apm_stop(sc);
1342
1343         /* Upon stop, the APM issues an interrupt if the HW RF kill switch
1344          * is set.  Clear that interrupt again here.
1345          */
1346         iwm_disable_interrupts(sc);
1347         /* stop and reset the on-board processor */
1348         IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1349
1350         /*
1351          * Even if we stop the HW, we still want the RF kill
1352          * interrupt
1353          */
1354         iwm_enable_rfkill_int(sc);
1355         iwm_check_rfkill(sc);
1356 }
1357
1358 /* iwlwifi: mvm/ops.c */
1359 static void
1360 iwm_mvm_nic_config(struct iwm_softc *sc)
1361 {
1362         uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1363         uint32_t reg_val = 0;
1364
1365         radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1366             IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1367         radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1368             IWM_FW_PHY_CFG_RADIO_STEP_POS;
1369         radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1370             IWM_FW_PHY_CFG_RADIO_DASH_POS;
1371
1372         /* SKU control */
1373         reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1374             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1375         reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1376             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1377
1378         /* radio configuration */
1379         reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1380         reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1381         reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1382
1383         IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1384
1385         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1386             "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1387             radio_cfg_step, radio_cfg_dash);
1388
1389         /*
1390          * W/A : NIC is stuck in a reset state after Early PCIe power off
1391          * (PCIe power is lost before PERST# is asserted), causing ME FW
1392          * to lose ownership and be unable to obtain it back.
1393          */
1394         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1395                 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1396                     IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1397                     ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1398         }
1399 }
1400
1401 static int
1402 iwm_nic_rx_init(struct iwm_softc *sc)
1403 {
1404         if (!iwm_nic_lock(sc))
1405                 return EBUSY;
1406
1407         /*
1408          * Initialize RX ring.  This is from the iwn driver.
1409          */
1410         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1411
1412         /* stop DMA */
1413         iwm_disable_rx_dma(sc);
1414         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1415         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1416         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1417         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1418
1419         /* Set physical address of RX ring (256-byte aligned). */
1420         IWM_WRITE(sc,
1421             IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1422
1423         /* Set physical address of RX status (16-byte aligned). */
1424         IWM_WRITE(sc,
1425             IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1426
1427         /* Enable RX. */
1428         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1429             IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
1430             IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |  /* HW bug */
1431             IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
1432             IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK        |
1433             (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1434             IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
1435             IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1436
1437         IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1438
1439         /* W/A for interrupt coalescing bug in 7260 and 3160 */
1440         if (sc->host_interrupt_operation_mode)
1441                 IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1442
1443         /*
1444          * Thus sayeth el jefe (iwlwifi) via a comment:
1445          *
1446          * This value should initially be 0 (before preparing any
1447          * RBs), should be 8 after preparing the first 8 RBs (for example)
1448          */
1449         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1450
1451         iwm_nic_unlock(sc);
1452
1453         return 0;
1454 }
1455
1456 static int
1457 iwm_nic_tx_init(struct iwm_softc *sc)
1458 {
1459         int qid;
1460
1461         if (!iwm_nic_lock(sc))
1462                 return EBUSY;
1463
1464         /* Deactivate TX scheduler. */
1465         iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1466
1467         /* Set physical address of "keep warm" page (16-byte aligned). */
1468         IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1469
1470         /* Initialize TX rings. */
1471         for (qid = 0; qid < nitems(sc->txq); qid++) {
1472                 struct iwm_tx_ring *txq = &sc->txq[qid];
1473
1474                 /* Set physical address of TX ring (256-byte aligned). */
1475                 IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1476                     txq->desc_dma.paddr >> 8);
1477                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1478                     "%s: loading ring %d descriptors (%p) at %lx\n",
1479                     __func__,
1480                     qid, txq->desc,
1481                     (unsigned long) (txq->desc_dma.paddr >> 8));
1482         }
1483
1484         iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1485
1486         iwm_nic_unlock(sc);
1487
1488         return 0;
1489 }
1490
1491 static int
1492 iwm_nic_init(struct iwm_softc *sc)
1493 {
1494         int error;
1495
1496         iwm_apm_init(sc);
1497         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1498                 iwm_set_pwr(sc);
1499
1500         iwm_mvm_nic_config(sc);
1501
1502         if ((error = iwm_nic_rx_init(sc)) != 0)
1503                 return error;
1504
1505         /*
1506          * Ditto for TX, from iwn
1507          */
1508         if ((error = iwm_nic_tx_init(sc)) != 0)
1509                 return error;
1510
1511         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1512             "%s: shadow registers enabled\n", __func__);
1513         IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1514
1515         return 0;
1516 }
1517
1518 const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
1519         IWM_MVM_TX_FIFO_VO,
1520         IWM_MVM_TX_FIFO_VI,
1521         IWM_MVM_TX_FIFO_BE,
1522         IWM_MVM_TX_FIFO_BK,
1523 };
1524
1525 static int
1526 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1527 {
1528         if (!iwm_nic_lock(sc)) {
1529                 device_printf(sc->sc_dev,
1530                     "%s: cannot enable txq %d\n",
1531                     __func__,
1532                     qid);
1533                 return EBUSY;
1534         }
1535
1536         IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1537
1538         if (qid == IWM_MVM_CMD_QUEUE) {
1539                 /* deactivate before configuration */
1540                 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1541                     (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1542                     | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1543
1544                 iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1545
1546                 iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1547
1548                 iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1549                 /* Set scheduler window size and frame limit. */
1550                 iwm_write_mem32(sc,
1551                     sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1552                     sizeof(uint32_t),
1553                     ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1554                     IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1555                     ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1556                     IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1557
1558                 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1559                     (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1560                     (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1561                     (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1562                     IWM_SCD_QUEUE_STTS_REG_MSK);
1563         } else {
1564                 struct iwm_scd_txq_cfg_cmd cmd;
1565                 int error;
1566
1567                 iwm_nic_unlock(sc);
1568
1569                 memset(&cmd, 0, sizeof(cmd));
1570                 cmd.scd_queue = qid;
1571                 cmd.enable = 1;
1572                 cmd.sta_id = sta_id;
1573                 cmd.tx_fifo = fifo;
1574                 cmd.aggregate = 0;
1575                 cmd.window = IWM_FRAME_LIMIT;
1576
1577                 error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1578                     sizeof(cmd), &cmd);
1579                 if (error) {
1580                         device_printf(sc->sc_dev,
1581                             "cannot enable txq %d\n", qid);
1582                         return error;
1583                 }
1584
1585                 if (!iwm_nic_lock(sc))
1586                         return EBUSY;
1587         }
1588
1589         iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1590             iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1591
1592         iwm_nic_unlock(sc);
1593
1594         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1595             __func__, qid, fifo);
1596
1597         return 0;
1598 }
1599
1600 static int
1601 iwm_post_alive(struct iwm_softc *sc)
1602 {
1603         int nwords;
1604         int error, chnl;
1605         uint32_t base;
1606
1607         if (!iwm_nic_lock(sc))
1608                 return EBUSY;
1609
1610         base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1611         if (sc->sched_base != base) {
1612                 device_printf(sc->sc_dev,
1613                     "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1614                     __func__, sc->sched_base, base);
1615         }
1616
1617         iwm_ict_reset(sc);
1618
1619         /* Clear TX scheduler state in SRAM. */
1620         nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1621             IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
1622             / sizeof(uint32_t);
1623         error = iwm_write_mem(sc,
1624             sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1625             NULL, nwords);
1626         if (error)
1627                 goto out;
1628
1629         /* Set physical address of TX scheduler rings (1KB aligned). */
1630         iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1631
1632         iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1633
1634         iwm_nic_unlock(sc);
1635
1636         /* enable command channel */
1637         error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
1638         if (error)
1639                 return error;
1640
1641         if (!iwm_nic_lock(sc))
1642                 return EBUSY;
1643
1644         iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1645
1646         /* Enable DMA channels. */
1647         for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1648                 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1649                     IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1650                     IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1651         }
1652
1653         IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1654             IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1655
1656         /* Enable L1-Active */
1657         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
1658                 iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1659                     IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1660         }
1661
1662  out:
1663         iwm_nic_unlock(sc);
1664         return error;
1665 }
1666
1667 /*
1668  * NVM read access and content parsing.  We do not support
1669  * external NVM or writing NVM.
1670  * iwlwifi/mvm/nvm.c
1671  */
1672
1673 /* list of NVM sections we are allowed/need to read */
1674 const int nvm_to_read[] = {
1675         IWM_NVM_SECTION_TYPE_HW,
1676         IWM_NVM_SECTION_TYPE_SW,
1677         IWM_NVM_SECTION_TYPE_REGULATORY,
1678         IWM_NVM_SECTION_TYPE_CALIBRATION,
1679         IWM_NVM_SECTION_TYPE_PRODUCTION,
1680         IWM_NVM_SECTION_TYPE_HW_8000,
1681         IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
1682         IWM_NVM_SECTION_TYPE_PHY_SKU,
1683 };
1684
1685 /* Default NVM size to read */
1686 #define IWM_NVM_DEFAULT_CHUNK_SIZE      (2*1024)
1687 #define IWM_MAX_NVM_SECTION_SIZE        8192
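/*
 * Derived from the sizes above: with 2 KB chunks, a full 8 KB section is
 * fetched in at most four NVM access commands (see the read loop in
 * iwm_nvm_read_section() below).
 */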
1688
1689 #define IWM_NVM_WRITE_OPCODE 1
1690 #define IWM_NVM_READ_OPCODE 0
1691
1692 /* load nvm chunk response */
1693 #define IWM_READ_NVM_CHUNK_SUCCEED              0
1694 #define IWM_READ_NVM_CHUNK_INVALID_ADDRESS      1
1695
1696 static int
1697 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1698         uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1699 {
1701         struct iwm_nvm_access_cmd nvm_access_cmd = {
1702                 .offset = htole16(offset),
1703                 .length = htole16(length),
1704                 .type = htole16(section),
1705                 .op_code = IWM_NVM_READ_OPCODE,
1706         };
1707         struct iwm_nvm_access_resp *nvm_resp;
1708         struct iwm_rx_packet *pkt;
1709         struct iwm_host_cmd cmd = {
1710                 .id = IWM_NVM_ACCESS_CMD,
1711                 .flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
1712                     IWM_CMD_SEND_IN_RFKILL,
1713                 .data = { &nvm_access_cmd, },
1714         };
1715         int ret, offset_read;
1716         size_t bytes_read;
1717         uint8_t *resp_data;
1718
1719         cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1720
1721         ret = iwm_send_cmd(sc, &cmd);
1722         if (ret) {
1723                 device_printf(sc->sc_dev,
1724                     "Could not send NVM_ACCESS command (error=%d)\n", ret);
1725                 return ret;
1726         }
1727
1728         pkt = cmd.resp_pkt;
1729         if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
1730                 device_printf(sc->sc_dev,
1731                     "Bad return from IWM_NVM_ACCESS_COMMAND (0x%08X)\n",
1732                     pkt->hdr.flags);
1733                 ret = EIO;
1734                 goto exit;
1735         }
1736
1737         /* Extract NVM response */
1738         nvm_resp = (void *)pkt->data;
1739
1740         ret = le16toh(nvm_resp->status);
1741         bytes_read = le16toh(nvm_resp->length);
1742         offset_read = le16toh(nvm_resp->offset);
1743         resp_data = nvm_resp->data;
1744         if (ret) {
1745                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1746                     "NVM access command failed with status %d\n", ret);
1747                 ret = EINVAL;
1748                 goto exit;
1749         }
1750
1751         if (offset_read != offset) {
1752                 device_printf(sc->sc_dev,
1753                     "NVM ACCESS response with invalid offset %d\n",
1754                     offset_read);
1755                 ret = EINVAL;
1756                 goto exit;
1757         }
1758
1759         if (bytes_read > length) {
1760                 device_printf(sc->sc_dev,
1761                     "NVM ACCESS response with too much data "
1762                     "(%d bytes requested, %zd bytes received)\n",
1763                     length, bytes_read);
1764                 ret = EINVAL;
1765                 goto exit;
1766         }
1767
1768         memcpy(data + offset, resp_data, bytes_read);
1769         *len = bytes_read;
1770
1771  exit:
1772         iwm_free_resp(sc, &cmd);
1773         return ret;
1774 }
1775
1776 /*
1777  * Reads an NVM section completely.
1778  * NICs prior to the 7000 family don't have a real NVM, but just read
1779  * section 0, which is the EEPROM.  Because EEPROM reads are not bounded
1780  * by the uCode, we need to manually check in this case that we don't
1781  * overflow and try to read more than the EEPROM size.
1782  * For 7000-family NICs, we supply the maximal size we can read, and
1783  * the uCode fills the response with as much data as fits without
1784  * overflowing, so no check is needed.
1785  */
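/*
 * Illustrative walk-through of the chunked read below (hypothetical sizes,
 * not taken from any particular NVM image): reading a 5000-byte section
 * with IWM_NVM_DEFAULT_CHUNK_SIZE (2048) issues reads at offsets 0, 2048
 * and 4096; the third read returns only 904 bytes, so seglen != chunklen
 * and the loop terminates with *len == 5000.
 */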
1786 static int
1787 iwm_nvm_read_section(struct iwm_softc *sc,
1788         uint16_t section, uint8_t *data, uint16_t *len, size_t max_len)
1789 {
1790         uint16_t chunklen, seglen;
1791         int error = 0;
1792
1793         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1794             "reading NVM section %d\n", section);
1795
1796         chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
1797         *len = 0;
1798
1799         /* Read NVM chunks until exhausted (reading less than requested) */
1800         while (seglen == chunklen && *len < max_len) {
1801                 error = iwm_nvm_read_chunk(sc,
1802                     section, *len, chunklen, data, &seglen);
1803                 if (error) {
1804                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1805                             "Cannot read from NVM section "
1806                             "%d at offset %d\n", section, *len);
1807                         return error;
1808                 }
1809                 *len += seglen;
1810         }
1811
1812         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1813             "NVM section %d read completed (%d bytes, error=%d)\n",
1814             section, *len, error);
1815         return error;
1816 }
1817
1818 /*
1819  * BEGIN IWM_NVM_PARSE
1820  */
1821
1822 /* iwlwifi/iwl-nvm-parse.c */
1823
1824 /* NVM offsets (in words) definitions */
1825 enum iwm_nvm_offsets {
1826         /* NVM HW-Section offset (in words) definitions */
1827         IWM_HW_ADDR = 0x15,
1828
1829 /* NVM SW-Section offset (in words) definitions */
1830         IWM_NVM_SW_SECTION = 0x1C0,
1831         IWM_NVM_VERSION = 0,
1832         IWM_RADIO_CFG = 1,
1833         IWM_SKU = 2,
1834         IWM_N_HW_ADDRS = 3,
1835         IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1836
1837 /* NVM calibration section offset (in words) definitions */
1838         IWM_NVM_CALIB_SECTION = 0x2B8,
1839         IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1840 };
1841
1842 enum iwm_8000_nvm_offsets {
1843         /* NVM HW-Section offset (in words) definitions */
1844         IWM_HW_ADDR0_WFPM_8000 = 0x12,
1845         IWM_HW_ADDR1_WFPM_8000 = 0x16,
1846         IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1847         IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1848         IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1849
1850         /* NVM SW-Section offset (in words) definitions */
1851         IWM_NVM_SW_SECTION_8000 = 0x1C0,
1852         IWM_NVM_VERSION_8000 = 0,
1853         IWM_RADIO_CFG_8000 = 0,
1854         IWM_SKU_8000 = 2,
1855         IWM_N_HW_ADDRS_8000 = 3,
1856
1857         /* NVM REGULATORY -Section offset (in words) definitions */
1858         IWM_NVM_CHANNELS_8000 = 0,
1859         IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1860         IWM_NVM_LAR_OFFSET_8000 = 0x507,
1861         IWM_NVM_LAR_ENABLED_8000 = 0x7,
1862
1863         /* NVM calibration section offset (in words) definitions */
1864         IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1865         IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1866 };
1867
1868 /* SKU Capabilities (actual values from NVM definition) */
1869 enum nvm_sku_bits {
1870         IWM_NVM_SKU_CAP_BAND_24GHZ      = (1 << 0),
1871         IWM_NVM_SKU_CAP_BAND_52GHZ      = (1 << 1),
1872         IWM_NVM_SKU_CAP_11N_ENABLE      = (1 << 2),
1873         IWM_NVM_SKU_CAP_11AC_ENABLE     = (1 << 3),
1874 };
1875
1876 /* radio config bits (actual values from NVM definition) */
1877 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1878 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1879 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1880 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1881 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1882 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1883
1884 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)       (x & 0xF)
1885 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x)         ((x >> 4) & 0xF)
1886 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x)         ((x >> 8) & 0xF)
1887 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)         ((x >> 12) & 0xFFF)
1888 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)       ((x >> 24) & 0xF)
1889 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)       ((x >> 28) & 0xF)
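/*
 * Worked example of the 7000-family field extraction above, using an
 * assumed value (0x1234 is illustrative only, not a real NVM word):
 *
 *	radio_cfg = 0x1234 (binary 0001 0010 0011 0100)
 *	IWM_NVM_RF_CFG_DASH_MSK(radio_cfg)   = 0x0  (bits 0-1)
 *	IWM_NVM_RF_CFG_STEP_MSK(radio_cfg)   = 0x1  (bits 2-3)
 *	IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg)   = 0x3  (bits 4-5)
 *	IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg)   = 0x0  (bits 6-7)
 *	IWM_NVM_RF_CFG_TX_ANT_MSK(radio_cfg) = 0x2  (bits 8-11)
 *	IWM_NVM_RF_CFG_RX_ANT_MSK(radio_cfg) = 0x1  (bits 12-15)
 */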
1890
1891 #define DEFAULT_MAX_TX_POWER 16
1892
1893 /**
1894  * enum iwm_nvm_channel_flags - channel flags in NVM
1895  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1896  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1897  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1898  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1899  * XXX cannot find this (DFS) flag in iwl-nvm-parse.c
1900  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1901  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1902  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1903  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1904  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1905  */
1906 enum iwm_nvm_channel_flags {
1907         IWM_NVM_CHANNEL_VALID = (1 << 0),
1908         IWM_NVM_CHANNEL_IBSS = (1 << 1),
1909         IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1910         IWM_NVM_CHANNEL_RADAR = (1 << 4),
1911         IWM_NVM_CHANNEL_DFS = (1 << 7),
1912         IWM_NVM_CHANNEL_WIDE = (1 << 8),
1913         IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1914         IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1915         IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1916 };
1917
1918 /*
1919  * Translate EEPROM flags to net80211.
1920  */
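/*
 * Illustrative example (hypothetical flags word): a channel marked
 * IWM_NVM_CHANNEL_VALID | IWM_NVM_CHANNEL_RADAR, with neither
 * IWM_NVM_CHANNEL_ACTIVE nor IWM_NVM_CHANNEL_IBSS set, translates to
 * IEEE80211_CHAN_PASSIVE | IEEE80211_CHAN_NOADHOC | IEEE80211_CHAN_DFS.
 */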
1921 static uint32_t
1922 iwm_eeprom_channel_flags(uint16_t ch_flags)
1923 {
1924         uint32_t nflags;
1925
1926         nflags = 0;
1927         if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1928                 nflags |= IEEE80211_CHAN_PASSIVE;
1929         if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1930                 nflags |= IEEE80211_CHAN_NOADHOC;
1931         if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1932                 nflags |= IEEE80211_CHAN_DFS;
1933                 /* Just in case. */
1934                 nflags |= IEEE80211_CHAN_NOADHOC;
1935         }
1936
1937         return (nflags);
1938 }
1939
1940 static void
1941 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1942     int maxchans, int *nchans, int ch_idx, size_t ch_num,
1943     const uint8_t bands[])
1944 {
1945         const uint16_t * const nvm_ch_flags = sc->sc_nvm.nvm_ch_flags;
1946         uint32_t nflags;
1947         uint16_t ch_flags;
1948         uint8_t ieee;
1949         int error;
1950
1951         for (; ch_idx < ch_num; ch_idx++) {
1952                 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1953                 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1954                         ieee = iwm_nvm_channels[ch_idx];
1955                 else
1956                         ieee = iwm_nvm_channels_8000[ch_idx];
1957
1958                 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1959                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1960                             "Ch. %d Flags %x [%sGHz] - No traffic\n",
1961                             ieee, ch_flags,
1962                             (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1963                             "5.2" : "2.4");
1964                         continue;
1965                 }
1966
1967                 nflags = iwm_eeprom_channel_flags(ch_flags);
1968                 error = ieee80211_add_channel(chans, maxchans, nchans,
1969                     ieee, 0, 0, nflags, bands);
1970                 if (error != 0)
1971                         break;
1972
1973                 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1974                     "Ch. %d Flags %x [%sGHz] - Added\n",
1975                     ieee, ch_flags,
1976                     (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1977                     "5.2" : "2.4");
1978         }
1979 }
1980
1981 static void
1982 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
1983     struct ieee80211_channel chans[])
1984 {
1985         struct iwm_softc *sc = ic->ic_softc;
1986         struct iwm_nvm_data *data = &sc->sc_nvm;
1987         uint8_t bands[IEEE80211_MODE_BYTES];
1988         size_t ch_num;
1989
1990         memset(bands, 0, sizeof(bands));
1991         /* 1-13: 11b/g channels. */
1992         setbit(bands, IEEE80211_MODE_11B);
1993         setbit(bands, IEEE80211_MODE_11G);
1994         iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
1995             IWM_NUM_2GHZ_CHANNELS - 1, bands);
1996
1997         /* 14: 11b channel only. */
1998         clrbit(bands, IEEE80211_MODE_11G);
1999         iwm_add_channel_band(sc, chans, maxchans, nchans,
2000             IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2001
2002         if (data->sku_cap_band_52GHz_enable) {
2003                 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2004                         ch_num = nitems(iwm_nvm_channels);
2005                 else
2006                         ch_num = nitems(iwm_nvm_channels_8000);
2007                 memset(bands, 0, sizeof(bands));
2008                 setbit(bands, IEEE80211_MODE_11A);
2009                 iwm_add_channel_band(sc, chans, maxchans, nchans,
2010                     IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2011         }
2012 }
2013
2014 static void
2015 iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2016         const uint16_t *mac_override, const uint16_t *nvm_hw)
2017 {
2018         const uint8_t *hw_addr;
2019
2020         if (mac_override) {
2021                 static const uint8_t reserved_mac[] = {
2022                         0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2023                 };
2024
2025                 hw_addr = (const uint8_t *)(mac_override +
2026                                  IWM_MAC_ADDRESS_OVERRIDE_8000);
2027
2028                 /*
2029                  * Store the MAC address from MAO section.
2030                  * No byte swapping is required in MAO section
2031                  */
2032                 IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2033
2034                 /*
2035                  * Force the use of the OTP MAC address in case of reserved MAC
2036                  * address in the NVM, or if address is given but invalid.
2037                  */
2038                 if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2039                     !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2040                     iwm_is_valid_ether_addr(data->hw_addr) &&
2041                     !IEEE80211_IS_MULTICAST(data->hw_addr))
2042                         return;
2043
2044                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2045                     "%s: mac address from nvm override section invalid\n",
2046                     __func__);
2047         }
2048
2049         if (nvm_hw) {
2050                 /* read the mac address from WFMP registers */
2051                 uint32_t mac_addr0 =
2052                     htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2053                 uint32_t mac_addr1 =
2054                     htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2055
2056                 hw_addr = (const uint8_t *)&mac_addr0;
2057                 data->hw_addr[0] = hw_addr[3];
2058                 data->hw_addr[1] = hw_addr[2];
2059                 data->hw_addr[2] = hw_addr[1];
2060                 data->hw_addr[3] = hw_addr[0];
2061
2062                 hw_addr = (const uint8_t *)&mac_addr1;
2063                 data->hw_addr[4] = hw_addr[1];
2064                 data->hw_addr[5] = hw_addr[0];
2065
2066                 return;
2067         }
2068
2069         device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2070         memset(data->hw_addr, 0, sizeof(data->hw_addr));
2071 }
2072
2073 static int
2074 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2075             const uint16_t *phy_sku)
2076 {
2077         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2078                 return le16_to_cpup(nvm_sw + IWM_SKU);
2079
2080         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2081 }
2082
2083 static int
2084 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2085 {
2086         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2087                 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2088         else
2089                 return le32_to_cpup((const uint32_t *)(nvm_sw +
2090                                                 IWM_NVM_VERSION_8000));
2091 }
2092
2093 static int
2094 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2095                   const uint16_t *phy_sku)
2096 {
2097         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2098                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2099
2100         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2101 }
2102
2103 static int
2104 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2105 {
2106         int n_hw_addr;
2107
2108         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2109                 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2110
2111         n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2112
2113         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2114 }
2115
2116 static void
2117 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2118                   uint32_t radio_cfg)
2119 {
2120         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
2121                 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2122                 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2123                 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2124                 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2125                 return;
2126         }
2127
2128         /* set the radio configuration for family 8000 */
2129         data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2130         data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2131         data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2132         data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2133         data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2134         data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2135 }
2136
2137 static int
2138 iwm_parse_nvm_data(struct iwm_softc *sc,
2139                    const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2140                    const uint16_t *nvm_calib, const uint16_t *mac_override,
2141                    const uint16_t *phy_sku, const uint16_t *regulatory)
2142 {
2143         struct iwm_nvm_data *data = &sc->sc_nvm;
2144         uint8_t hw_addr[IEEE80211_ADDR_LEN];
2145         uint32_t sku, radio_cfg;
2146
2147         data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2148
2149         radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2150         iwm_set_radio_cfg(sc, data, radio_cfg);
2151
2152         sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2153         data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2154         data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2155         data->sku_cap_11n_enable = 0;
2156
2157         data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2158
2159         /* Little-endian 16-bit byte order: the bytes arrive as 2-1-4-3-6-5. */
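        /*
         * Illustrative example (made-up address): if the bytes stored in
         * the HW section are 22 11 44 33 66 55, the swap below yields the
         * MAC address 11:22:33:44:55:66.
         */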
2160         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2161                 IEEE80211_ADDR_COPY(hw_addr, nvm_hw + IWM_HW_ADDR);
2162                 data->hw_addr[0] = hw_addr[1];
2163                 data->hw_addr[1] = hw_addr[0];
2164                 data->hw_addr[2] = hw_addr[3];
2165                 data->hw_addr[3] = hw_addr[2];
2166                 data->hw_addr[4] = hw_addr[5];
2167                 data->hw_addr[5] = hw_addr[4];
2168         } else {
2169                 iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
2170         }
2171
2172         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2173                 memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2174                     IWM_NUM_CHANNELS * sizeof(uint16_t));
2175         } else {
2176                 memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2177                     IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2178         }
2179         data->calib_version = 255;   /* TODO:
2180                                         this value will prevent some checks from
2181                                         failing, we need to check if this
2182                                         field is still needed, and if it does,
2183                                         where is it in the NVM */
2184
2185         return 0;
2186 }
2187
2188 /*
2189  * END NVM PARSE
2190  */
2191
2192 static int
2193 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2194 {
2195         const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2196
2197         /* Checking for required sections */
2198         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2199                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2200                     !sections[IWM_NVM_SECTION_TYPE_HW].data) {
2201                         device_printf(sc->sc_dev,
2202                             "Can't parse empty OTP/NVM sections\n");
2203                         return ENOENT;
2204                 }
2205
2206                 hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
2207         } else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
2208                 /* SW and REGULATORY sections are mandatory */
2209                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2210                     !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2211                         device_printf(sc->sc_dev,
2212                             "Can't parse empty OTP/NVM sections\n");
2213                         return ENOENT;
2214                 }
2215                 /* MAC_OVERRIDE or at least HW section must exist */
2216                 if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
2217                     !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2218                         device_printf(sc->sc_dev,
2219                             "Can't parse mac_address, empty sections\n");
2220                         return ENOENT;
2221                 }
2222
2223                 /* PHY_SKU section is mandatory in B0 */
2224                 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2225                         device_printf(sc->sc_dev,
2226                             "Can't parse phy_sku in B0, empty sections\n");
2227                         return ENOENT;
2228                 }
2229
2230                 hw = (const uint16_t *)
2231                     sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
2232         } else {
2233                 panic("unknown device family %d\n", sc->sc_device_family);
2234         }
2235
2236         sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2237         calib = (const uint16_t *)
2238             sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2239         regulatory = (const uint16_t *)
2240             sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2241         mac_override = (const uint16_t *)
2242             sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2243         phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2244
2245         return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2246             phy_sku, regulatory);
2247 }
2248
2249 static int
2250 iwm_nvm_init(struct iwm_softc *sc)
2251 {
2252         struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
2253         int i, section, error;
2254         uint16_t len;
2255         uint8_t *buf;
2256         const size_t bufsz = IWM_MAX_NVM_SECTION_SIZE;
2257
2258         memset(nvm_sections, 0 , sizeof(nvm_sections));
2259
2260         buf = malloc(bufsz, M_DEVBUF, M_NOWAIT);
2261         if (buf == NULL)
2262                 return ENOMEM;
2263
2264         for (i = 0; i < nitems(nvm_to_read); i++) {
2265                 section = nvm_to_read[i];
2266                 KASSERT(section < nitems(nvm_sections),
2267                     ("nvm section index out of range"));
2268
2269                 error = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
2270                 if (error) {
2271                         error = 0;
2272                         continue;
2273                 }
2274                 nvm_sections[section].data = malloc(len, M_DEVBUF, M_NOWAIT);
2275                 if (nvm_sections[section].data == NULL) {
2276                         error = ENOMEM;
2277                         break;
2278                 }
2279                 memcpy(nvm_sections[section].data, buf, len);
2280                 nvm_sections[section].length = len;
2281         }
2282         free(buf, M_DEVBUF);
2283         if (error == 0)
2284                 error = iwm_parse_nvm_sections(sc, nvm_sections);
2285
2286         for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
2287                 if (nvm_sections[i].data != NULL)
2288                         free(nvm_sections[i].data, M_DEVBUF);
2289         }
2290
2291         return error;
2292 }
2293
2294 /*
2295  * Firmware loading gunk.  This is kind of a weird hybrid between the
2296  * iwn driver and the Linux iwlwifi driver.
2297  */
2298
2299 static int
2300 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
2301         const uint8_t *section, uint32_t byte_cnt)
2302 {
2303         int error = EINVAL;
2304         uint32_t chunk_sz, offset;
2305
2306         chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
2307
2308         for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
2309                 uint32_t addr, len;
2310                 const uint8_t *data;
2311
2312                 addr = dst_addr + offset;
2313                 len = MIN(chunk_sz, byte_cnt - offset);
2314                 data = section + offset;
2315
2316                 error = iwm_firmware_load_chunk(sc, addr, data, len);
2317                 if (error)
2318                         break;
2319         }
2320
2321         return error;
2322 }
2323
2324 static int
2325 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2326         const uint8_t *chunk, uint32_t byte_cnt)
2327 {
2328         struct iwm_dma_info *dma = &sc->fw_dma;
2329         int error;
2330
2331         /* Copy firmware chunk into pre-allocated DMA-safe memory. */
2332         memcpy(dma->vaddr, chunk, byte_cnt);
2333         bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2334
2335         if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2336             dst_addr <= IWM_FW_MEM_EXTENDED_END) {
2337                 iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2338                     IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2339         }
2340
2341         sc->sc_fw_chunk_done = 0;
2342
2343         if (!iwm_nic_lock(sc))
2344                 return EBUSY;
2345
2346         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2347             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2348         IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2349             dst_addr);
2350         IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2351             dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2352         IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2353             (iwm_get_dma_hi_addr(dma->paddr)
2354               << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2355         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2356             1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2357             1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2358             IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2359         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2360             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2361             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2362             IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2363
2364         iwm_nic_unlock(sc);
2365
2366         /* wait 1s for this segment to load */
2367         while (!sc->sc_fw_chunk_done)
2368                 if ((error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz)) != 0)
2369                         break;
2370
2371         if (!sc->sc_fw_chunk_done) {
2372                 device_printf(sc->sc_dev,
2373                     "fw chunk addr 0x%x len %d failed to load\n",
2374                     dst_addr, byte_cnt);
2375         }
2376
2377         if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2378             dst_addr <= IWM_FW_MEM_EXTENDED_END && iwm_nic_lock(sc)) {
2379                 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2380                     IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2381                 iwm_nic_unlock(sc);
2382         }
2383
2384         return error;
2385 }
2386
2387 int
2388 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
2389     int cpu, int *first_ucode_section)
2390 {
2391         int shift_param;
2392         int i, error = 0, sec_num = 0x1;
2393         uint32_t val, last_read_idx = 0;
2394         const void *data;
2395         uint32_t dlen;
2396         uint32_t offset;
2397
2398         if (cpu == 1) {
2399                 shift_param = 0;
2400                 *first_ucode_section = 0;
2401         } else {
2402                 shift_param = 16;
2403                 (*first_ucode_section)++;
2404         }
2405
2406         for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
2407                 last_read_idx = i;
2408                 data = fws->fw_sect[i].fws_data;
2409                 dlen = fws->fw_sect[i].fws_len;
2410                 offset = fws->fw_sect[i].fws_devoff;
2411
2412                 /*
2413                  * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2414                  * CPU1 to CPU2.
2415                  * PAGING_SEPARATOR_SECTION delimiter - separate between
2416                  * CPU2 non paged to CPU2 paging sec.
2417                  */
2418                 if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2419                     offset == IWM_PAGING_SEPARATOR_SECTION)
2420                         break;
2421
2422                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2423                     "LOAD FIRMWARE chunk %d offset 0x%x len %d for cpu %d\n",
2424                     i, offset, dlen, cpu);
2425
2426                 if (dlen > sc->sc_fwdmasegsz) {
2427                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2428                             "chunk %d too large (%d bytes)\n", i, dlen);
2429                         error = EFBIG;
2430                 } else {
2431                         error = iwm_firmware_load_sect(sc, offset, data, dlen);
2432                 }
2433                 if (error) {
2434                         device_printf(sc->sc_dev,
2435                             "could not load firmware chunk %d (error %d)\n",
2436                             i, error);
2437                         return error;
2438                 }
2439
2440                 /* Notify the ucode of the loaded section number and status */
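                /*
                 * Illustrative values: after the first three sections the
                 * masks written below are 0x1, 0x3 and 0x7 (shifted left by
                 * 16 when loading CPU2's sections).
                 */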
2441                 if (iwm_nic_lock(sc)) {
2442                         val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2443                         val = val | (sec_num << shift_param);
2444                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2445                         sec_num = (sec_num << 1) | 0x1;
2446                         iwm_nic_unlock(sc);
2447
2448                         /*
2449                          * The firmware won't load correctly without this delay.
2450                          */
2451                         DELAY(8000);
2452                 }
2453         }
2454
2455         *first_ucode_section = last_read_idx;
2456
2457         if (iwm_nic_lock(sc)) {
2458                 if (cpu == 1)
2459                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2460                 else
2461                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2462                 iwm_nic_unlock(sc);
2463         }
2464
2465         return 0;
2466 }
2467
2468 int
2469 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2470 {
2471         struct iwm_fw_sects *fws;
2472         int error = 0;
2473         int first_ucode_section;
2474
2475         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "loading ucode type %d\n",
2476             ucode_type);
2477
2478         fws = &sc->sc_fw.fw_sects[ucode_type];
2479
2480         /* configure the ucode to be ready to get the secured image */
2481         /* release CPU reset */
2482         iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
2483
2484         /* load to FW the binary Secured sections of CPU1 */
2485         error = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
2486         if (error)
2487                 return error;
2488
2489         /* load to FW the binary sections of CPU2 */
2490         return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
2491 }
2492
2493 static int
2494 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2495 {
2496         struct iwm_fw_sects *fws;
2497         int error, i;
2498         const void *data;
2499         uint32_t dlen;
2500         uint32_t offset;
2501
2502         sc->sc_uc.uc_intr = 0;
2503
2504         fws = &sc->sc_fw.fw_sects[ucode_type];
2505         for (i = 0; i < fws->fw_count; i++) {
2506                 data = fws->fw_sect[i].fws_data;
2507                 dlen = fws->fw_sect[i].fws_len;
2508                 offset = fws->fw_sect[i].fws_devoff;
2509                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2510                     "LOAD FIRMWARE type %d offset %u len %d\n",
2511                     ucode_type, offset, dlen);
2512                 if (dlen > sc->sc_fwdmasegsz) {
2513                         IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2514                             "chunk %d too large (%d bytes)\n", i, dlen);
2515                         error = EFBIG;
2516                 } else {
2517                         error = iwm_firmware_load_sect(sc, offset, data, dlen);
2518                 }
2519                 if (error) {
2520                         device_printf(sc->sc_dev,
2521                             "could not load firmware chunk %u of %u "
2522                             "(error=%d)\n", i, fws->fw_count, error);
2523                         return error;
2524                 }
2525         }
2526
2527         IWM_WRITE(sc, IWM_CSR_RESET, 0);
2528
2529         return 0;
2530 }
2531
2532 static int
2533 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2534 {
2535         int error, w;
2536
2537         if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
2538                 error = iwm_load_firmware_8000(sc, ucode_type);
2539         else
2540                 error = iwm_load_firmware_7000(sc, ucode_type);
2541         if (error)
2542                 return error;
2543
2544         /* wait for the firmware to load */
2545         for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
2546                 error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
2547         }
2548         if (error || !sc->sc_uc.uc_ok) {
2549                 device_printf(sc->sc_dev, "could not load firmware\n");
2550                 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
2551                         device_printf(sc->sc_dev, "cpu1 status: 0x%x\n",
2552                             iwm_read_prph(sc, IWM_SB_CPU_1_STATUS));
2553                         device_printf(sc->sc_dev, "cpu2 status: 0x%x\n",
2554                             iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
2555                 }
2556         }
2557
2558         /*
2559          * Give the firmware some time to initialize.
2560          * Accessing it too early causes errors.
2561          */
2562         msleep(&w, &sc->sc_mtx, 0, "iwmfwinit", hz);
2563
2564         return error;
2565 }
2566
2567 /* iwlwifi: pcie/trans.c */
2568 static int
2569 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2570 {
2571         int error;
2572
2573         IWM_WRITE(sc, IWM_CSR_INT, ~0);
2574
2575         if ((error = iwm_nic_init(sc)) != 0) {
2576                 device_printf(sc->sc_dev, "unable to init nic\n");
2577                 return error;
2578         }
2579
2580         /* make sure rfkill handshake bits are cleared */
2581         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2582         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2583             IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2584
2585         /* clear (again), then enable host interrupts */
2586         IWM_WRITE(sc, IWM_CSR_INT, ~0);
2587         iwm_enable_interrupts(sc);
2588
2589         /* really make sure rfkill handshake bits are cleared */
2590         /* maybe we should write a few times more?  just to make sure */
2591         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2592         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2593
2594         /* Load the given image to the HW */
2595         return iwm_load_firmware(sc, ucode_type);
2596 }
2597
2598 static int
2599 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2600 {
2601         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2602                 .valid = htole32(valid_tx_ant),
2603         };
2604
2605         return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2606             IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2607 }
2608
2609 /* iwlwifi: mvm/fw.c */
2610 static int
2611 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2612 {
2613         struct iwm_phy_cfg_cmd phy_cfg_cmd;
2614         enum iwm_ucode_type ucode_type = sc->sc_uc_current;
2615
2616         /* Set parameters */
2617         phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
2618         phy_cfg_cmd.calib_control.event_trigger =
2619             sc->sc_default_calib[ucode_type].event_trigger;
2620         phy_cfg_cmd.calib_control.flow_trigger =
2621             sc->sc_default_calib[ucode_type].flow_trigger;
2622
2623         IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2624             "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2625         return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2626             sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2627 }
2628
2629 static int
2630 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2631         enum iwm_ucode_type ucode_type)
2632 {
2633         enum iwm_ucode_type old_type = sc->sc_uc_current;
2634         int error;
2635
2636         if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2637                 device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
2638                         error);
2639                 return error;
2640         }
2641
2642         sc->sc_uc_current = ucode_type;
2643         error = iwm_start_fw(sc, ucode_type);
2644         if (error) {
2645                 device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2646                 sc->sc_uc_current = old_type;
2647                 return error;
2648         }
2649
2650         error = iwm_post_alive(sc);
2651         if (error) {
2652                 device_printf(sc->sc_dev, "iwm_fw_alive: failed %d\n", error);
2653         }
2654         return error;
2655 }
2656
2657 /*
2658  * mvm misc bits
2659  */
2660
2661 /*
2662  * follows iwlwifi/fw.c
2663  */
2664 static int
2665 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2666 {
2667         int error;
2668
2669         /* do not operate with rfkill switch turned on */
2670         if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2671                 device_printf(sc->sc_dev,
2672                     "radio is disabled by hardware switch\n");
2673                 return EPERM;
2674         }
2675
2676         sc->sc_init_complete = 0;
2677         if ((error = iwm_mvm_load_ucode_wait_alive(sc,
2678             IWM_UCODE_TYPE_INIT)) != 0) {
2679                 device_printf(sc->sc_dev, "failed to load init firmware\n");
2680                 return error;
2681         }
2682
2683         if (justnvm) {
2684                 if ((error = iwm_nvm_init(sc)) != 0) {
2685                         device_printf(sc->sc_dev, "failed to read nvm\n");
2686                         return error;
2687                 }
2688                 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->sc_nvm.hw_addr);
2689
2690                 return 0;
2691         }
2692
2693         if ((error = iwm_send_bt_init_conf(sc)) != 0) {
2694                 device_printf(sc->sc_dev,
2695                     "failed to send bt coex configuration: %d\n", error);
2696                 return error;
2697         }
2698
2699         /* Init Smart FIFO. */
2700         error = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
2701         if (error != 0)
2702                 return error;
2703
2704         /* Send TX valid antennas before triggering calibrations */
2705         if ((error = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc))) != 0) {
2706                 device_printf(sc->sc_dev,
2707                     "failed to send antennas before calibration: %d\n", error);
2708                 return error;
2709         }
2710
2711         /*
2712          * Send phy configurations command to init uCode
2713          * to start the 16.0 uCode init image internal calibrations.
2714          */
2715         if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
2716                 device_printf(sc->sc_dev,
2717                     "%s: failed to run internal calibration: %d\n",
2718                     __func__, error);
2719                 return error;
2720         }
2721
2722         /*
2723          * Nothing to do but wait for the init complete notification
2724          * from the firmware
2725          */
2726         while (!sc->sc_init_complete) {
2727                 error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
2728                                  0, "iwminit", 2*hz);
2729                 if (error) {
2730                         device_printf(sc->sc_dev, "init complete failed: %d\n",
2731                                 sc->sc_init_complete);
2732                         break;
2733                 }
2734         }
2735
2736         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "init %scomplete\n",
2737             sc->sc_init_complete ? "" : "not ");
2738
2739         return error;
2740 }
2741
2742 /*
2743  * receive side
2744  */
2745
2746 /* (re)stock rx ring, called at init-time and at runtime */
2747 static int
2748 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
2749 {
2750         struct iwm_rx_ring *ring = &sc->rxq;
2751         struct iwm_rx_data *data = &ring->data[idx];
2752         struct mbuf *m;
2753         bus_dmamap_t dmamap = NULL;
2754         bus_dma_segment_t seg;
2755         int nsegs, error;
2756
2757         m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
2758         if (m == NULL)
2759                 return ENOBUFS;
2760
2761         m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2762         error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
2763             &seg, &nsegs, BUS_DMA_NOWAIT);
2764         if (error != 0) {
2765                 device_printf(sc->sc_dev,
2766                     "%s: can't map mbuf, error %d\n", __func__, error);
2767                 goto fail;
2768         }
2769
2770         if (data->m != NULL)
2771                 bus_dmamap_unload(ring->data_dmat, data->map);
2772
2773         /* Swap ring->spare_map with data->map */
2774         dmamap = data->map;
2775         data->map = ring->spare_map;
2776         ring->spare_map = dmamap;
2777
2778         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
2779         data->m = m;
2780
2781         /* Update RX descriptor. */
2782         KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
2783         ring->desc[idx] = htole32(seg.ds_addr >> 8);
2784         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2785             BUS_DMASYNC_PREWRITE);
2786
2787         return 0;
2788 fail:
2789         m_freem(m);
2790         return error;
2791 }
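
/*
 * Note on the descriptor encoding above (illustrative example): the
 * hardware expects the RX buffer's DMA address shifted right by 8,
 * which is why the KASSERT demands 256-byte alignment.  A buffer at
 * physical address 0x12345600, say, is stored as htole32(0x123456).
 */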
2792
2793 /* iwlwifi: mvm/rx.c */
2794 #define IWM_RSSI_OFFSET 50
2795 static int
2796 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2797 {
2798         int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
2799         uint32_t agc_a, agc_b;
2800         uint32_t val;
2801
2802         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
2803         agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
2804         agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
2805
2806         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
2807         rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
2808         rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
2809
2810         /*
2811          * dBm = rssi dB - agc dB - constant.
2812          * Higher AGC (higher radio gain) means lower signal.
2813          */
2814         rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
2815         rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
2816         max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
2817
2818         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2819             "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
2820             rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
2821
2822         return max_rssi_dbm;
2823 }
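
/*
 * Worked example for the formula above (made-up numbers): with
 * rssi_a = 30, agc_a = 25 and IWM_RSSI_OFFSET = 50, antenna A yields
 * 30 - 50 - 25 = -45 dBm; with rssi_b = 20 and agc_b = 35, antenna B
 * yields 20 - 50 - 35 = -65 dBm, so MAX() reports -45 dBm.
 */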
2824
2825 /* iwlwifi: mvm/rx.c */
2826 /*
2827  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
2828  * values are reported by the fw as positive values - need to negate
2829  * them to obtain their dBm.  Account for missing antennas by replacing 0
2830  * values with -256 dBm: practically zero power and a non-feasible 8-bit value.
2831  */
2832 static int
2833 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2834 {
2835         int energy_a, energy_b, energy_c, max_energy;
2836         uint32_t val;
2837
2838         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
2839         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
2840             IWM_RX_INFO_ENERGY_ANT_A_POS;
2841         energy_a = energy_a ? -energy_a : -256;
2842         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
2843             IWM_RX_INFO_ENERGY_ANT_B_POS;
2844         energy_b = energy_b ? -energy_b : -256;
2845         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
2846             IWM_RX_INFO_ENERGY_ANT_C_POS;
2847         energy_c = energy_c ? -energy_c : -256;
2848         max_energy = MAX(energy_a, energy_b);
2849         max_energy = MAX(max_energy, energy_c);
2850
2851         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2852             "energy In A %d B %d C %d , and max %d\n",
2853             energy_a, energy_b, energy_c, max_energy);
2854
2855         return max_energy;
2856 }
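
/*
 * Worked example (made-up numbers): if the firmware reports
 * energy_a = 45, energy_b = 60 and a missing antenna C (0), the
 * values become -45, -60 and -256 dBm, so max_energy = -45.
 */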
2857
2858 static void
2859 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
2860         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2861 {
2862         struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
2863
2864         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
2865         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2866
2867         memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
2868 }
2869
2870 /*
2871  * Retrieve the average noise (in dBm) among receivers.
2872  */
2873 static int
2874 iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
2875 {
2876         int i, total, nbant, noise;
2877
2878         total = nbant = noise = 0;
2879         for (i = 0; i < 3; i++) {
2880                 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
2881                 if (noise) {
2882                         total += noise;
2883                         nbant++;
2884                 }
2885         }
2886
2887         /* There should be at least one antenna but check anyway. */
2888         return (nbant == 0) ? -127 : (total / nbant) - 107;
2889 }
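
/*
 * Worked example (made-up numbers): beacon silence RSSI readings of
 * 40, 44 and 0 (idle third antenna) average to (40 + 44) / 2 = 42,
 * giving a reported noise floor of 42 - 107 = -65 dBm.
 */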
2890
2891 /*
2892  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
2893  *
2894  * Handles the actual data of the Rx packet from the fw
2895  */
2896 static void
2897 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
2898         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2899 {
2900         struct ieee80211com *ic = &sc->sc_ic;
2901         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2902         struct ieee80211_frame *wh;
2903         struct ieee80211_node *ni;
2904         struct ieee80211_rx_stats rxs;
2905         struct mbuf *m;
2906         struct iwm_rx_phy_info *phy_info;
2907         struct iwm_rx_mpdu_res_start *rx_res;
2908         uint32_t len;
2909         uint32_t rx_pkt_status;
2910         int rssi;
2911
2912         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2913
2914         phy_info = &sc->sc_last_phy_info;
2915         rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
2916         wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
2917         len = le16toh(rx_res->byte_count);
2918         rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
2919
2920         m = data->m;
2921         m->m_data = pkt->data + sizeof(*rx_res);
2922         m->m_pkthdr.len = m->m_len = len;
2923
2924         if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
2925                 device_printf(sc->sc_dev,
2926                     "dsp size out of range [0,20]: %d\n",
2927                     phy_info->cfg_phy_cnt);
2928                 return;
2929         }
2930
2931         if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
2932             !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
2933                 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2934                     "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
2935                 return; /* drop */
2936         }
2937
2938         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
2939                 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
2940         } else {
2941                 rssi = iwm_mvm_calc_rssi(sc, phy_info);
2942         }
2943         rssi = (0 - IWM_MIN_DBM) + rssi;        /* normalize */
2944         rssi = MIN(rssi, sc->sc_max_rssi);      /* clip to max. 100% */
2945
2946         /* replenish ring for the buffer we're going to feed to the sharks */
2947         if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
2948                 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
2949                     __func__);
2950                 return;
2951         }
2952
2953         ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
2954
2955         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2956             "%s: phy_info: channel=%d, flags=0x%08x\n",
2957             __func__,
2958             le16toh(phy_info->channel),
2959             le16toh(phy_info->phy_flags));
2960
2961         /*
2962          * Populate an RX state struct with the provided information.
2963          */
2964         bzero(&rxs, sizeof(rxs));
2965         rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
2966         rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
2967         rxs.c_ieee = le16toh(phy_info->channel);
2968         if (phy_info->phy_flags & htole16(IWM_RX_RES_PHY_FLAGS_BAND_24)) {
2969                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
2970         } else {
2971                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
2972         }
2973         rxs.rssi = rssi - sc->sc_noise;
2974         rxs.nf = sc->sc_noise;
2975
2976         if (ieee80211_radiotap_active_vap(vap)) {
2977                 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
2978
2979                 tap->wr_flags = 0;
2980                 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
2981                         tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2982                 tap->wr_chan_freq = htole16(rxs.c_freq);
2983                 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
2984                 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
2985                 tap->wr_dbm_antsignal = (int8_t)rssi;
2986                 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
2987                 tap->wr_tsft = phy_info->system_timestamp;
2988                 switch (phy_info->rate) {
2989                 /* CCK rates. */
2990                 case  10: tap->wr_rate =   2; break;
2991                 case  20: tap->wr_rate =   4; break;
2992                 case  55: tap->wr_rate =  11; break;
2993                 case 110: tap->wr_rate =  22; break;
2994                 /* OFDM rates. */
2995                 case 0xd: tap->wr_rate =  12; break;
2996                 case 0xf: tap->wr_rate =  18; break;
2997                 case 0x5: tap->wr_rate =  24; break;
2998                 case 0x7: tap->wr_rate =  36; break;
2999                 case 0x9: tap->wr_rate =  48; break;
3000                 case 0xb: tap->wr_rate =  72; break;
3001                 case 0x1: tap->wr_rate =  96; break;
3002                 case 0x3: tap->wr_rate = 108; break;
3003                 /* Unknown rate: should not happen. */
3004                 default:  tap->wr_rate =   0;
3005                 }
3006         }
3007
3008         IWM_UNLOCK(sc);
3009         if (ni != NULL) {
3010                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3011                 ieee80211_input_mimo(ni, m, &rxs);
3012                 ieee80211_free_node(ni);
3013         } else {
3014                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3015                 ieee80211_input_mimo_all(ic, m, &rxs);
3016         }
3017         IWM_LOCK(sc);
3018 }
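
/*
 * Layout of the packet parsed above, for reference:
 *
 *   pkt->data: [iwm_rx_mpdu_res_start][802.11 frame, byte_count bytes]
 *              [uint32_t rx_pkt_status]
 *
 * hence the status word is read at sizeof(*rx_res) + len, and the mbuf
 * data pointer is advanced past the res_start header before input.
 */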
3019
3020 static int
3021 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3022         struct iwm_node *in)
3023 {
3024         struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3025         struct ieee80211_node *ni = &in->in_ni;
3026         struct ieee80211vap *vap = ni->ni_vap;
3027         int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3028         int failack = tx_resp->failure_frame;
3029
3030         KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3031
3032         /* Update rate control statistics. */
3033         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3034             __func__,
3035             (int) le16toh(tx_resp->status.status),
3036             (int) le16toh(tx_resp->status.sequence),
3037             tx_resp->frame_count,
3038             tx_resp->bt_kill_count,
3039             tx_resp->failure_rts,
3040             tx_resp->failure_frame,
3041             le32toh(tx_resp->initial_rate),
3042             (int) le16toh(tx_resp->wireless_media_time));
3043
3044         if (status != IWM_TX_STATUS_SUCCESS &&
3045             status != IWM_TX_STATUS_DIRECT_DONE) {
3046                 ieee80211_ratectl_tx_complete(vap, ni,
3047                     IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3048                 return (1);
3049         } else {
3050                 ieee80211_ratectl_tx_complete(vap, ni,
3051                     IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3052                 return (0);
3053         }
3054 }
3055
3056 static void
3057 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
3058         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3059 {
3060         struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3061         int idx = cmd_hdr->idx;
3062         int qid = cmd_hdr->qid;
3063         struct iwm_tx_ring *ring = &sc->txq[qid];
3064         struct iwm_tx_data *txd = &ring->data[idx];
3065         struct iwm_node *in = txd->in;
3066         struct mbuf *m = txd->m;
3067         int status;
3068
3069         KASSERT(txd->done == 0, ("txd not done"));
3070         KASSERT(txd->in != NULL, ("txd without node"));
3071         KASSERT(txd->m != NULL, ("txd without mbuf"));
3072
3073         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3074
3075         sc->sc_tx_timer = 0;
3076
3077         status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3078
3079         /* Unmap and free mbuf. */
3080         bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3081         bus_dmamap_unload(ring->data_dmat, txd->map);
3082
3083         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3084             "free txd %p, in %p\n", txd, txd->in);
3085         txd->done = 1;
3086         txd->m = NULL;
3087         txd->in = NULL;
3088
3089         ieee80211_tx_complete(&in->in_ni, m, status);
3090
3091         if (--ring->queued < IWM_TX_RING_LOMARK) {
3092                 sc->qfullmsk &= ~(1 << ring->qid);
3093                 if (sc->qfullmsk == 0) {
3094                         /*
3095                          * Well, we're in interrupt context, but then again
3096                          * I guess net80211 does all sorts of stunts in
3097                          * interrupt context, so maybe this is no biggie.
3098                          */
3099                         iwm_start(sc);
3100                 }
3101         }
3102 }
3103
3104 /*
3105  * transmit side
3106  */
3107
3108 /*
3109  * Process a "command done" firmware notification.  This is where we wake up
3110  * processes waiting for a synchronous command completion.
3111  * Adapted from if_iwn.
3112  */
3113 static void
3114 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3115 {
3116         struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3117         struct iwm_tx_data *data;
3118
3119         if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3120                 return; /* Not a command ack. */
3121         }
3122
3123         /* XXX wide commands? */
3124         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3125             "cmd notification type 0x%x qid %d idx %d\n",
3126             pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3127
3128         data = &ring->data[pkt->hdr.idx];
3129
3130         /* If the command was mapped in an mbuf, free it. */
3131         if (data->m != NULL) {
3132                 bus_dmamap_sync(ring->data_dmat, data->map,
3133                     BUS_DMASYNC_POSTWRITE);
3134                 bus_dmamap_unload(ring->data_dmat, data->map);
3135                 m_freem(data->m);
3136                 data->m = NULL;
3137         }
3138         wakeup(&ring->desc[pkt->hdr.idx]);
3139 }
3140
3141 #if 0
3142 /*
3143  * necessary only for block ack mode
3144  */
3145 void
3146 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3147         uint16_t len)
3148 {
3149         struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3150         uint16_t w_val;
3151
3152         scd_bc_tbl = sc->sched_dma.vaddr;
3153
3154         len += 8; /* magic numbers came naturally from paris */
3155         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
3156                 len = roundup(len, 4) / 4;
3157
3158         w_val = htole16(sta_id << 12 | len);
3159
3160         /* Update TX scheduler. */
3161         scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3162         bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3163             BUS_DMASYNC_PREWRITE);
3164
3165         /* I really wonder what this is ?!? */
3166         if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3167                 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3168                 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3169                     BUS_DMASYNC_PREWRITE);
3170         }
3171 }
3172 #endif
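
/*
 * Encoding sketch for the (disabled) scheduler update above, with
 * made-up numbers: sta_id 1 and a 64-byte frame give len = 72, or
 * 18 dwords when IWM_UCODE_TLV_FLAGS_DW_BC_TABLE is set, packing into
 * the byte-count table as htole16(1 << 12 | 18) == htole16(0x1012).
 */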
3173
3174 /*
3175  * Take an 802.11 (non-n) rate and find the relevant rate
3176  * table entry.  Return the index into in_ridx[].
3177  *
3178  * The caller then uses that index into in_ridx[]
3179  * to figure out the rate index programmed /into/
3180  * the firmware for this given node.
3181  */
3182 static int
3183 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3184     uint8_t rate)
3185 {
3186         int i;
3187         uint8_t r;
3188
3189         for (i = 0; i < nitems(in->in_ridx); i++) {
3190                 r = iwm_rates[in->in_ridx[i]].rate;
3191                 if (rate == r)
3192                         return (i);
3193         }
3194         /* XXX Return the first */
3195         /* XXX TODO: have it return the /lowest/ */
3196         return (0);
3197 }
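
/*
 * Usage sketch (illustrative): iwm_setrates() fills in_ridx[] highest
 * rate first, so for a CCK-only peer advertising 11/5.5/2/1 Mb/s a
 * ni_txrate of 22 (11 Mb/s in 500 kb/s units) matches at i = 0, and
 * iwm_tx_fill_cmd() then programs that index as initial_rate_index.
 */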
3198
3199 /*
3200  * Fill in the rate related information for a transmit command.
3201  */
3202 static const struct iwm_rate *
3203 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3204         struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
3205 {
3206         struct ieee80211com *ic = &sc->sc_ic;
3207         struct ieee80211_node *ni = &in->in_ni;
3208         const struct iwm_rate *rinfo;
3209         int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3210         int ridx, rate_flags;
3211
3212         tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3213         tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3214
3215         /*
3216          * XXX TODO: everything about the rate selection here is terrible!
3217          */
3218
3219         if (type == IEEE80211_FC0_TYPE_DATA) {
3220                 int i;
3221                 /* for data frames, use RS table */
3222                 (void) ieee80211_ratectl_rate(ni, NULL, 0);
3223                 i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3224                 ridx = in->in_ridx[i];
3225
3226                 /* This is the index into the programmed table */
3227                 tx->initial_rate_index = i;
3228                 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3229                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3230                     "%s: start with i=%d, txrate %d\n",
3231                     __func__, i, iwm_rates[ridx].rate);
3232         } else {
3233                 /*
3234                  * For non-data, use the lowest supported rate for the given
3235                  * operational mode.
3236                  *
3237                  * Note: there may not be any rate control information available.
3238                  * This driver currently assumes that if we're transmitting data
3239                  * frames, the rate control table applies.  Grr.
3240                  *
3241                  * XXX TODO: use the configured rate for the traffic type!
3242                  * XXX TODO: this should be per-vap, not curmode; later on
3243                  * we'll want to handle off-channel stuff (eg TDLS).
3244                  */
3245                 if (ic->ic_curmode == IEEE80211_MODE_11A) {
3246                         /*
3247                          * XXX this assumes the mode is either 11a or not 11a;
3248                          * definitely won't work for 11n.
3249                          */
3250                         ridx = IWM_RIDX_OFDM;
3251                 } else {
3252                         ridx = IWM_RIDX_CCK;
3253                 }
3254         }
3255
3256         rinfo = &iwm_rates[ridx];
3257
3258         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3259             __func__, ridx,
3260             rinfo->rate,
3261             !! (IWM_RIDX_IS_CCK(ridx))
3262             );
3263
3264         /* XXX TODO: hard-coded TX antenna? */
3265         rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3266         if (IWM_RIDX_IS_CCK(ridx))
3267                 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3268         tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3269
3270         return rinfo;
3271 }
3272
3273 #define TB0_SIZE 16
3274 static int
3275 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3276 {
3277         struct ieee80211com *ic = &sc->sc_ic;
3278         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3279         struct iwm_node *in = IWM_NODE(ni);
3280         struct iwm_tx_ring *ring;
3281         struct iwm_tx_data *data;
3282         struct iwm_tfd *desc;
3283         struct iwm_device_cmd *cmd;
3284         struct iwm_tx_cmd *tx;
3285         struct ieee80211_frame *wh;
3286         struct ieee80211_key *k = NULL;
3287         struct mbuf *m1;
3288         const struct iwm_rate *rinfo;
3289         uint32_t flags;
3290         u_int hdrlen;
3291         bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3292         int nsegs;
3293         uint8_t tid, type;
3294         int i, totlen, error, pad;
3295
3296         wh = mtod(m, struct ieee80211_frame *);
3297         hdrlen = ieee80211_anyhdrsize(wh);
3298         type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3299         tid = 0;
3300         ring = &sc->txq[ac];
3301         desc = &ring->desc[ring->cur];
3302         memset(desc, 0, sizeof(*desc));
3303         data = &ring->data[ring->cur];
3304
3305         /* Fill out iwm_tx_cmd to send to the firmware */
3306         cmd = &ring->cmd[ring->cur];
3307         cmd->hdr.code = IWM_TX_CMD;
3308         cmd->hdr.flags = 0;
3309         cmd->hdr.qid = ring->qid;
3310         cmd->hdr.idx = ring->cur;
3311
3312         tx = (void *)cmd->data;
3313         memset(tx, 0, sizeof(*tx));
3314
3315         rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
3316
3317         /* Encrypt the frame if need be. */
3318         if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3319                 /* Retrieve key for TX && do software encryption. */
3320                 k = ieee80211_crypto_encap(ni, m);
3321                 if (k == NULL) {
3322                         m_freem(m);
3323                         return (ENOBUFS);
3324                 }
3325                 /* 802.11 header may have moved. */
3326                 wh = mtod(m, struct ieee80211_frame *);
3327         }
3328
3329         if (ieee80211_radiotap_active_vap(vap)) {
3330                 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3331
3332                 tap->wt_flags = 0;
3333                 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3334                 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3335                 tap->wt_rate = rinfo->rate;
3336                 if (k != NULL)
3337                         tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3338                 ieee80211_radiotap_tx(vap, m);
3339         }
3340
3341
3342         totlen = m->m_pkthdr.len;
3343
3344         flags = 0;
3345         if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3346                 flags |= IWM_TX_CMD_FLG_ACK;
3347         }
3348
3349         if (type == IEEE80211_FC0_TYPE_DATA
3350             && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3351             && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3352                 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3353         }
3354
3355         if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3356             type != IEEE80211_FC0_TYPE_DATA)
3357                 tx->sta_id = sc->sc_aux_sta.sta_id;
3358         else
3359                 tx->sta_id = IWM_STATION_ID;
3360
3361         if (type == IEEE80211_FC0_TYPE_MGT) {
3362                 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3363
3364                 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3365                     subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3366                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3367                 } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3368                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3369                 } else {
3370                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3371                 }
3372         } else {
3373                 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3374         }
3375
3376         if (hdrlen & 3) {
3377                 /* First segment length must be a multiple of 4. */
3378                 flags |= IWM_TX_CMD_FLG_MH_PAD;
3379                 pad = 4 - (hdrlen & 3);
3380         } else
3381                 pad = 0;
3382
3383         tx->driver_txop = 0;
3384         tx->next_frame_len = 0;
3385
3386         tx->len = htole16(totlen);
3387         tx->tid_tspec = tid;
3388         tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3389
3390         /* Set physical address of "scratch area". */
3391         tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3392         tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3393
3394         /* Copy 802.11 header in TX command. */
3395         memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3396
3397         flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3398
3399         tx->sec_ctl = 0;
3400         tx->tx_flags |= htole32(flags);
3401
3402         /* Trim 802.11 header. */
3403         m_adj(m, hdrlen);
3404         error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3405             segs, &nsegs, BUS_DMA_NOWAIT);
3406         if (error != 0) {
3407                 if (error != EFBIG) {
3408                         device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3409                             error);
3410                         m_freem(m);
3411                         return error;
3412                 }
3413                 /* Too many DMA segments, linearize mbuf. */
3414                 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3415                 if (m1 == NULL) {
3416                         device_printf(sc->sc_dev,
3417                             "%s: could not defrag mbuf\n", __func__);
3418                         m_freem(m);
3419                         return (ENOBUFS);
3420                 }
3421                 m = m1;
3422
3423                 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3424                     segs, &nsegs, BUS_DMA_NOWAIT);
3425                 if (error != 0) {
3426                         device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3427                             error);
3428                         m_freem(m);
3429                         return error;
3430                 }
3431         }
3432         data->m = m;
3433         data->in = in;
3434         data->done = 0;
3435
3436         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3437             "sending txd %p, in %p\n", data, data->in);
3438         KASSERT(data->in != NULL, ("node is NULL"));
3439
3440         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3441             "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3442             ring->qid, ring->cur, totlen, nsegs,
3443             le32toh(tx->tx_flags),
3444             le32toh(tx->rate_n_flags),
3445             tx->initial_rate_index
3446             );
3447
3448         /* Fill TX descriptor. */
3449         desc->num_tbs = 2 + nsegs;
3450
3451         desc->tbs[0].lo = htole32(data->cmd_paddr);
3452         desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3453             (TB0_SIZE << 4);
3454         desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3455         desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3456             ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3457               + hdrlen + pad - TB0_SIZE) << 4);
3458
3459         /* Other DMA segments are for data payload. */
3460         for (i = 0; i < nsegs; i++) {
3461                 seg = &segs[i];
3462                 desc->tbs[i+2].lo = htole32(seg->ds_addr);
3463                 desc->tbs[i+2].hi_n_len =
3464                     htole16(iwm_get_dma_hi_addr(seg->ds_addr))
3465                     | ((seg->ds_len) << 4);
3466         }
3467
3468         bus_dmamap_sync(ring->data_dmat, data->map,
3469             BUS_DMASYNC_PREWRITE);
3470         bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3471             BUS_DMASYNC_PREWRITE);
3472         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3473             BUS_DMASYNC_PREWRITE);
3474
3475 #if 0
3476         iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3477 #endif
3478
3479         /* Kick TX ring. */
3480         ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3481         IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3482
3483         /* Mark TX ring as full if we reach a certain threshold. */
3484         if (++ring->queued > IWM_TX_RING_HIMARK) {
3485                 sc->qfullmsk |= 1 << ring->qid;
3486         }
3487
3488         return 0;
3489 }
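
/*
 * TFD layout built above (sketch):
 *
 *   tbs[0]   first TB0_SIZE (16) bytes of the command buffer
 *   tbs[1]   rest of the command header, the iwm_tx_cmd and the
 *            (padded) 802.11 header, from the same command buffer
 *   tbs[2+]  one entry per DMA segment of the frame payload
 *
 * so num_tbs = 2 + nsegs; each hi_n_len packs the high address bits
 * in its low bits and the segment length shifted left by 4.
 */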
3490
3491 static int
3492 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3493     const struct ieee80211_bpf_params *params)
3494 {
3495         struct ieee80211com *ic = ni->ni_ic;
3496         struct iwm_softc *sc = ic->ic_softc;
3497         int error = 0;
3498
3499         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3500             "->%s begin\n", __func__);
3501
3502         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3503                 m_freem(m);
3504                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3505                     "<-%s not RUNNING\n", __func__);
3506                 return (ENETDOWN);
3507         }
3508
3509         IWM_LOCK(sc);
3510         /* XXX fix this */
3511         if (params == NULL) {
3512                 error = iwm_tx(sc, m, ni, 0);
3513         } else {
3514                 error = iwm_tx(sc, m, ni, 0);
3515         }
3516         sc->sc_tx_timer = 5;
3517         IWM_UNLOCK(sc);
3518
3519         return (error);
3520 }
3521
3522 /*
3523  * mvm/tx.c
3524  */
3525
3526 #if 0
3527 /*
3528  * Note that there are transports that buffer frames before they reach
3529  * the firmware. This means that after flush_tx_path is called, the
3530  * queue might not be empty. The race-free way to handle this is to:
3531  * 1) set the station as draining
3532  * 2) flush the Tx path
3533  * 3) wait for the transport queues to be empty
3534  */
3535 int
3536 iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
3537 {
3538         struct iwm_tx_path_flush_cmd flush_cmd = {
3539                 .queues_ctl = htole32(tfd_msk),
3540                 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3541         };
3542         int ret;
3543
3544         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
3545             sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
3546             sizeof(flush_cmd), &flush_cmd);
3547         if (ret)
3548                 device_printf(sc->sc_dev,
3549                     "Flushing tx queue failed: %d\n", ret);
3550         return ret;
3551 }
3552 #endif
3553
3554 /*
3555  * BEGIN mvm/sta.c
3556  */
3557
3558 static int
3559 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3560         struct iwm_mvm_add_sta_cmd_v7 *cmd, int *status)
3561 {
3562         return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
3563             cmd, status);
3564 }
3565
3566 /* send station add/update command to firmware */
3567 static int
3568 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3569 {
3570         struct iwm_mvm_add_sta_cmd_v7 add_sta_cmd;
3571         int ret;
3572         uint32_t status;
3573
3574         memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3575
3576         add_sta_cmd.sta_id = IWM_STATION_ID;
3577         add_sta_cmd.mac_id_n_color
3578             = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3579                 IWM_DEFAULT_COLOR));
3580         if (!update) {
3581                 int ac;
3582                 for (ac = 0; ac < WME_NUM_AC; ac++) {
3583                         add_sta_cmd.tfd_queue_msk |=
3584                             htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
3585                 }
3586                 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3587         }
3588         add_sta_cmd.add_modify = update ? 1 : 0;
3589         add_sta_cmd.station_flags_msk
3590             |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3591         add_sta_cmd.tid_disable_tx = htole16(0xffff);
3592         if (update)
3593                 add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
3594
3595         status = IWM_ADD_STA_SUCCESS;
3596         ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3597         if (ret)
3598                 return ret;
3599
3600         switch (status) {
3601         case IWM_ADD_STA_SUCCESS:
3602                 break;
3603         default:
3604                 ret = EIO;
3605                 device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3606                 break;
3607         }
3608
3609         return ret;
3610 }
3611
3612 static int
3613 iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
3614 {
3615         return iwm_mvm_sta_send_to_fw(sc, in, 0);
3616 }
3617
3618 static int
3619 iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
3620 {
3621         return iwm_mvm_sta_send_to_fw(sc, in, 1);
3622 }
3623
3624 static int
3625 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3626         const uint8_t *addr, uint16_t mac_id, uint16_t color)
3627 {
3628         struct iwm_mvm_add_sta_cmd_v7 cmd;
3629         int ret;
3630         uint32_t status;
3631
3632         memset(&cmd, 0, sizeof(cmd));
3633         cmd.sta_id = sta->sta_id;
3634         cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3635
3636         cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3637         cmd.tid_disable_tx = htole16(0xffff);
3638
3639         if (addr)
3640                 IEEE80211_ADDR_COPY(cmd.addr, addr);
3641
3642         ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3643         if (ret)
3644                 return ret;
3645
3646         switch (status) {
3647         case IWM_ADD_STA_SUCCESS:
3648                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3649                     "%s: Internal station added.\n", __func__);
3650                 return 0;
3651         default:
3652                 device_printf(sc->sc_dev,
3653                     "%s: Add internal station failed, status=0x%x\n",
3654                     __func__, status);
3655                 ret = EIO;
3656                 break;
3657         }
3658         return ret;
3659 }
3660
3661 static int
3662 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3663 {
3664         int ret;
3665
3666         sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
3667         sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
3668
3669         ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
3670         if (ret)
3671                 return ret;
3672
3673         ret = iwm_mvm_add_int_sta_common(sc,
3674             &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
3675
3676         if (ret)
3677                 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
3678         return ret;
3679 }
3680
3681 /*
3682  * END mvm/sta.c
3683  */
3684
3685 /*
3686  * BEGIN mvm/quota.c
3687  */
3688
3689 static int
3690 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
3691 {
3692         struct iwm_time_quota_cmd cmd;
3693         int i, idx, ret, num_active_macs, quota, quota_rem;
3694         int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3695         int n_ifs[IWM_MAX_BINDINGS] = {0, };
3696         uint16_t id;
3697
3698         memset(&cmd, 0, sizeof(cmd));
3699
3700         /* currently, PHY ID == binding ID */
3701         if (in) {
3702                 id = in->in_phyctxt->id;
3703                 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3704                 colors[id] = in->in_phyctxt->color;
3705
3706                 if (1)
3707                         n_ifs[id] = 1;
3708         }
3709
3710         /*
3711          * The FW's scheduling session consists of
3712          * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3713          * equally between all the bindings that require quota
3714          */
3715         num_active_macs = 0;
3716         for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3717                 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3718                 num_active_macs += n_ifs[i];
3719         }
3720
3721         quota = 0;
3722         quota_rem = 0;
3723         if (num_active_macs) {
3724                 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3725                 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3726         }
3727
3728         for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3729                 if (colors[i] < 0)
3730                         continue;
3731
3732                 cmd.quotas[idx].id_and_color =
3733                         htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3734
3735                 if (n_ifs[i] <= 0) {
3736                         cmd.quotas[idx].quota = htole32(0);
3737                         cmd.quotas[idx].max_duration = htole32(0);
3738                 } else {
3739                         cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3740                         cmd.quotas[idx].max_duration = htole32(0);
3741                 }
3742                 idx++;
3743         }
3744
3745         /* Give the remainder of the session to the first binding */
3746         cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3747
3748         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3749             sizeof(cmd), &cmd);
3750         if (ret)
3751                 device_printf(sc->sc_dev,
3752                     "%s: Failed to send quota: %d\n", __func__, ret);
3753         return ret;
3754 }
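
/*
 * Worked example for the split above: with a single active binding,
 * quota = IWM_MVM_MAX_QUOTA and quota_rem = 0, so that binding gets
 * the whole scheduling session; with three bindings each gets a third
 * and the remainder (at most two fragments) is credited to quotas[0].
 */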
3755
3756 /*
3757  * END mvm/quota.c
3758  */
3759
3760 /*
3761  * ieee80211 routines
3762  */
3763
3764 /*
3765  * Change to AUTH state in 80211 state machine.  Roughly matches what
3766  * Linux does in bss_info_changed().
3767  */
3768 static int
3769 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3770 {
3771         struct ieee80211_node *ni;
3772         struct iwm_node *in;
3773         struct iwm_vap *iv = IWM_VAP(vap);
3774         uint32_t duration;
3775         int error;
3776
3777         /*
3778          * XXX I have a feeling that the vap node is being
3779          * freed from underneath us. Grr.
3780          */
3781         ni = ieee80211_ref_node(vap->iv_bss);
3782         in = IWM_NODE(ni);
3783         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3784             "%s: called; vap=%p, bss ni=%p\n",
3785             __func__,
3786             vap,
3787             ni);
3788
3789         in->in_assoc = 0;
3790
3791         error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
3792         if (error != 0)
3793                 return error;
3794
3795         error = iwm_allow_mcast(vap, sc);
3796         if (error) {
3797                 device_printf(sc->sc_dev,
3798                     "%s: failed to set multicast\n", __func__);
3799                 goto out;
3800         }
3801
3802         /*
3803          * This is where it deviates from what Linux does.
3804          *
3805          * Linux iwlwifi doesn't reset the nic each time, nor does it
3806          * call ctxt_add() here.  Instead, it adds it during vap creation,
3807          * and always does a mac_ctx_changed().
3808          *
3809          * The openbsd port doesn't attempt to do that - it resets things
3810          * at odd states and does the add here.
3811          *
3812          * So, until the state handling is fixed (ie, we never reset
3813          * the NIC except for a firmware failure, which should drag
3814          * the NIC back to IDLE, re-setup and re-add all the mac/phy
3815          * contexts that are required), let's do a dirty hack here.
3816          */
3817         if (iv->is_uploaded) {
3818                 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3819                         device_printf(sc->sc_dev,
3820                             "%s: failed to update MAC\n", __func__);
3821                         goto out;
3822                 }
3823                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3824                     in->in_ni.ni_chan, 1, 1)) != 0) {
3825                         device_printf(sc->sc_dev,
3826                             "%s: failed update phy ctxt\n", __func__);
3827                         goto out;
3828                 }
3829                 in->in_phyctxt = &sc->sc_phyctxt[0];
3830
3831                 if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
3832                         device_printf(sc->sc_dev,
3833                             "%s: binding update cmd failed\n", __func__);
3834                         goto out;
3835                 }
3836                 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3837                         device_printf(sc->sc_dev,
3838                             "%s: failed to update sta\n", __func__);
3839                         goto out;
3840                 }
3841         } else {
3842                 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
3843                         device_printf(sc->sc_dev,
3844                             "%s: failed to add MAC\n", __func__);
3845                         goto out;
3846                 }
3847                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3848                     in->in_ni.ni_chan, 1, 1)) != 0) {
3849                         device_printf(sc->sc_dev,
3850                             "%s: failed add phy ctxt!\n", __func__);
3851                         error = ETIMEDOUT;
3852                         goto out;
3853                 }
3854                 in->in_phyctxt = &sc->sc_phyctxt[0];
3855
3856                 if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
3857                         device_printf(sc->sc_dev,
3858                             "%s: binding add cmd failed\n", __func__);
3859                         goto out;
3860                 }
3861                 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
3862                         device_printf(sc->sc_dev,
3863                             "%s: failed to add sta\n", __func__);
3864                         goto out;
3865                 }
3866         }
3867
3868         /*
3869          * Prevent the FW from wandering off channel during association
3870          * by "protecting" the session with a time event.
3871          */
3872         /* XXX duration is in units of TU, not MS */
3873         duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
3874         iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
3875         DELAY(100);
3876
3877         error = 0;
3878 out:
3879         ieee80211_free_node(ni);
3880         return (error);
3881 }
3882
3883 static int
3884 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
3885 {
3886         struct iwm_node *in = IWM_NODE(vap->iv_bss);
3887         int error;
3888
3889         if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3890                 device_printf(sc->sc_dev,
3891                     "%s: failed to update STA\n", __func__);
3892                 return error;
3893         }
3894
3895         in->in_assoc = 1;
3896         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3897                 device_printf(sc->sc_dev,
3898                     "%s: failed to update MAC\n", __func__);
3899                 return error;
3900         }
3901
3902         return 0;
3903 }
3904
3905 static int
3906 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
3907 {
3908         /*
3909          * Ok, so *technically* the proper set of calls for going
3910          * from RUN back to SCAN is:
3911          *
3912          * iwm_mvm_power_mac_disable(sc, in);
3913          * iwm_mvm_mac_ctxt_changed(sc, in);
3914          * iwm_mvm_rm_sta(sc, in);
3915          * iwm_mvm_update_quotas(sc, NULL);
3916          * iwm_mvm_mac_ctxt_changed(sc, in);
3917          * iwm_mvm_binding_remove_vif(sc, in);
3918          * iwm_mvm_mac_ctxt_remove(sc, in);
3919          *
3920          * However, that freezes the device no matter which permutations
3921          * and modifications are attempted.  Obviously, this driver is missing
3922          * something since it works in the Linux driver, but figuring out what
3923          * is missing is a little more complicated.  Now, since we're going
3924          * back to nothing anyway, we'll just do a complete device reset.
3925          * Up yours, device!
3926          */
3927         /* iwm_mvm_flush_tx_path(sc, 0xf, 1); */
3928         iwm_stop_device(sc);
3929         iwm_init_hw(sc);
3930         if (in)
3931                 in->in_assoc = 0;
3932         return 0;
3933
3934 #if 0
3935         int error;
3936
3937         iwm_mvm_power_mac_disable(sc, in);
3938
3939         if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
3940                 device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
3941                 return error;
3942         }
3943
3944         if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
3945                 device_printf(sc->sc_dev, "sta remove fail %d\n", error);
3946                 return error;
3947         }
3948         error = iwm_mvm_rm_sta(sc, in);
3949         in->in_assoc = 0;
3950         iwm_mvm_update_quotas(sc, NULL);
3951         if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
3952                 device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
3953                 return error;
3954         }
3955         iwm_mvm_binding_remove_vif(sc, in);
3956
3957         iwm_mvm_mac_ctxt_remove(sc, in);
3958
3959         return error;
3960 #endif
3961 }
3962
3963 static struct ieee80211_node *
3964 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3965 {
3966         return malloc(sizeof (struct iwm_node), M_80211_NODE,
3967             M_NOWAIT | M_ZERO);
3968 }
3969
3970 static void
3971 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
3972 {
3973         struct ieee80211_node *ni = &in->in_ni;
3974         struct iwm_lq_cmd *lq = &in->in_lq;
3975         int nrates = ni->ni_rates.rs_nrates;
3976         int i, ridx, tab = 0;
3977         int txant = 0;
3978
3979         if (nrates > nitems(lq->rs_table)) {
3980                 device_printf(sc->sc_dev,
3981                     "%s: node supports %d rates, driver handles "
3982                     "only %zu\n", __func__, nrates, nitems(lq->rs_table));
3983                 return;
3984         }
3985         if (nrates == 0) {
3986                 device_printf(sc->sc_dev,
3987                     "%s: node supports 0 rates, odd!\n", __func__);
3988                 return;
3989         }
3990
3991         /*
3992          * XXX .. and most of iwm_node is not initialised explicitly;
3993          * it's all just 0x0 passed to the firmware.
3994          */
3995
3996         /* first figure out which rates we should support */
3997         /* XXX TODO: this isn't 11n aware /at all/ */
3998         memset(&in->in_ridx, -1, sizeof(in->in_ridx));
3999         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4000             "%s: nrates=%d\n", __func__, nrates);
4001
4002         /*
4003          * Loop over nrates and populate in_ridx from the highest
4004          * rate to the lowest rate.  Remember, in_ridx[] has
4005          * IEEE80211_RATE_MAXSIZE entries!
4006          */
4007         for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
4008                 int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;
4009
4010                 /* Map 802.11 rate to HW rate index. */
4011                 for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
4012                         if (iwm_rates[ridx].rate == rate)
4013                                 break;
4014                 if (ridx > IWM_RIDX_MAX) {
4015                         device_printf(sc->sc_dev,
4016                             "%s: WARNING: device rate for %d not found!\n",
4017                             __func__, rate);
4018                 } else {
4019                         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4020                             "%s: rate: i: %d, rate=%d, ridx=%d\n",
4021                             __func__,
4022                             i,
4023                             rate,
4024                             ridx);
4025                         in->in_ridx[i] = ridx;
4026                 }
4027         }
4028
4029         /* then construct a lq_cmd based on those */
4030         memset(lq, 0, sizeof(*lq));
4031         lq->sta_id = IWM_STATION_ID;
4032
4033         /* For HT, always enable RTS/CTS to avoid excessive retries. */
4034         if (ni->ni_flags & IEEE80211_NODE_HT)
4035                 lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4036
4037         /*
4038          * are these used? (we don't do SISO or MIMO)
4039          * need to set them to non-zero, though, or we get an error.
4040          */
4041         lq->single_stream_ant_msk = 1;
4042         lq->dual_stream_ant_msk = 1;
4043
4044         /*
4045          * Build the actual rate selection table.
4046          * The lowest bits are the rates.  Additionally,
4047          * CCK needs bit 9 to be set.  The rest of the bits
4048          * we add to the table select the tx antenna.
4049          * Note that we add the rates highest rate first
4050          * (opposite of ni_rates).
4051          */
4052         /*
4053          * XXX TODO: this should be looping over the min of nrates
4054          * and LQ_MAX_RETRY_NUM.  Sigh.
4055          */
4056         for (i = 0; i < nrates; i++) {
4057                 int nextant;
4058
4059                 if (txant == 0)
4060                         txant = iwm_fw_valid_tx_ant(sc);
4061                 nextant = 1<<(ffs(txant)-1);
4062                 txant &= ~nextant;
4063
4064                 /*
4065                  * Map the rate id into a rate index into
4066                  * our hardware table containing the
4067                  * configuration to use for this rate.
4068                  */
4069                 ridx = in->in_ridx[i];
4070                 tab = iwm_rates[ridx].plcp;
4071                 tab |= nextant << IWM_RATE_MCS_ANT_POS;
4072                 if (IWM_RIDX_IS_CCK(ridx))
4073                         tab |= IWM_RATE_MCS_CCK_MSK;
4074                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4075                     "station rate i=%d, rate=%d, hw=%x\n",
4076                     i, iwm_rates[ridx].rate, tab);
4077                 lq->rs_table[i] = htole32(tab);
4078         }
4079         /* then fill the rest with the lowest possible rate */
4080         for (i = nrates; i < nitems(lq->rs_table); i++) {
4081                 KASSERT(tab != 0, ("invalid tab"));
4082                 lq->rs_table[i] = htole32(tab);
4083         }
4084 }
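
/*
 * Summary of the rs_table entries built above: each entry is the
 * rate's PLCP code OR'd with the chosen TX antenna in the
 * IWM_RATE_MCS_ANT_POS field (plus IWM_RATE_MCS_CCK_MSK for CCK),
 * byte-swapped for the firmware; entries run from the highest
 * supported rate downwards, padded out with the lowest rate.
 */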
4085
4086 static int
4087 iwm_media_change(struct ifnet *ifp)
4088 {
4089         struct ieee80211vap *vap = ifp->if_softc;
4090         struct ieee80211com *ic = vap->iv_ic;
4091         struct iwm_softc *sc = ic->ic_softc;
4092         int error;
4093
4094         error = ieee80211_media_change(ifp);
4095         if (error != ENETRESET)
4096                 return error;
4097
4098         IWM_LOCK(sc);
4099         if (ic->ic_nrunning > 0) {
4100                 iwm_stop(sc);
4101                 iwm_init(sc);
4102         }
4103         IWM_UNLOCK(sc);
4104         return error;
4105 }
4106
4107
4108 static int
4109 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4110 {
4111         struct iwm_vap *ivp = IWM_VAP(vap);
4112         struct ieee80211com *ic = vap->iv_ic;
4113         struct iwm_softc *sc = ic->ic_softc;
4114         struct iwm_node *in;
4115         int error;
4116
4117         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4118             "switching state %s -> %s\n",
4119             ieee80211_state_name[vap->iv_state],
4120             ieee80211_state_name[nstate]);
4121         IEEE80211_UNLOCK(ic);
4122         IWM_LOCK(sc);
4123
4124         if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
4125                 iwm_led_blink_stop(sc);
4126
4127         /* disable beacon filtering if we're hopping out of RUN */
4128         if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
4129                 iwm_mvm_disable_beacon_filter(sc);
4130
4131                 if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4132                         in->in_assoc = 0;
4133
4134                 iwm_release(sc, NULL);
4135
4136                 /*
4137                  * It's impossible to directly go RUN->SCAN. If we iwm_release()
4138                  * above then the card will be completely reinitialized,
4139                  * so the driver must do everything necessary to bring the card
4140                  * from INIT to SCAN.
4141                  *
4142                  * Additionally, upon receiving deauth frame from AP,
4143                  * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
4144                  * state. This will also fail with this driver, so bring the FSM
4145                  * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
4146                  *
4147                  * XXX TODO: fix this for FreeBSD!
4148                  */
4149                 if (nstate == IEEE80211_S_SCAN ||
4150                     nstate == IEEE80211_S_AUTH ||
4151                     nstate == IEEE80211_S_ASSOC) {
4152                         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4153                             "Force transition to INIT; MGT=%d\n", arg);
4154                         IWM_UNLOCK(sc);
4155                         IEEE80211_LOCK(ic);
4156                         /* Always pass arg as -1 since we can't Tx right now. */
4157                         /*
4158                          * XXX arg is just ignored anyway when transitioning
4159                          *     to IEEE80211_S_INIT.
4160                          */
4161                         vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
4162                         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4163                             "Going INIT->SCAN\n");
4164                         nstate = IEEE80211_S_SCAN;
4165                         IEEE80211_UNLOCK(ic);
4166                         IWM_LOCK(sc);
4167                 }
4168         }
4169
4170         switch (nstate) {
4171         case IEEE80211_S_INIT:
4172                 break;
4173
4174         case IEEE80211_S_AUTH:
4175                 if ((error = iwm_auth(vap, sc)) != 0) {
4176                         device_printf(sc->sc_dev,
4177                             "%s: could not move to auth state: %d\n",
4178                             __func__, error);
4179                         break;
4180                 }
4181                 break;
4182
4183         case IEEE80211_S_ASSOC:
4184                 if ((error = iwm_assoc(vap, sc)) != 0) {
4185                         device_printf(sc->sc_dev,
4186                             "%s: failed to associate: %d\n", __func__,
4187                             error);
4188                         break;
4189                 }
4190                 break;
4191
4192         case IEEE80211_S_RUN:
4193         {
4194                 struct iwm_host_cmd cmd = {
4195                         .id = IWM_LQ_CMD,
4196                         .len = { sizeof(in->in_lq), },
4197                         .flags = IWM_CMD_SYNC,
4198                 };
4199
4200                 /* Update the association state, now we have it all */
4201                 /* (eg associd comes in at this point) */
4202                 error = iwm_assoc(vap, sc);
4203                 if (error != 0) {
4204                         device_printf(sc->sc_dev,
4205                             "%s: failed to update association state: %d\n",
4206                             __func__,
4207                             error);
4208                         break;
4209                 }
4210
4211                 in = IWM_NODE(vap->iv_bss);
4212                 iwm_mvm_power_mac_update_mode(sc, in);
4213                 iwm_mvm_enable_beacon_filter(sc, in);
4214                 iwm_mvm_update_quotas(sc, in);
4215                 iwm_setrates(sc, in);
4216
4217                 cmd.data[0] = &in->in_lq;
4218                 if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
4219                         device_printf(sc->sc_dev,
4220                             "%s: IWM_LQ_CMD failed\n", __func__);
4221                 }
4222
4223                 iwm_mvm_led_enable(sc);
4224                 break;
4225         }
4226
4227         default:
4228                 break;
4229         }
4230         IWM_UNLOCK(sc);
4231         IEEE80211_LOCK(ic);
4232
4233         return (ivp->iv_newstate(vap, nstate, arg));
4234 }
4235
4236 void
4237 iwm_endscan_cb(void *arg, int pending)
4238 {
4239         struct iwm_softc *sc = arg;
4240         struct ieee80211com *ic = &sc->sc_ic;
4241
4242         IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4243             "%s: scan ended\n",
4244             __func__);
4245
4246         ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4247 }
4248
4249 /*
4250  * Aging and idle timeouts for the different possible scenarios
4251  * in default configuration
4252  */
4253 static const uint32_t
4254 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4255         {
4256                 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
4257                 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
4258         },
4259         {
4260                 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
4261                 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
4262         },
4263         {
4264                 htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
4265                 htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
4266         },
4267         {
4268                 htole32(IWM_SF_BA_AGING_TIMER_DEF),
4269                 htole32(IWM_SF_BA_IDLE_TIMER_DEF)
4270         },
4271         {
4272                 htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
4273                 htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
4274         },
4275 };
4276
4277 /*
4278  * Aging and idle timeouts for the different possible scenarios
4279  * in single BSS MAC configuration.
4280  */
4281 static const uint32_t
4282 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4283         {
4284                 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
4285                 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
4286         },
4287         {
4288                 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
4289                 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
4290         },
4291         {
4292                 htole32(IWM_SF_MCAST_AGING_TIMER),
4293                 htole32(IWM_SF_MCAST_IDLE_TIMER)
4294         },
4295         {
4296                 htole32(IWM_SF_BA_AGING_TIMER),
4297                 htole32(IWM_SF_BA_IDLE_TIMER)
4298         },
4299         {
4300                 htole32(IWM_SF_TX_RE_AGING_TIMER),
4301                 htole32(IWM_SF_TX_RE_IDLE_TIMER)
4302         },
4303 };
4304
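     /*
      * Fill a Smart Fifo configuration command.  The FULL_ON watermark is
      * derived from the peer's capabilities (SISO for HT peers while the
      * MIMO cases remain "notyet", legacy otherwise, and MIMO2 when no node
      * is given), every long-delay timeout is set to
      * IWM_SF_LONG_DELAY_AGING_TIMER, and the full-on timeouts are copied
      * wholesale from one of the two [scenario][timeout type] tables above,
      * which are already stored in little-endian order via htole32().
      */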
4305 static void
4306 iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
4307     struct ieee80211_node *ni)
4308 {
4309         int i, j, watermark;
4310
4311         sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
4312
4313         /*
4314          * If we are in association flow - check antenna configuration
4315          * capabilities of the AP station, and choose the watermark accordingly.
4316          */
4317         if (ni) {
4318                 if (ni->ni_flags & IEEE80211_NODE_HT) {
4319 #ifdef notyet
4320                         if (ni->ni_rxmcs[2] != 0)
4321                                 watermark = IWM_SF_W_MARK_MIMO3;
4322                         else if (ni->ni_rxmcs[1] != 0)
4323                                 watermark = IWM_SF_W_MARK_MIMO2;
4324                         else
4325 #endif
4326                                 watermark = IWM_SF_W_MARK_SISO;
4327                 } else {
4328                         watermark = IWM_SF_W_MARK_LEGACY;
4329                 }
4330         /* default watermark value for unassociated mode. */
4331         } else {
4332                 watermark = IWM_SF_W_MARK_MIMO2;
4333         }
4334         sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
4335
4336         for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
4337                 for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
4338                         sf_cmd->long_delay_timeouts[i][j] =
4339                                         htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
4340                 }
4341         }
4342
4343         if (ni) {
4344                 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
4345                        sizeof(iwm_sf_full_timeout));
4346         } else {
4347                 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
4348                        sizeof(iwm_sf_full_timeout_def));
4349         }
4350 }
4351
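     /*
      * Push a Smart Fifo state change to the firmware.  IWM_SF_FULL_ON
      * fills the command using the current BSS node, while IWM_SF_UNINIT
      * and IWM_SF_INIT_OFF fall back to the default (no-node) settings;
      * any other state is rejected with EINVAL.  On 8000-family devices
      * the IWM_SF_CFG_DUMMY_NOTIF_OFF bit is set as well, and the command
      * itself is sent asynchronously.
      */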
4352 static int
4353 iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4354 {
4355         struct ieee80211com *ic = &sc->sc_ic;
4356         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4357         struct iwm_sf_cfg_cmd sf_cmd = {
4358                 .state = htole32(IWM_SF_FULL_ON),
4359         };
4360         int ret = 0;
4361
4362         if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
4363                 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4364
4365         switch (new_state) {
4366         case IWM_SF_UNINIT:
4367         case IWM_SF_INIT_OFF:
4368                 iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4369                 break;
4370         case IWM_SF_FULL_ON:
4371                 iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4372                 break;
4373         default:
4374                 IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4375                     "Invalid state: %d. not sending Smart Fifo cmd\n",
4376                           new_state);
4377                 return EINVAL;
4378         }
4379
4380         ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4381                                    sizeof(sf_cmd), &sf_cmd);
4382         return ret;
4383 }
4384
4385 static int
4386 iwm_send_bt_init_conf(struct iwm_softc *sc)
4387 {
4388         struct iwm_bt_coex_cmd bt_cmd;
4389
4390         bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4391         bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4392
4393         return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4394             &bt_cmd);
4395 }
4396
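     /*
      * Send an MCC (mobile country code) update to the firmware, used for
      * LAR (location-aware regulatory) support.  The two-letter alpha2
      * country code is packed into a 16-bit MCC value, e.g. for the "ZZ"
      * world-domain code that iwm_init_hw() passes in below:
      *
      *     mcc = ('Z' << 8) | 'Z';     which is 0x5a5a
      *
      * and is unpacked the same way (mcc >> 8, mcc & 0xff) when the debug
      * path prints the response.  The source_id and command length depend
      * on which LAR capabilities the firmware advertises.
      */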
4397 static int
4398 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4399 {
4400         struct iwm_mcc_update_cmd mcc_cmd;
4401         struct iwm_host_cmd hcmd = {
4402                 .id = IWM_MCC_UPDATE_CMD,
4403                 .flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4404                 .data = { &mcc_cmd },
4405         };
4406         int ret;
4407 #ifdef IWM_DEBUG
4408         struct iwm_rx_packet *pkt;
4409         struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4410         struct iwm_mcc_update_resp *mcc_resp;
4411         int n_channels;
4412         uint16_t mcc;
4413 #endif
4414         int resp_v2 = isset(sc->sc_enabled_capa,
4415             IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4416
4417         memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4418         mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4419         if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4420             isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
4421                 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4422         else
4423                 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4424
4425         if (resp_v2)
4426                 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4427         else
4428                 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4429
4430         IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4431             "send MCC update to FW with '%c%c' src = %d\n",
4432             alpha2[0], alpha2[1], mcc_cmd.source_id);
4433
4434         ret = iwm_send_cmd(sc, &hcmd);
4435         if (ret)
4436                 return ret;
4437
4438 #ifdef IWM_DEBUG
4439         pkt = hcmd.resp_pkt;
4440
4441         /* Extract MCC response */
4442         if (resp_v2) {
4443                 mcc_resp = (void *)pkt->data;
4444                 mcc = mcc_resp->mcc;
4445                 n_channels =  le32toh(mcc_resp->n_channels);
4446         } else {
4447                 mcc_resp_v1 = (void *)pkt->data;
4448                 mcc = mcc_resp_v1->mcc;
4449                 n_channels =  le32toh(mcc_resp_v1->n_channels);
4450         }
4451
4452         /* Workaround for a FW/NVM issue - it returns 0x00 for the world domain */
4453         if (mcc == 0)
4454                 mcc = 0x3030;  /* "00" - world */
4455
4456         IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4457             "regulatory domain '%c%c' (%d channels available)\n",
4458             mcc >> 8, mcc & 0xff, n_channels);
4459 #endif
4460         iwm_free_resp(sc, &hcmd);
4461
4462         return 0;
4463 }
4464
4465 static void
4466 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4467 {
4468         struct iwm_host_cmd cmd = {
4469                 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4470                 .len = { sizeof(uint32_t), },
4471                 .data = { &backoff, },
4472         };
4473
4474         if (iwm_send_cmd(sc, &cmd) != 0) {
4475                 device_printf(sc->sc_dev,
4476                     "failed to change thermal tx backoff\n");
4477         }
4478 }
4479
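     /*
      * Full hardware bring-up: start the HW, run the INIT firmware image,
      * then stop and restart the HW and load the regular runtime image.
      * After that the initial configuration is pushed out: BT coex, the
      * valid TX antennas, PHY DB data and PHY config, the auxiliary scan
      * station, one PHY context per IWM_NUM_PHY_CTX, thermal TX backoff
      * (7000 family only), device power settings, the regulatory MCC if
      * LAR is supported, the UMAC scan configuration if available, and
      * the per-AC TX queues.  Finally, beacon filtering is disabled; it
      * is re-enabled once the state machine reaches RUN.
      */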
4480 static int
4481 iwm_init_hw(struct iwm_softc *sc)
4482 {
4483         struct ieee80211com *ic = &sc->sc_ic;
4484         int error, i, ac;
4485
4486         if ((error = iwm_start_hw(sc)) != 0) {
4487                 printf("iwm_start_hw: failed %d\n", error);
4488                 return error;
4489         }
4490
4491         if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4492                 printf("iwm_run_init_mvm_ucode: failed %d\n", error);
4493                 return error;
4494         }
4495
4496         /*
4497          * We should stop and restart the HW, since the INIT
4498          * firmware image has just been loaded.
4499          */
4500         iwm_stop_device(sc);
4501         if ((error = iwm_start_hw(sc)) != 0) {
4502                 device_printf(sc->sc_dev, "could not initialize hardware\n");
4503                 return error;
4504         }
4505
4506         /* Restart, this time with the regular firmware. */
4507         error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
4508         if (error) {
4509                 device_printf(sc->sc_dev, "could not load firmware\n");
4510                 goto error;
4511         }
4512
4513         if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4514                 device_printf(sc->sc_dev, "bt init conf failed\n");
4515                 goto error;
4516         }
4517
4518         if ((error = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc))) != 0) {
4519                 device_printf(sc->sc_dev, "antenna config failed\n");
4520                 goto error;
4521         }
4522
4523         /* Send phy db control command and then phy db calibration. */
4524         if ((error = iwm_send_phy_db_data(sc)) != 0) {
4525                 device_printf(sc->sc_dev, "phy_db_data failed\n");
4526                 goto error;
4527         }
4528
4529         if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4530                 device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4531                 goto error;
4532         }
4533
4534         /* Add auxiliary station for scanning */
4535         if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4536                 device_printf(sc->sc_dev, "add_aux_sta failed\n");
4537                 goto error;
4538         }
4539
4540         for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4541                 /*
4542                  * The channel used here isn't relevant as it's
4543                  * going to be overwritten in the other flows.
4544                  * For now use the first channel we have.
4545                  */
4546                 if ((error = iwm_mvm_phy_ctxt_add(sc,
4547                     &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4548                         goto error;
4549         }
4550
4551         /* Initialize tx backoffs to the minimum. */
4552         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
4553                 iwm_mvm_tt_tx_backoff(sc, 0);
4554
4555         error = iwm_mvm_power_update_device(sc);
4556         if (error)
4557                 goto error;
4558
4559         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
4560                 if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4561                         goto error;
4562         }
4563
4564         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4565                 if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4566                         goto error;
4567         }
4568
4569         /* Enable Tx queues. */
4570         for (ac = 0; ac < WME_NUM_AC; ac++) {
4571                 error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4572                     iwm_mvm_ac_to_tx_fifo[ac]);
4573                 if (error)
4574                         goto error;
4575         }
4576
4577         if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4578                 device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4579                 goto error;
4580         }
4581
4582         return 0;
4583
4584  error:
4585         iwm_stop_device(sc);
4586         return error;
4587 }
4588
4589 /* Allow multicast from our BSSID. */
4590 static int
4591 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4592 {
4593         struct ieee80211_node *ni = vap->iv_bss;
4594         struct iwm_mcast_filter_cmd *cmd;
4595         size_t size;
4596         int error;
4597
4598         size = roundup(sizeof(*cmd), 4);
4599         cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4600         if (cmd == NULL)
4601                 return ENOMEM;
4602         cmd->filter_own = 1;
4603         cmd->port_id = 0;
4604         cmd->count = 0;
4605         cmd->pass_all = 1;
4606         IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4607
4608         error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4609             IWM_CMD_SYNC, size, cmd);
4610         free(cmd, M_DEVBUF);
4611
4612         return (error);
4613 }
4614
4615 /*
4616  * ifnet interfaces
4617  */
4618
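     /*
      * Bring the interface up: if the hardware has already been
      * initialized this is a no-op; otherwise bump the generation count,
      * clear the STOPPED flag and run the full iwm_init_hw() sequence.
      * Only when that succeeds do we mark the hardware as initialized and
      * arm the one-second watchdog callout.
      */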
4619 static void
4620 iwm_init(struct iwm_softc *sc)
4621 {
4622         int error;
4623
4624         if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4625                 return;
4626         }
4627         sc->sc_generation++;
4628         sc->sc_flags &= ~IWM_FLAG_STOPPED;
4629
4630         if ((error = iwm_init_hw(sc)) != 0) {
4631                 printf("iwm_init_hw failed %d\n", error);
4632                 iwm_stop(sc);
4633                 return;
4634         }
4635
4636         /*
4637          * OK, the firmware is loaded and we are up and running.
4638          */
4639         sc->sc_flags |= IWM_FLAG_HW_INITED;
4640         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4641 }
4642
4643 static int
4644 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4645 {
4646         struct iwm_softc *sc;
4647         int error;
4648
4649         sc = ic->ic_softc;
4650
4651         IWM_LOCK(sc);
4652         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4653                 IWM_UNLOCK(sc);
4654                 return (ENXIO);
4655         }
4656         error = mbufq_enqueue(&sc->sc_snd, m);
4657         if (error) {
4658                 IWM_UNLOCK(sc);
4659                 return (error);
4660         }
4661         iwm_start(sc);
4662         IWM_UNLOCK(sc);
4663         return (0);
4664 }
4665
4666 /*
4667  * Dequeue packets from sendq and call send.
4668  */
4669 static void
4670 iwm_start(struct iwm_softc *sc)
4671 {
4672         struct ieee80211_node *ni;
4673         struct mbuf *m;
4674         int ac = 0;
4675
4676         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4677         while (sc->qfullmsk == 0 &&
4678                 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4679                 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4680                 if (iwm_tx(sc, m, ni, ac) != 0) {
4681                         if_inc_counter(ni->ni_vap->iv_ifp,
4682                             IFCOUNTER_OERRORS, 1);
4683                         ieee80211_free_node(ni);
4684                         continue;
4685                 }
4686                 sc->sc_tx_timer = 15;
4687         }
4688         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4689 }
4690
4691 static void
4692 iwm_stop(struct iwm_softc *sc)
4693 {
4694
4695         sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4696         sc->sc_flags |= IWM_FLAG_STOPPED;
4697         sc->sc_generation++;
4698         iwm_led_blink_stop(sc);
4699         sc->sc_tx_timer = 0;
4700         iwm_stop_device(sc);
4701 }
4702
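     /*
      * Per-second TX watchdog: sc_tx_timer is armed (to 15) whenever a
      * frame is handed to the hardware in iwm_start() and cleared in
      * iwm_stop().  If it counts down to zero here, the device is assumed
      * to have wedged, so the firmware error log is dumped (when IWM_DEBUG
      * is set) and all VAPs are restarted.
      */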
4703 static void
4704 iwm_watchdog(void *arg)
4705 {
4706         struct iwm_softc *sc = arg;
4707         struct ieee80211com *ic = &sc->sc_ic;
4708
4709         if (sc->sc_tx_timer > 0) {
4710                 if (--sc->sc_tx_timer == 0) {
4711                         device_printf(sc->sc_dev, "device timeout\n");
4712 #ifdef IWM_DEBUG
4713                         iwm_nic_error(sc);
4714 #endif
4715                         ieee80211_restart_all(ic);
4716                         counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4717                         return;
4718                 }
4719         }
4720         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4721 }
4722
4723 static void
4724 iwm_parent(struct ieee80211com *ic)
4725 {
4726         struct iwm_softc *sc = ic->ic_softc;
4727         int startall = 0;
4728
4729         IWM_LOCK(sc);
4730         if (ic->ic_nrunning > 0) {
4731                 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4732                         iwm_init(sc);
4733                         startall = 1;
4734                 }
4735         } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4736                 iwm_stop(sc);
4737         IWM_UNLOCK(sc);
4738         if (startall)
4739                 ieee80211_start_all(ic);
4740 }
4741
4742 /*
4743  * The interrupt side of things
4744  */
4745
4746 /*
4747  * error dumping routines are from iwlwifi/mvm/utils.c
4748  */
4749
4750 /*
4751  * Note: This structure is read from the device with IO accesses,
4752  * and the reading already does the endian conversion. As it is
4753  * read with uint32_t-sized accesses, any members with a different size
4754  * need to be ordered correctly though!
4755  */
4756 struct iwm_error_event_table {
4757         uint32_t valid;         /* (nonzero) valid, (0) log is empty */
4758         uint32_t error_id;              /* type of error */
4759         uint32_t trm_hw_status0;        /* TRM HW status */
4760         uint32_t trm_hw_status1;        /* TRM HW status */
4761         uint32_t blink2;                /* branch link */
4762         uint32_t ilink1;                /* interrupt link */
4763         uint32_t ilink2;                /* interrupt link */
4764         uint32_t data1;         /* error-specific data */
4765         uint32_t data2;         /* error-specific data */
4766         uint32_t data3;         /* error-specific data */
4767         uint32_t bcon_time;             /* beacon timer */
4768         uint32_t tsf_low;               /* network timestamp function timer */
4769         uint32_t tsf_hi;                /* network timestamp function timer */
4770         uint32_t gp1;           /* GP1 timer register */
4771         uint32_t gp2;           /* GP2 timer register */
4772         uint32_t fw_rev_type;   /* firmware revision type */
4773         uint32_t major;         /* uCode version major */
4774         uint32_t minor;         /* uCode version minor */
4775         uint32_t hw_ver;                /* HW Silicon version */
4776         uint32_t brd_ver;               /* HW board version */
4777         uint32_t log_pc;                /* log program counter */
4778         uint32_t frame_ptr;             /* frame pointer */
4779         uint32_t stack_ptr;             /* stack pointer */
4780         uint32_t hcmd;          /* last host command header */
4781         uint32_t isr0;          /* isr status register LMPM_NIC_ISR0:
4782                                  * rxtx_flag */
4783         uint32_t isr1;          /* isr status register LMPM_NIC_ISR1:
4784                                  * host_flag */
4785         uint32_t isr2;          /* isr status register LMPM_NIC_ISR2:
4786                                  * enc_flag */
4787         uint32_t isr3;          /* isr status register LMPM_NIC_ISR3:
4788                                  * time_flag */
4789         uint32_t isr4;          /* isr status register LMPM_NIC_ISR4:
4790                                  * wico interrupt */
4791         uint32_t last_cmd_id;   /* last HCMD id handled by the firmware */
4792         uint32_t wait_event;            /* wait event() caller address */
4793         uint32_t l2p_control;   /* L2pControlField */
4794         uint32_t l2p_duration;  /* L2pDurationField */
4795         uint32_t l2p_mhvalid;   /* L2pMhValidBits */
4796         uint32_t l2p_addr_match;        /* L2pAddrMatchStat */
4797         uint32_t lmpm_pmg_sel;  /* indicates which clocks are turned on
4798                                  * (LMPM_PMG_SEL) */
4799         uint32_t u_timestamp;   /* date and time of the firmware
4800                                  * compilation */
4801         uint32_t flow_handler;  /* FH read/write pointers, RX credit */
4802 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
4803
4804 /*
4805  * UMAC error struct - relevant starting from family 8000 chip.
4806  * Note: This structure is read from the device with IO accesses,
4807  * and the reading already does the endian conversion. As it is
4808  * read with u32-sized accesses, any members with a different size
4809  * need to be ordered correctly though!
4810  */
4811 struct iwm_umac_error_event_table {
4812         uint32_t valid;         /* (nonzero) valid, (0) log is empty */
4813         uint32_t error_id;      /* type of error */
4814         uint32_t blink1;        /* branch link */
4815         uint32_t blink2;        /* branch link */
4816         uint32_t ilink1;        /* interrupt link */
4817         uint32_t ilink2;        /* interrupt link */
4818         uint32_t data1;         /* error-specific data */
4819         uint32_t data2;         /* error-specific data */
4820         uint32_t data3;         /* error-specific data */
4821         uint32_t umac_major;
4822         uint32_t umac_minor;
4823         uint32_t frame_pointer; /* core register 27 */
4824         uint32_t stack_pointer; /* core register 28 */
4825         uint32_t cmd_header;    /* latest host cmd sent to UMAC */
4826         uint32_t nic_isr_pref;  /* ISR status register */
4827 } __packed;
4828
4829 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
4830 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
4831
4832 #ifdef IWM_DEBUG
4833 struct {
4834         const char *name;
4835         uint8_t num;
4836 } advanced_lookup[] = {
4837         { "NMI_INTERRUPT_WDG", 0x34 },
4838         { "SYSASSERT", 0x35 },
4839         { "UCODE_VERSION_MISMATCH", 0x37 },
4840         { "BAD_COMMAND", 0x38 },
4841         { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
4842         { "FATAL_ERROR", 0x3D },
4843         { "NMI_TRM_HW_ERR", 0x46 },
4844         { "NMI_INTERRUPT_TRM", 0x4C },
4845         { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
4846         { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
4847         { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
4848         { "NMI_INTERRUPT_HOST", 0x66 },
4849         { "NMI_INTERRUPT_ACTION_PT", 0x7C },
4850         { "NMI_INTERRUPT_UNKNOWN", 0x84 },
4851         { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
4852         { "ADVANCED_SYSASSERT", 0 },
4853 };
4854
4855 static const char *
4856 iwm_desc_lookup(uint32_t num)
4857 {
4858         int i;
4859
4860         for (i = 0; i < nitems(advanced_lookup) - 1; i++)
4861                 if (advanced_lookup[i].num == num)
4862                         return advanced_lookup[i].name;
4863
4864         /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
4865         return advanced_lookup[i].name;
4866 }
4867
4868 static void
4869 iwm_nic_umac_error(struct iwm_softc *sc)
4870 {
4871         struct iwm_umac_error_event_table table;
4872         uint32_t base;
4873
4874         base = sc->sc_uc.uc_umac_error_event_table;
4875
4876         if (base < 0x800000) {
4877                 device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
4878                     base);
4879                 return;
4880         }
4881
4882         if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
4883                 device_printf(sc->sc_dev, "reading errlog failed\n");
4884                 return;
4885         }
4886
4887         if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
4888                 device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
4889                 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
4890                     sc->sc_flags, table.valid);
4891         }
4892
4893         device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
4894                 iwm_desc_lookup(table.error_id));
4895         device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
4896         device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
4897         device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
4898             table.ilink1);
4899         device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
4900             table.ilink2);
4901         device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
4902         device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
4903         device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
4904         device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
4905         device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
4906         device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
4907             table.frame_pointer);
4908         device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
4909             table.stack_pointer);
4910         device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
4911         device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
4912             table.nic_isr_pref);
4913 }
4914
4915 /*
4916  * Support for dumping the error log seemed like a good idea ...
4917  * but it's mostly hex junk and the only sensible thing is the
4918  * hw/ucode revision (which we know anyway).  Since it's here,
4919  * I'll just leave it in, just in case e.g. the Intel guys want to
4920  * help us decipher some "ADVANCED_SYSASSERT" later.
4921  */
4922 static void
4923 iwm_nic_error(struct iwm_softc *sc)
4924 {
4925         struct iwm_error_event_table table;
4926         uint32_t base;
4927
4928         device_printf(sc->sc_dev, "dumping device error log\n");
4929         base = sc->sc_uc.uc_error_event_table;
4930         if (base < 0x800000) {
4931                 device_printf(sc->sc_dev,
4932                     "Invalid error log pointer 0x%08x\n", base);
4933                 return;
4934         }
4935
4936         if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
4937                 device_printf(sc->sc_dev, "reading errlog failed\n");
4938                 return;
4939         }
4940
4941         if (!table.valid) {
4942                 device_printf(sc->sc_dev, "errlog not found, skipping\n");
4943                 return;
4944         }
4945
4946         if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
4947                 device_printf(sc->sc_dev, "Start Error Log Dump:\n");
4948                 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
4949                     sc->sc_flags, table.valid);
4950         }
4951
4952         device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
4953             iwm_desc_lookup(table.error_id));
4954         device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
4955             table.trm_hw_status0);
4956         device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
4957             table.trm_hw_status1);
4958         device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
4959         device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
4960         device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
4961         device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
4962         device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
4963         device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
4964         device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
4965         device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
4966         device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
4967         device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
4968         device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
4969         device_printf(sc->sc_dev, "%08X | uCode revision type\n",
4970             table.fw_rev_type);
4971         device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
4972         device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
4973         device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
4974         device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
4975         device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
4976         device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
4977         device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
4978         device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
4979         device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
4980         device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
4981         device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
4982         device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
4983         device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
4984         device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
4985         device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
4986         device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
4987         device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
4988         device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
4989         device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
4990
4991         if (sc->sc_uc.uc_umac_error_event_table)
4992                 iwm_nic_umac_error(sc);
4993 }
4994 #endif
4995
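     /*
      * The notification handlers below pull their payloads straight out of
      * the RX buffer: both macros sync the buffer's DMA map for the CPU and
      * then point _var_/_ptr_ just past the iwm_rx_packet header, i.e. at
      * (_pkt_ + 1), where the command-specific response data begins.
      * ADVANCE_RXQ() simply steps rxq.cur to the next slot, modulo the
      * ring size.
      */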
4996 #define SYNC_RESP_STRUCT(_var_, _pkt_)                                  \
4997 do {                                                                    \
4998         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
4999         _var_ = (void *)((_pkt_)+1);                                    \
5000 } while (/*CONSTCOND*/0)
5001
5002 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)                              \
5003 do {                                                                    \
5004         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
5005         _ptr_ = (void *)((_pkt_)+1);                                    \
5006 } while (/*CONSTCOND*/0)
5007
5008 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
5009
5010 /*
5011  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5012  * Basic structure from if_iwn
5013  */
5014 static void
5015 iwm_notif_intr(struct iwm_softc *sc)
5016 {
5017         struct ieee80211com *ic = &sc->sc_ic;
5018         uint16_t hw;
5019
5020         bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5021             BUS_DMASYNC_POSTREAD);
5022
5023         hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5024
5025         /*
5026          * Process responses
5027          */
5028         while (sc->rxq.cur != hw) {
5029                 struct iwm_rx_ring *ring = &sc->rxq;
5030                 struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
5031                 struct iwm_rx_packet *pkt;
5032                 struct iwm_cmd_response *cresp;
5033                 int qid, idx, code;
5034
5035                 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
5036                     BUS_DMASYNC_POSTREAD);
5037                 pkt = mtod(data->m, struct iwm_rx_packet *);
5038
5039                 qid = pkt->hdr.qid & ~0x80;
5040                 idx = pkt->hdr.idx;
5041
5042                 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5043                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5044                     "rx packet qid=%d idx=%d type=%x %d %d\n",
5045                     pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, sc->rxq.cur, hw);
5046
5047                 /*
5048                  * We randomly get these from the firmware, no idea why.
5049                  * They at least seem harmless, so just ignore them for now.
5050                  */
5051                 if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
5052                     || pkt->len_n_flags == htole32(0x55550000))) {
5053                         ADVANCE_RXQ(sc);
5054                         continue;
5055                 }
5056
5057                 switch (code) {
5058                 case IWM_REPLY_RX_PHY_CMD:
5059                         iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
5060                         break;
5061
5062                 case IWM_REPLY_RX_MPDU_CMD:
5063                         iwm_mvm_rx_rx_mpdu(sc, pkt, data);
5064                         break;
5065
5066                 case IWM_TX_CMD:
5067                         iwm_mvm_rx_tx_cmd(sc, pkt, data);
5068                         break;
5069
5070                 case IWM_MISSED_BEACONS_NOTIFICATION: {
5071                         struct iwm_missed_beacons_notif *resp;
5072                         int missed;
5073
5074                         /* XXX look at mac_id to determine interface ID */
5075                         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5076
5077                         SYNC_RESP_STRUCT(resp, pkt);
5078                         missed = le32toh(resp->consec_missed_beacons);
5079
5080                         IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5081                             "%s: MISSED_BEACON: mac_id=%d, "
5082                             "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5083                             "num_rx=%d\n",
5084                             __func__,
5085                             le32toh(resp->mac_id),
5086                             le32toh(resp->consec_missed_beacons_since_last_rx),
5087                             le32toh(resp->consec_missed_beacons),
5088                             le32toh(resp->num_expected_beacons),
5089                             le32toh(resp->num_recvd_beacons));
5090
5091                         /* Be paranoid */
5092                         if (vap == NULL)
5093                                 break;
5094
5095                         /* XXX no net80211 locking? */
5096                         if (vap->iv_state == IEEE80211_S_RUN &&
5097                             (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5098                                 if (missed > vap->iv_bmissthreshold) {
5099                                         /* XXX bad locking; turn into task */
5100                                         IWM_UNLOCK(sc);
5101                                         ieee80211_beacon_miss(ic);
5102                                         IWM_LOCK(sc);
5103                                 }
5104                         }
5105
5106                         break; }
5107
5108                 case IWM_MFUART_LOAD_NOTIFICATION:
5109                         break;
5110
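                     /*
                      * The ALIVE notification comes in one of three layouts,
                      * told apart purely by payload length.  Each variant
                      * carries the firmware's error and log event table
                      * pointers plus the scheduler base address; the v2/v3
                      * layouts additionally provide the UMAC error table
                      * address.  The status word tells us whether the
                      * firmware actually came up.
                      */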
5111                 case IWM_MVM_ALIVE: {
5112                         struct iwm_mvm_alive_resp_v1 *resp1;
5113                         struct iwm_mvm_alive_resp_v2 *resp2;
5114                         struct iwm_mvm_alive_resp_v3 *resp3;
5115
5116                         if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
5117                                 SYNC_RESP_STRUCT(resp1, pkt);
5118                                 sc->sc_uc.uc_error_event_table
5119                                     = le32toh(resp1->error_event_table_ptr);
5120                                 sc->sc_uc.uc_log_event_table
5121                                     = le32toh(resp1->log_event_table_ptr);
5122                                 sc->sched_base = le32toh(resp1->scd_base_ptr);
5123                                 if (resp1->status == IWM_ALIVE_STATUS_OK)
5124                                         sc->sc_uc.uc_ok = 1;
5125                                 else
5126                                         sc->sc_uc.uc_ok = 0;
5127                         }
5128
5129                         if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
5130                                 SYNC_RESP_STRUCT(resp2, pkt);
5131                                 sc->sc_uc.uc_error_event_table
5132                                     = le32toh(resp2->error_event_table_ptr);
5133                                 sc->sc_uc.uc_log_event_table
5134                                     = le32toh(resp2->log_event_table_ptr);
5135                                 sc->sched_base = le32toh(resp2->scd_base_ptr);
5136                                 sc->sc_uc.uc_umac_error_event_table
5137                                     = le32toh(resp2->error_info_addr);
5138                                 if (resp2->status == IWM_ALIVE_STATUS_OK)
5139                                         sc->sc_uc.uc_ok = 1;
5140                                 else
5141                                         sc->sc_uc.uc_ok = 0;
5142                         }
5143
5144                         if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
5145                                 SYNC_RESP_STRUCT(resp3, pkt);
5146                                 sc->sc_uc.uc_error_event_table
5147                                     = le32toh(resp3->error_event_table_ptr);
5148                                 sc->sc_uc.uc_log_event_table
5149                                     = le32toh(resp3->log_event_table_ptr);
5150                                 sc->sched_base = le32toh(resp3->scd_base_ptr);
5151                                 sc->sc_uc.uc_umac_error_event_table
5152                                     = le32toh(resp3->error_info_addr);
5153                                 if (resp3->status == IWM_ALIVE_STATUS_OK)
5154                                         sc->sc_uc.uc_ok = 1;
5155                                 else
5156                                         sc->sc_uc.uc_ok = 0;
5157                         }
5158
5159                         sc->sc_uc.uc_intr = 1;
5160                         wakeup(&sc->sc_uc);
5161                         break; }
5162
5163                 case IWM_CALIB_RES_NOTIF_PHY_DB: {
5164                         struct iwm_calib_res_notif_phy_db *phy_db_notif;
5165                         SYNC_RESP_STRUCT(phy_db_notif, pkt);
5166
5167                         iwm_phy_db_set_section(sc, phy_db_notif);
5168
5169                         break; }
5170
5171                 case IWM_STATISTICS_NOTIFICATION: {
5172                         struct iwm_notif_statistics *stats;
5173                         SYNC_RESP_STRUCT(stats, pkt);
5174                         memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5175                         sc->sc_noise = iwm_get_noise(&stats->rx.general);
5176                         break; }
5177
5178                 case IWM_NVM_ACCESS_CMD:
5179                 case IWM_MCC_UPDATE_CMD:
5180                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
5181                                 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
5182                                     BUS_DMASYNC_POSTREAD);
5183                                 memcpy(sc->sc_cmd_resp,
5184                                     pkt, sizeof(sc->sc_cmd_resp));
5185                         }
5186                         break;
5187
5188                 case IWM_MCC_CHUB_UPDATE_CMD: {
5189                         struct iwm_mcc_chub_notif *notif;
5190                         SYNC_RESP_STRUCT(notif, pkt);
5191
5192                         sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5193                         sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5194                         sc->sc_fw_mcc[2] = '\0';
5195                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
5196                             "fw source %d sent CC '%s'\n",
5197                             notif->source_id, sc->sc_fw_mcc);
5198                         break; }
5199
5200                 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5201                         break;
5202
5203                 case IWM_PHY_CONFIGURATION_CMD:
5204                 case IWM_TX_ANT_CONFIGURATION_CMD:
5205                 case IWM_ADD_STA:
5206                 case IWM_MAC_CONTEXT_CMD:
5207                 case IWM_REPLY_SF_CFG_CMD:
5208                 case IWM_POWER_TABLE_CMD:
5209                 case IWM_PHY_CONTEXT_CMD:
5210                 case IWM_BINDING_CONTEXT_CMD:
5211                 case IWM_TIME_EVENT_CMD:
5212                 case IWM_SCAN_REQUEST_CMD:
5213                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5214                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5215                 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5216                 case IWM_REPLY_BEACON_FILTERING_CMD:
5217                 case IWM_MAC_PM_POWER_TABLE:
5218                 case IWM_TIME_QUOTA_CMD:
5219                 case IWM_REMOVE_STA:
5220                 case IWM_TXPATH_FLUSH:
5221                 case IWM_LQ_CMD:
5222                 case IWM_BT_CONFIG:
5223                 case IWM_REPLY_THERMAL_MNG_BACKOFF:
5224                         SYNC_RESP_STRUCT(cresp, pkt);
5225                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
5226                                 memcpy(sc->sc_cmd_resp,
5227                                     pkt, sizeof(*pkt)+sizeof(*cresp));
5228                         }
5229                         break;
5230
5231                 /* ignore */
5232                 case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
5233                         break;
5234
5235                 case IWM_INIT_COMPLETE_NOTIF:
5236                         sc->sc_init_complete = 1;
5237                         wakeup(&sc->sc_init_complete);
5238                         break;
5239
5240                 case IWM_SCAN_OFFLOAD_COMPLETE: {
5241                         struct iwm_periodic_scan_complete *notif;
5242                         SYNC_RESP_STRUCT(notif, pkt);
5243                         break;
5244                 }
5245
5246                 case IWM_SCAN_ITERATION_COMPLETE: {
5247                         struct iwm_lmac_scan_complete_notif *notif;
5248                         SYNC_RESP_STRUCT(notif, pkt);
5249                         ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5250                         break;
5251                 }
5252  
5253                 case IWM_SCAN_COMPLETE_UMAC: {
5254                         struct iwm_umac_scan_complete *notif;
5255                         SYNC_RESP_STRUCT(notif, pkt);
5256
5257                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
5258                             "UMAC scan complete, status=0x%x\n",
5259                             notif->status);
5260 #if 0   /* XXX This would be a duplicate scan end call */
5261                         taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
5262 #endif
5263                         break;
5264                 }
5265
5266                 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5267                         struct iwm_umac_scan_iter_complete_notif *notif;
5268                         SYNC_RESP_STRUCT(notif, pkt);
5269
5270                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5271                             "complete, status=0x%x, %d channels scanned\n",
5272                             notif->status, notif->scanned_channels);
5273                         ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5274                         break;
5275                 }
5276
5277                 case IWM_REPLY_ERROR: {
5278                         struct iwm_error_resp *resp;
5279                         SYNC_RESP_STRUCT(resp, pkt);
5280
5281                         device_printf(sc->sc_dev,
5282                             "firmware error 0x%x, cmd 0x%x\n",
5283                             le32toh(resp->error_type),
5284                             resp->cmd_id);
5285                         break;
5286                 }
5287
5288                 case IWM_TIME_EVENT_NOTIFICATION: {
5289                         struct iwm_time_event_notif *notif;
5290                         SYNC_RESP_STRUCT(notif, pkt);
5291
5292                         IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5293                             "TE notif status = 0x%x action = 0x%x\n",
5294                             notif->status, notif->action);
5295                         break;
5296                 }
5297
5298                 case IWM_MCAST_FILTER_CMD:
5299                         break;
5300
5301                 case IWM_SCD_QUEUE_CFG: {
5302                         struct iwm_scd_txq_cfg_rsp *rsp;
5303                         SYNC_RESP_STRUCT(rsp, pkt);
5304
5305                         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5306                             "queue cfg token=0x%x sta_id=%d "
5307                             "tid=%d scd_queue=%d\n",
5308                             rsp->token, rsp->sta_id, rsp->tid,
5309                             rsp->scd_queue);
5310                         break;
5311                 }
5312
5313                 default:
5314                         device_printf(sc->sc_dev,
5315                             "frame %d/%d %x UNHANDLED (this should "
5316                             "not happen)\n", qid, idx,
5317                             pkt->len_n_flags);
5318                         break;
5319                 }
5320
5321                 /*
5322                  * Why test bit 0x80?  The Linux driver:
5323                  *
5324                  * There is one exception:  uCode sets bit 15 when it
5325                  * originates the response/notification, i.e. when the
5326                  * response/notification is not a direct response to a
5327                  * command sent by the driver.  For example, uCode issues
5328                  * IWM_REPLY_RX when it sends a received frame to the driver;
5329                  * it is not a direct response to any driver command.
5330                  *
5331                  * Ok, so since when is 7 == 15?  Well, the Linux driver
5332                  * uses a slightly different format for pkt->hdr, and "qid"
5333                  * is actually the upper byte of a two-byte field.
5334                  */
5335                 if (!(pkt->hdr.qid & (1 << 7))) {
5336                         iwm_cmd_done(sc, pkt);
5337                 }
5338
5339                 ADVANCE_RXQ(sc);
5340         }
5341
5342         IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
5343             IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
5344
5345         /*
5346          * Tell the firmware what we have processed.
5347          * The hardware seems to get upset unless we align the
5348          * write pointer to a multiple of 8, for unclear reasons.
5349          */
5350         hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5351         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
5352 }
5353
5354 static void
5355 iwm_intr(void *arg)
5356 {
5357         struct iwm_softc *sc = arg;
5358         int handled = 0;
5359         int r1, r2, rv = 0;
5360         int isperiodic = 0;
5361
5362         IWM_LOCK(sc);
5363         IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5364
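             /*
              * When ICT (interrupt cause table) mode is enabled, the device
              * DMAs its interrupt causes into ict_dma rather than our
              * reading IWM_CSR_INT directly.  We drain entries (zeroing
              * each slot) until we hit an empty one, OR the values
              * together, treat an all-ones result as "no interrupt", and
              * then expand the compressed value back into IWM_CSR_INT-style
              * bit positions: the low byte stays in bits 0-7 and the high
              * byte is shifted up to bits 16-23, with an extra fixup for
              * the 0xc0000 bits.
              */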
5365         if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5366                 uint32_t *ict = sc->ict_dma.vaddr;
5367                 int tmp;
5368
5369                 tmp = htole32(ict[sc->ict_cur]);
5370                 if (!tmp)
5371                         goto out_ena;
5372
5373                 /*
5374                  * ok, there was something.  keep plowing until we have all.
5375                  */
5376                 r1 = r2 = 0;
5377                 while (tmp) {
5378                         r1 |= tmp;
5379                         ict[sc->ict_cur] = 0;
5380                         sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5381                         tmp = htole32(ict[sc->ict_cur]);
5382                 }
5383
5384                 /* this is where the fun begins.  don't ask */
5385                 if (r1 == 0xffffffff)
5386                         r1 = 0;
5387
5388                 /* i am not expected to understand this */
5389                 if (r1 & 0xc0000)
5390                         r1 |= 0x8000;
5391                 r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5392         } else {
5393                 r1 = IWM_READ(sc, IWM_CSR_INT);
5394                 /* "hardware gone" (where, fishing?) */
5395                 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5396                         goto out;
5397                 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5398         }
5399         if (r1 == 0 && r2 == 0) {
5400                 goto out_ena;
5401         }
5402
5403         IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5404
5405         /* ignored */
5406         handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
5407
5408         if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5409                 int i;
5410                 struct ieee80211com *ic = &sc->sc_ic;
5411                 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5412
5413 #ifdef IWM_DEBUG
5414                 iwm_nic_error(sc);
5415 #endif
5416                 /* Dump driver status (TX and RX rings) while we're here. */
5417                 device_printf(sc->sc_dev, "driver status:\n");
5418                 for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5419                         struct iwm_tx_ring *ring = &sc->txq[i];
5420                         device_printf(sc->sc_dev,
5421                             "  tx ring %2d: qid=%-2d cur=%-3d "
5422                             "queued=%-3d\n",
5423                             i, ring->qid, ring->cur, ring->queued);
5424                 }
5425                 device_printf(sc->sc_dev,
5426                     "  rx ring: cur=%d\n", sc->rxq.cur);
5427                 device_printf(sc->sc_dev,
5428                     "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5429
5430                 /* Don't stop the device; just do a VAP restart */
5431                 IWM_UNLOCK(sc);
5432
5433                 if (vap == NULL) {
5434                         printf("%s: null vap\n", __func__);
5435                         return;
5436                 }
5437
5438                 device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5439                     "restarting\n", __func__, vap->iv_state);
5440
5441                 /* XXX TODO: turn this into a callout/taskqueue */
5442                 ieee80211_restart_all(ic);
5443                 return;
5444         }
5445
5446         if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5447                 handled |= IWM_CSR_INT_BIT_HW_ERR;
5448                 device_printf(sc->sc_dev, "hardware error, stopping device\n");
5449                 iwm_stop(sc);
5450                 rv = 1;
5451                 goto out;
5452         }
5453
5454         /* firmware chunk loaded */
5455         if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5456                 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5457                 handled |= IWM_CSR_INT_BIT_FH_TX;
5458                 sc->sc_fw_chunk_done = 1;
5459                 wakeup(&sc->sc_fw);
5460         }
5461
5462         if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5463                 handled |= IWM_CSR_INT_BIT_RF_KILL;
5464                 if (iwm_check_rfkill(sc)) {
5465                         device_printf(sc->sc_dev,
5466                             "%s: rfkill switch, disabling interface\n",
5467                             __func__);
5468                         iwm_stop(sc);
5469                 }
5470         }
5471
5472         /*
5473          * The Linux driver uses periodic interrupts to avoid races.
5474          * We cargo-cult like it's going out of fashion.
5475          */
5476         if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5477                 handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5478                 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5479                 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5480                         IWM_WRITE_1(sc,
5481                             IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5482                 isperiodic = 1;
5483         }
5484
5485         if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5486                 handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5487                 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5488
5489                 iwm_notif_intr(sc);
5490
5491                 /* enable periodic interrupt, see above */
5492                 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5493                         IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5494                             IWM_CSR_INT_PERIODIC_ENA);
5495         }
5496
5497         if (__predict_false(r1 & ~handled))
5498                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5499                     "%s: unhandled interrupts: %x\n", __func__, r1);
5500         rv = 1;
5501
5502  out_ena:
5503         iwm_restore_interrupts(sc);
5504  out:
5505         IWM_UNLOCK(sc);
5506         return;
5507 }
5508
5509 /*
5510  * Autoconf glue-sniffing
5511  */
5512 #define PCI_VENDOR_INTEL                0x8086
5513 #define PCI_PRODUCT_INTEL_WL_3160_1     0x08b3
5514 #define PCI_PRODUCT_INTEL_WL_3160_2     0x08b4
5515 #define PCI_PRODUCT_INTEL_WL_3165_1     0x3165
5516 #define PCI_PRODUCT_INTEL_WL_3165_2     0x3166
5517 #define PCI_PRODUCT_INTEL_WL_7260_1     0x08b1
5518 #define PCI_PRODUCT_INTEL_WL_7260_2     0x08b2
5519 #define PCI_PRODUCT_INTEL_WL_7265_1     0x095a
5520 #define PCI_PRODUCT_INTEL_WL_7265_2     0x095b
5521 #define PCI_PRODUCT_INTEL_WL_8260_1     0x24f3
5522 #define PCI_PRODUCT_INTEL_WL_8260_2     0x24f4
5523
5524 static const struct iwm_devices {
5525         uint16_t        device;
5526         const char      *name;
5527 } iwm_devices[] = {
5528         { PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
5529         { PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
5530         { PCI_PRODUCT_INTEL_WL_3165_1, "Intel Dual Band Wireless AC 3165" },
5531         { PCI_PRODUCT_INTEL_WL_3165_2, "Intel Dual Band Wireless AC 3165" },
5532         { PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
5533         { PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
5534         { PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
5535         { PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
5536         { PCI_PRODUCT_INTEL_WL_8260_1, "Intel Dual Band Wireless AC 8260" },
5537         { PCI_PRODUCT_INTEL_WL_8260_2, "Intel Dual Band Wireless AC 8260" },
5538 };
5539
5540 static int
5541 iwm_probe(device_t dev)
5542 {
5543         int i;
5544
5545         for (i = 0; i < nitems(iwm_devices); i++) {
5546                 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5547                     pci_get_device(dev) == iwm_devices[i].device) {
5548                         device_set_desc(dev, iwm_devices[i].name);
5549                         return (BUS_PROBE_DEFAULT);
5550                 }
5551         }
5552
5553         return (ENXIO);
5554 }
5555
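/*
 * Record per-chip parameters: firmware image name, device family,
 * host interrupt operation mode and firmware DMA segment size.
 */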
5556 static int
5557 iwm_dev_check(device_t dev)
5558 {
5559         struct iwm_softc *sc;
5560
5561         sc = device_get_softc(dev);
5562
5563         sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
5564         switch (pci_get_device(dev)) {
5565         case PCI_PRODUCT_INTEL_WL_3160_1:
5566         case PCI_PRODUCT_INTEL_WL_3160_2:
5567                 sc->sc_fwname = "iwm3160fw";
5568                 sc->host_interrupt_operation_mode = 1;
5569                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5570                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5571                 return (0);
5572         case PCI_PRODUCT_INTEL_WL_3165_1:
5573         case PCI_PRODUCT_INTEL_WL_3165_2:
5574                 sc->sc_fwname = "iwm7265fw";
5575                 sc->host_interrupt_operation_mode = 0;
5576                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5577                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5578                 return (0);
5579         case PCI_PRODUCT_INTEL_WL_7260_1:
5580         case PCI_PRODUCT_INTEL_WL_7260_2:
5581                 sc->sc_fwname = "iwm7260fw";
5582                 sc->host_interrupt_operation_mode = 1;
5583                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5584                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5585                 return (0);
5586         case PCI_PRODUCT_INTEL_WL_7265_1:
5587         case PCI_PRODUCT_INTEL_WL_7265_2:
5588                 sc->sc_fwname = "iwm7265fw";
5589                 sc->host_interrupt_operation_mode = 0;
5590                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5591                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5592                 return (0);
5593         case PCI_PRODUCT_INTEL_WL_8260_1:
5594         case PCI_PRODUCT_INTEL_WL_8260_2:
5595                 sc->sc_fwname = "iwm8000Cfw";
5596                 sc->host_interrupt_operation_mode = 0;
5597                 sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
5598                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
5599                 return (0);
5600         default:
5601                 device_printf(dev, "unknown adapter type\n");
5602                 return ENXIO;
5603         }
5604 }
5605
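/*
 * Bus glue: clear the device-specific PCI retry timeout register,
 * enable bus mastering, map BAR 0 and hook up the interrupt handler
 * (MSI if available, otherwise a shared INTx line).
 */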
5606 static int
5607 iwm_pci_attach(device_t dev)
5608 {
5609         struct iwm_softc *sc;
5610         int count, error, rid;
5611         uint16_t reg;
5612
5613         sc = device_get_softc(dev);
5614
5615         /* Clear device-specific "PCI retry timeout" register (41h). */
5616         reg = pci_read_config(dev, 0x40, sizeof(reg));
5617         pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
5618
5619         /* Enable bus-mastering and hardware bug workaround. */
5620         pci_enable_busmaster(dev);
5621         reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5622         /* if !MSI */
5623         if (reg & PCIM_STATUS_INTxSTATE) {
5624                 reg &= ~PCIM_STATUS_INTxSTATE;
5625         }
5626         pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5627
5628         rid = PCIR_BAR(0);
5629         sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5630             RF_ACTIVE);
5631         if (sc->sc_mem == NULL) {
5632                 device_printf(sc->sc_dev, "can't map mem space\n");
5633                 return (ENXIO);
5634         }
5635         sc->sc_st = rman_get_bustag(sc->sc_mem);
5636         sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5637
5638         /* Install interrupt handler. */
5639         count = 1;
5640         rid = 0;
5641         if (pci_alloc_msi(dev, &count) == 0)
5642                 rid = 1;
5643         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5644             (rid != 0 ? 0 : RF_SHAREABLE));
5645         if (sc->sc_irq == NULL) {
5646                 device_printf(dev, "can't map interrupt\n");
5647                 return (ENXIO);
5648         }
5649         error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5650             NULL, iwm_intr, sc, &sc->sc_ih);
5651         if (error != 0) {
5652                 device_printf(dev, "can't establish interrupt\n");
5653                 return (ENXIO);
5654         }
5655         sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5656
5657         return (0);
5658 }
5659
5660 static void
5661 iwm_pci_detach(device_t dev)
5662 {
5663         struct iwm_softc *sc = device_get_softc(dev);
5664
5665         if (sc->sc_irq != NULL) {
5666                 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5667                 bus_release_resource(dev, SYS_RES_IRQ,
5668                     rman_get_rid(sc->sc_irq), sc->sc_irq);
5669                 pci_release_msi(dev);
5670         }
5671         if (sc->sc_mem != NULL)
5672                 bus_release_resource(dev, SYS_RES_MEMORY,
5673                     rman_get_rid(sc->sc_mem), sc->sc_mem);
5674 }
5675
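/*
 * Device attach: initialize locks, queues and callouts, attach the PCI
 * resources, identify the chip and allocate firmware/DMA structures.
 * The firmware-dependent part of the setup is deferred to iwm_preinit()
 * through a config_intrhook.
 */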
5678 static int
5679 iwm_attach(device_t dev)
5680 {
5681         struct iwm_softc *sc = device_get_softc(dev);
5682         struct ieee80211com *ic = &sc->sc_ic;
5683         int error;
5684         int txq_i, i;
5685
5686         sc->sc_dev = dev;
5687         IWM_LOCK_INIT(sc);
5688         mbufq_init(&sc->sc_snd, ifqmaxlen);
5689         callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5690         callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
5691         TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5692
5693         /* PCI attach */
5694         error = iwm_pci_attach(dev);
5695         if (error != 0)
5696                 goto fail;
5697
5698         sc->sc_wantresp = -1;
5699
5700         /* Check device type */
5701         error = iwm_dev_check(dev);
5702         if (error != 0)
5703                 goto fail;
5704
5705         /*
5706          * We now start fiddling with the hardware
5707          */
5708         /*
5709          * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
5710          * changed, and now the revision step also includes bit 0-1 (no more
5711          * "dash" value). To keep hw_rev backwards compatible - we'll store it
5712          * in the old format.
5713          */
5714         if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
5715                 sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
5716                                 (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
5717
5718         if (iwm_prepare_card_hw(sc) != 0) {
5719                 device_printf(dev, "could not initialize hardware\n");
5720                 goto fail;
5721         }
5722
5723         if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
5724                 int ret;
5725                 uint32_t hw_step;
5726
5727                 /*
5728                  * In order to recognize C step the driver should read the
5729                  * chip version id located at the AUX bus MISC address.
5730                  */
5731                 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
5732                             IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
5733                 DELAY(2);
5734
5735                 ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
5736                                    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5737                                    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5738                                    25000);
5739                 if (ret == 0) {
5740                         device_printf(sc->sc_dev,
5741                             "could not wake up the NIC\n");
5742                         goto fail;
5743                 }
5744
5745                 if (iwm_nic_lock(sc)) {
5746                         hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
5747                         hw_step |= IWM_ENABLE_WFPM;
5748                         iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
5749                         hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
5750                         hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
5751                         if (hw_step == 0x3)
5752                                 sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
5753                                                 (IWM_SILICON_C_STEP << 2);
5754                         iwm_nic_unlock(sc);
5755                 } else {
5756                         device_printf(sc->sc_dev, "could not lock the NIC\n");
5757                         goto fail;
5758                 }
5759         }
5760
5761         /* Allocate DMA memory for firmware transfers. */
5762         if ((error = iwm_alloc_fwmem(sc)) != 0) {
5763                 device_printf(dev, "could not allocate memory for firmware\n");
5764                 goto fail;
5765         }
5766
5767         /* Allocate "Keep Warm" page. */
5768         if ((error = iwm_alloc_kw(sc)) != 0) {
5769                 device_printf(dev, "could not allocate keep warm page\n");
5770                 goto fail;
5771         }
5772
5773         /* We use ICT interrupts */
5774         if ((error = iwm_alloc_ict(sc)) != 0) {
5775                 device_printf(dev, "could not allocate ICT table\n");
5776                 goto fail;
5777         }
5778
5779         /* Allocate TX scheduler "rings". */
5780         if ((error = iwm_alloc_sched(sc)) != 0) {
5781                 device_printf(dev, "could not allocate TX scheduler rings\n");
5782                 goto fail;
5783         }
5784
5785         /* Allocate TX rings */
5786         for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
5787                 if ((error = iwm_alloc_tx_ring(sc,
5788                     &sc->txq[txq_i], txq_i)) != 0) {
5789                         device_printf(dev,
5790                             "could not allocate TX ring %d\n",
5791                             txq_i);
5792                         goto fail;
5793                 }
5794         }
5795
5796         /* Allocate RX ring. */
5797         if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
5798                 device_printf(dev, "could not allocate RX ring\n");
5799                 goto fail;
5800         }
5801
5802         /* Clear pending interrupts. */
5803         IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
5804
5805         ic->ic_softc = sc;
5806         ic->ic_name = device_get_nameunit(sc->sc_dev);
5807         ic->ic_phytype = IEEE80211_T_OFDM;      /* not only, but not used */
5808         ic->ic_opmode = IEEE80211_M_STA;        /* default to BSS mode */
5809
5810         /* Set device capabilities. */
5811         ic->ic_caps =
5812             IEEE80211_C_STA |
5813             IEEE80211_C_WPA |           /* WPA/RSN */
5814             IEEE80211_C_WME |
5815             IEEE80211_C_SHSLOT |        /* short slot time supported */
5816             IEEE80211_C_SHPREAMBLE      /* short preamble supported */
5817 /*        | IEEE80211_C_BGSCAN          capable of bg scanning */
5818             ;
5819         for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
5820                 sc->sc_phyctxt[i].id = i;
5821                 sc->sc_phyctxt[i].color = 0;
5822                 sc->sc_phyctxt[i].ref = 0;
5823                 sc->sc_phyctxt[i].channel = NULL;
5824         }
5825
5826         /* Max RSSI */
5827         sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
5828         sc->sc_preinit_hook.ich_func = iwm_preinit;
5829         sc->sc_preinit_hook.ich_arg = sc;
5830         if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
5831                 device_printf(dev, "config_intrhook_establish failed\n");
5832                 goto fail;
5833         }
5834
5835 #ifdef IWM_DEBUG
5836         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
5837             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
5838             CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
5839 #endif
5840
5841         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5842             "<-%s\n", __func__);
5843
5844         return 0;
5845
5846         /* Free allocated memory if something failed during attachment. */
5847 fail:
5848         iwm_detach_local(sc, 0);
5849
5850         return ENXIO;
5851 }
5852
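/* Reject group (multicast/broadcast) and all-zero MAC addresses. */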
5853 static int
5854 iwm_is_valid_ether_addr(uint8_t *addr)
5855 {
5856         char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
5857
5858         if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
5859                 return (FALSE);
5860
5861         return (TRUE);
5862 }
5863
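/* WME/EDCA parameter updates are not pushed to the firmware yet. */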
5864 static int
5865 iwm_update_edca(struct ieee80211com *ic)
5866 {
5867         struct iwm_softc *sc = ic->ic_softc;
5868
5869         device_printf(sc->sc_dev, "%s: called\n", __func__);
5870         return (0);
5871 }
5872
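/*
 * Deferred attach hook, run once interrupts are available: start the
 * hardware, run the init firmware to read the NVM, then attach to
 * net80211 and install the driver's methods.
 */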
5873 static void
5874 iwm_preinit(void *arg)
5875 {
5876         struct iwm_softc *sc = arg;
5877         device_t dev = sc->sc_dev;
5878         struct ieee80211com *ic = &sc->sc_ic;
5879         int error;
5880
5881         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5882             "->%s\n", __func__);
5883
5884         IWM_LOCK(sc);
5885         if ((error = iwm_start_hw(sc)) != 0) {
5886                 device_printf(dev, "could not initialize hardware\n");
5887                 IWM_UNLOCK(sc);
5888                 goto fail;
5889         }
5890
5891         error = iwm_run_init_mvm_ucode(sc, 1);
5892         iwm_stop_device(sc);
5893         if (error) {
5894                 IWM_UNLOCK(sc);
5895                 goto fail;
5896         }
5897         device_printf(dev,
5898             "hw rev 0x%x, fw ver %s, address %s\n",
5899             sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
5900             sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
5901
5902         /* not all hardware can do 5GHz band */
5903         if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
5904                 memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
5905                     sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
5906         IWM_UNLOCK(sc);
5907
5908         iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
5909             ic->ic_channels);
5910
5911         /*
5912          * At this point we've committed - if we fail to do setup,
5913          * we now also have to tear down the net80211 state.
5914          */
5915         ieee80211_ifattach(ic);
5916         ic->ic_vap_create = iwm_vap_create;
5917         ic->ic_vap_delete = iwm_vap_delete;
5918         ic->ic_raw_xmit = iwm_raw_xmit;
5919         ic->ic_node_alloc = iwm_node_alloc;
5920         ic->ic_scan_start = iwm_scan_start;
5921         ic->ic_scan_end = iwm_scan_end;
5922         ic->ic_update_mcast = iwm_update_mcast;
5923         ic->ic_getradiocaps = iwm_init_channel_map;
5924         ic->ic_set_channel = iwm_set_channel;
5925         ic->ic_scan_curchan = iwm_scan_curchan;
5926         ic->ic_scan_mindwell = iwm_scan_mindwell;
5927         ic->ic_wme.wme_update = iwm_update_edca;
5928         ic->ic_parent = iwm_parent;
5929         ic->ic_transmit = iwm_transmit;
5930         iwm_radiotap_attach(sc);
5931         if (bootverbose)
5932                 ieee80211_announce(ic);
5933
5934         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5935             "<-%s\n", __func__);
5936         config_intrhook_disestablish(&sc->sc_preinit_hook);
5937
5938         return;
5939 fail:
5940         config_intrhook_disestablish(&sc->sc_preinit_hook);
5941         iwm_detach_local(sc, 0);
5942 }
5943
5944 /*
5945  * Attach the interface to 802.11 radiotap.
5946  */
5947 static void
5948 iwm_radiotap_attach(struct iwm_softc *sc)
5949 {
5950         struct ieee80211com *ic = &sc->sc_ic;
5951
5952         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5953             "->%s begin\n", __func__);
5954         ieee80211_radiotap_attach(ic,
5955             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
5956                 IWM_TX_RADIOTAP_PRESENT,
5957             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
5958                 IWM_RX_RADIOTAP_PRESENT);
5959         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5960             "->%s end\n", __func__);
5961 }
5962
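/*
 * net80211 vap creation; only a single vap is supported.  The stock
 * iv_newstate handler is saved and replaced with iwm_newstate().
 */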
5963 static struct ieee80211vap *
5964 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
5965     enum ieee80211_opmode opmode, int flags,
5966     const uint8_t bssid[IEEE80211_ADDR_LEN],
5967     const uint8_t mac[IEEE80211_ADDR_LEN])
5968 {
5969         struct iwm_vap *ivp;
5970         struct ieee80211vap *vap;
5971
5972         if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
5973                 return NULL;
5974         ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
5975         vap = &ivp->iv_vap;
5976         ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
5977         vap->iv_bmissthreshold = 10;            /* override default */
5978         /* Override with driver methods. */
5979         ivp->iv_newstate = vap->iv_newstate;
5980         vap->iv_newstate = iwm_newstate;
5981
5982         ieee80211_ratectl_init(vap);
5983         /* Complete setup. */
5984         ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
5985             mac);
5986         ic->ic_opmode = opmode;
5987
5988         return vap;
5989 }
5990
5991 static void
5992 iwm_vap_delete(struct ieee80211vap *vap)
5993 {
5994         struct iwm_vap *ivp = IWM_VAP(vap);
5995
5996         ieee80211_ratectl_deinit(vap);
5997         ieee80211_vap_detach(vap);
5998         free(ivp, M_80211_VAP);
5999 }
6000
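/*
 * Start a firmware scan, preferring the UMAC scan API when the
 * firmware advertises the capability and falling back to the LMAC
 * scan command otherwise.
 */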
6001 static void
6002 iwm_scan_start(struct ieee80211com *ic)
6003 {
6004         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6005         struct iwm_softc *sc = ic->ic_softc;
6006         int error;
6007
6008         IWM_LOCK(sc);
6009         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6010                 error = iwm_mvm_umac_scan(sc);
6011         else
6012                 error = iwm_mvm_lmac_scan(sc);
6013         if (error != 0) {
6014                 device_printf(sc->sc_dev, "could not initiate scan\n");
6015                 IWM_UNLOCK(sc);
6016                 ieee80211_cancel_scan(vap);
6017         } else {
6018                 iwm_led_blink_start(sc);
6019                 IWM_UNLOCK(sc);
6020         }
6021 }
6022
6023 static void
6024 iwm_scan_end(struct ieee80211com *ic)
6025 {
6026         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6027         struct iwm_softc *sc = ic->ic_softc;
6028
6029         IWM_LOCK(sc);
6030         iwm_led_blink_stop(sc);
6031         if (vap->iv_state == IEEE80211_S_RUN)
6032                 iwm_mvm_led_enable(sc);
6033         IWM_UNLOCK(sc);
6034 }
6035
6036 static void
6037 iwm_update_mcast(struct ieee80211com *ic)
6038 {
6039 }
6040
6041 static void
6042 iwm_set_channel(struct ieee80211com *ic)
6043 {
6044 }
6045
6046 static void
6047 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6048 {
6049 }
6050
6051 static void
6052 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6053 {
6054         return;
6055 }
6056
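/*
 * Restart task: serialize on IWM_FLAG_BUSY, stop the hardware and
 * reinitialize it if an interface is still running.
 */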
6057 void
6058 iwm_init_task(void *arg1)
6059 {
6060         struct iwm_softc *sc = arg1;
6061
6062         IWM_LOCK(sc);
6063         while (sc->sc_flags & IWM_FLAG_BUSY)
6064                 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6065         sc->sc_flags |= IWM_FLAG_BUSY;
6066         iwm_stop(sc);
6067         if (sc->sc_ic.ic_nrunning > 0)
6068                 iwm_init(sc);
6069         sc->sc_flags &= ~IWM_FLAG_BUSY;
6070         wakeup(&sc->sc_flags);
6071         IWM_UNLOCK(sc);
6072 }
6073
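/*
 * Suspend/resume: net80211 is quiesced around a full hardware stop;
 * IWM_FLAG_SCANNING is reused to remember that a reinit is needed on
 * resume.
 */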
6074 static int
6075 iwm_resume(device_t dev)
6076 {
6077         struct iwm_softc *sc = device_get_softc(dev);
6078         int do_reinit = 0;
6079         uint16_t reg;
6080
6081         /* Clear device-specific "PCI retry timeout" register (41h). */
6082         reg = pci_read_config(dev, 0x40, sizeof(reg));
6083         pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
6084         iwm_init_task(sc);
6085
6086         IWM_LOCK(sc);
6087         if (sc->sc_flags & IWM_FLAG_SCANNING) {
6088                 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6089                 do_reinit = 1;
6090         }
6091         IWM_UNLOCK(sc);
6092
6093         if (do_reinit)
6094                 ieee80211_resume_all(&sc->sc_ic);
6095
6096         return 0;
6097 }
6098
6099 static int
6100 iwm_suspend(device_t dev)
6101 {
6102         int do_stop = 0;
6103         struct iwm_softc *sc = device_get_softc(dev);
6104
6105         do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6106
6107         ieee80211_suspend_all(&sc->sc_ic);
6108
6109         if (do_stop) {
6110                 IWM_LOCK(sc);
6111                 iwm_stop(sc);
6112                 sc->sc_flags |= IWM_FLAG_SCANNING;
6113                 IWM_UNLOCK(sc);
6114         }
6115
6116         return (0);
6117 }
6118
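/*
 * Common teardown path, also used to unwind a failed attach: drain
 * tasks and callouts, stop the device, optionally detach net80211 and
 * release all rings, firmware and DMA memory.
 */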
6119 static int
6120 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6121 {
6122         struct iwm_fw_info *fw = &sc->sc_fw;
6123         device_t dev = sc->sc_dev;
6124         int i;
6125
6126         ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6127
6128         callout_drain(&sc->sc_led_blink_to);
6129         callout_drain(&sc->sc_watchdog_to);
6130         iwm_stop_device(sc);
6131         if (do_net80211) {
6132                 ieee80211_ifdetach(&sc->sc_ic);
6133         }
6134
6135         iwm_phy_db_free(sc);
6136
6137         /* Free descriptor rings */
6138         iwm_free_rx_ring(sc, &sc->rxq);
6139         for (i = 0; i < nitems(sc->txq); i++)
6140                 iwm_free_tx_ring(sc, &sc->txq[i]);
6141
6142         /* Free firmware */
6143         if (fw->fw_fp != NULL)
6144                 iwm_fw_info_free(fw);
6145
6146         /* Free scheduler */
6147         iwm_free_sched(sc);
6148         if (sc->ict_dma.vaddr != NULL)
6149                 iwm_free_ict(sc);
6150         if (sc->kw_dma.vaddr != NULL)
6151                 iwm_free_kw(sc);
6152         if (sc->fw_dma.vaddr != NULL)
6153                 iwm_free_fwmem(sc);
6154
6155         /* Finished with the hardware - detach things */
6156         iwm_pci_detach(dev);
6157
6158         mbufq_drain(&sc->sc_snd);
6159         IWM_LOCK_DESTROY(sc);
6160
6161         return (0);
6162 }
6163
6164 static int
6165 iwm_detach(device_t dev)
6166 {
6167         struct iwm_softc *sc = device_get_softc(dev);
6168
6169         return (iwm_detach_local(sc, 1));
6170 }
6171
6172 static device_method_t iwm_pci_methods[] = {
6173         /* Device interface */
6174         DEVMETHOD(device_probe,         iwm_probe),
6175         DEVMETHOD(device_attach,        iwm_attach),
6176         DEVMETHOD(device_detach,        iwm_detach),
6177         DEVMETHOD(device_suspend,       iwm_suspend),
6178         DEVMETHOD(device_resume,        iwm_resume),
6179
6180         DEVMETHOD_END
6181 };
6182
6183 static driver_t iwm_pci_driver = {
6184         "iwm",
6185         iwm_pci_methods,
6186         sizeof (struct iwm_softc)
6187 };
6188
6189 static devclass_t iwm_devclass;
6190
6191 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6192 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6193 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6194 MODULE_DEPEND(iwm, wlan, 1, 1, 1);