1 /*      $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $     */
2
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 #include <sys/cdefs.h>
106 __FBSDID("$FreeBSD$");
107
108 #include "opt_wlan.h"
109
110 #include <sys/param.h>
111 #include <sys/bus.h>
112 #include <sys/conf.h>
113 #include <sys/endian.h>
114 #include <sys/firmware.h>
115 #include <sys/kernel.h>
116 #include <sys/malloc.h>
117 #include <sys/mbuf.h>
118 #include <sys/mutex.h>
119 #include <sys/module.h>
120 #include <sys/proc.h>
121 #include <sys/rman.h>
122 #include <sys/socket.h>
123 #include <sys/sockio.h>
124 #include <sys/sysctl.h>
125 #include <sys/linker.h>
126
127 #include <machine/bus.h>
128 #include <machine/endian.h>
129 #include <machine/resource.h>
130
131 #include <dev/pci/pcivar.h>
132 #include <dev/pci/pcireg.h>
133
134 #include <net/bpf.h>
135
136 #include <net/if.h>
137 #include <net/if_var.h>
138 #include <net/if_arp.h>
139 #include <net/if_dl.h>
140 #include <net/if_media.h>
141 #include <net/if_types.h>
142
143 #include <netinet/in.h>
144 #include <netinet/in_systm.h>
145 #include <netinet/if_ether.h>
146 #include <netinet/ip.h>
147
148 #include <net80211/ieee80211_var.h>
149 #include <net80211/ieee80211_regdomain.h>
150 #include <net80211/ieee80211_ratectl.h>
151 #include <net80211/ieee80211_radiotap.h>
152
153 #include <dev/iwm/if_iwmreg.h>
154 #include <dev/iwm/if_iwmvar.h>
155 #include <dev/iwm/if_iwm_debug.h>
156 #include <dev/iwm/if_iwm_util.h>
157 #include <dev/iwm/if_iwm_binding.h>
158 #include <dev/iwm/if_iwm_phy_db.h>
159 #include <dev/iwm/if_iwm_mac_ctxt.h>
160 #include <dev/iwm/if_iwm_phy_ctxt.h>
161 #include <dev/iwm/if_iwm_time_event.h>
162 #include <dev/iwm/if_iwm_power.h>
163 #include <dev/iwm/if_iwm_scan.h>
164
165 #include <dev/iwm/if_iwm_pcie_trans.h>
166 #include <dev/iwm/if_iwm_led.h>
167
168 const uint8_t iwm_nvm_channels[] = {
169         /* 2.4 GHz */
170         1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
171         /* 5 GHz */
172         36, 40, 44, 48, 52, 56, 60, 64,
173         100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
174         149, 153, 157, 161, 165
175 };
176 _Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
177     "IWM_NUM_CHANNELS is too small");
178
179 const uint8_t iwm_nvm_channels_8000[] = {
180         /* 2.4 GHz */
181         1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
182         /* 5 GHz */
183         36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
184         96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
185         149, 153, 157, 161, 165, 169, 173, 177, 181
186 };
187 _Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
188     "IWM_NUM_CHANNELS_8000 is too small");
189
190 #define IWM_NUM_2GHZ_CHANNELS   14
191 #define IWM_N_HW_ADDR_MASK      0xF
192
193 /*
194  * XXX For now, there's simply a fixed set of rate table entries
195  * that are populated.
196  */
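/*
 * The "rate" field is in 500 kbps units (the net80211 convention), so
 * 2 is 1 Mbps CCK and 108 is 54 Mbps OFDM; "plcp" is the corresponding
 * PLCP rate code passed to the firmware.
 */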
197 const struct iwm_rate {
198         uint8_t rate;
199         uint8_t plcp;
200 } iwm_rates[] = {
201         {   2,  IWM_RATE_1M_PLCP  },
202         {   4,  IWM_RATE_2M_PLCP  },
203         {  11,  IWM_RATE_5M_PLCP  },
204         {  22,  IWM_RATE_11M_PLCP },
205         {  12,  IWM_RATE_6M_PLCP  },
206         {  18,  IWM_RATE_9M_PLCP  },
207         {  24,  IWM_RATE_12M_PLCP },
208         {  36,  IWM_RATE_18M_PLCP },
209         {  48,  IWM_RATE_24M_PLCP },
210         {  72,  IWM_RATE_36M_PLCP },
211         {  96,  IWM_RATE_48M_PLCP },
212         { 108,  IWM_RATE_54M_PLCP },
213 };
214 #define IWM_RIDX_CCK    0
215 #define IWM_RIDX_OFDM   4
216 #define IWM_RIDX_MAX    (nitems(iwm_rates)-1)
217 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
218 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
219
220 struct iwm_nvm_section {
221         uint16_t length;
222         uint8_t *data;
223 };
224
225 static int      iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
226 static int      iwm_firmware_store_section(struct iwm_softc *,
227                                            enum iwm_ucode_type,
228                                            const uint8_t *, size_t);
229 static int      iwm_set_default_calib(struct iwm_softc *, const void *);
230 static void     iwm_fw_info_free(struct iwm_fw_info *);
231 static int      iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
232 static void     iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
233 static int      iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
234                                      bus_size_t, bus_size_t);
235 static void     iwm_dma_contig_free(struct iwm_dma_info *);
236 static int      iwm_alloc_fwmem(struct iwm_softc *);
237 static void     iwm_free_fwmem(struct iwm_softc *);
238 static int      iwm_alloc_sched(struct iwm_softc *);
239 static void     iwm_free_sched(struct iwm_softc *);
240 static int      iwm_alloc_kw(struct iwm_softc *);
241 static void     iwm_free_kw(struct iwm_softc *);
242 static int      iwm_alloc_ict(struct iwm_softc *);
243 static void     iwm_free_ict(struct iwm_softc *);
244 static int      iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
245 static void     iwm_disable_rx_dma(struct iwm_softc *);
246 static void     iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
247 static void     iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
248 static int      iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
249                                   int);
250 static void     iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
251 static void     iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
252 static void     iwm_enable_interrupts(struct iwm_softc *);
253 static void     iwm_restore_interrupts(struct iwm_softc *);
254 static void     iwm_disable_interrupts(struct iwm_softc *);
255 static void     iwm_ict_reset(struct iwm_softc *);
256 static int      iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
257 static void     iwm_stop_device(struct iwm_softc *);
258 static void     iwm_mvm_nic_config(struct iwm_softc *);
259 static int      iwm_nic_rx_init(struct iwm_softc *);
260 static int      iwm_nic_tx_init(struct iwm_softc *);
261 static int      iwm_nic_init(struct iwm_softc *);
262 static int      iwm_enable_txq(struct iwm_softc *, int, int, int);
263 static int      iwm_post_alive(struct iwm_softc *);
264 static int      iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
265                                    uint16_t, uint8_t *, uint16_t *);
266 static int      iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
267                                      uint16_t *, size_t);
268 static uint32_t iwm_eeprom_channel_flags(uint16_t);
269 static void     iwm_add_channel_band(struct iwm_softc *,
270                     struct ieee80211_channel[], int, int *, int, size_t,
271                     const uint8_t[]);
272 static void     iwm_init_channel_map(struct ieee80211com *, int, int *,
273                     struct ieee80211_channel[]);
274 static int      iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
275                                    const uint16_t *, const uint16_t *,
276                                    const uint16_t *, const uint16_t *,
277                                    const uint16_t *);
278 static void     iwm_set_hw_address_8000(struct iwm_softc *,
279                                         struct iwm_nvm_data *,
280                                         const uint16_t *, const uint16_t *);
281 static int      iwm_get_sku(const struct iwm_softc *, const uint16_t *,
282                             const uint16_t *);
283 static int      iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
284 static int      iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
285                                   const uint16_t *);
286 static int      iwm_get_n_hw_addrs(const struct iwm_softc *,
287                                    const uint16_t *);
288 static void     iwm_set_radio_cfg(const struct iwm_softc *,
289                                   struct iwm_nvm_data *, uint32_t);
290 static int      iwm_parse_nvm_sections(struct iwm_softc *,
291                                        struct iwm_nvm_section *);
292 static int      iwm_nvm_init(struct iwm_softc *);
293 static int      iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
294                                        const uint8_t *, uint32_t);
295 static int      iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
296                                         const uint8_t *, uint32_t);
297 static int      iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
298 static int      iwm_load_cpu_sections_8000(struct iwm_softc *,
299                                            struct iwm_fw_sects *, int , int *);
300 static int      iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
301 static int      iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
302 static int      iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
303 static int      iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
304 static int      iwm_send_phy_cfg_cmd(struct iwm_softc *);
305 static int      iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
306                                               enum iwm_ucode_type);
307 static int      iwm_run_init_mvm_ucode(struct iwm_softc *, int);
308 static int      iwm_rx_addbuf(struct iwm_softc *, int, int);
309 static int      iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
310 static int      iwm_mvm_get_signal_strength(struct iwm_softc *,
311                                             struct iwm_rx_phy_info *);
312 static void     iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
313                                       struct iwm_rx_packet *,
314                                       struct iwm_rx_data *);
315 static int      iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
316 static void     iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
317                                    struct iwm_rx_data *);
318 static int      iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
319                                          struct iwm_rx_packet *,
320                                          struct iwm_node *);
321 static void     iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
322                                   struct iwm_rx_data *);
323 static void     iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
324 #if 0
325 static void     iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
326                                  uint16_t);
327 #endif
328 static const struct iwm_rate *
329         iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
330                         struct ieee80211_frame *, struct iwm_tx_cmd *);
331 static int      iwm_tx(struct iwm_softc *, struct mbuf *,
332                        struct ieee80211_node *, int);
333 static int      iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
334                              const struct ieee80211_bpf_params *);
335 static int      iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
336                                                 struct iwm_mvm_add_sta_cmd_v7 *,
337                                                 int *);
338 static int      iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
339                                        int);
340 static int      iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
341 static int      iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
342 static int      iwm_mvm_add_int_sta_common(struct iwm_softc *,
343                                            struct iwm_int_sta *,
344                                            const uint8_t *, uint16_t, uint16_t);
345 static int      iwm_mvm_add_aux_sta(struct iwm_softc *);
346 static int      iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
347 static int      iwm_auth(struct ieee80211vap *, struct iwm_softc *);
348 static int      iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
349 static int      iwm_release(struct iwm_softc *, struct iwm_node *);
350 static struct ieee80211_node *
351                 iwm_node_alloc(struct ieee80211vap *,
352                                const uint8_t[IEEE80211_ADDR_LEN]);
353 static void     iwm_setrates(struct iwm_softc *, struct iwm_node *);
354 static int      iwm_media_change(struct ifnet *);
355 static int      iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
356 static void     iwm_endscan_cb(void *, int);
357 static void     iwm_mvm_fill_sf_command(struct iwm_softc *,
358                                         struct iwm_sf_cfg_cmd *,
359                                         struct ieee80211_node *);
360 static int      iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
361 static int      iwm_send_bt_init_conf(struct iwm_softc *);
362 static int      iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
363 static void     iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
364 static int      iwm_init_hw(struct iwm_softc *);
365 static void     iwm_init(struct iwm_softc *);
366 static void     iwm_start(struct iwm_softc *);
367 static void     iwm_stop(struct iwm_softc *);
368 static void     iwm_watchdog(void *);
369 static void     iwm_parent(struct ieee80211com *);
370 #ifdef IWM_DEBUG
371 static const char *
372                 iwm_desc_lookup(uint32_t);
373 static void     iwm_nic_error(struct iwm_softc *);
374 static void     iwm_nic_umac_error(struct iwm_softc *);
375 #endif
376 static void     iwm_notif_intr(struct iwm_softc *);
377 static void     iwm_intr(void *);
378 static int      iwm_attach(device_t);
379 static int      iwm_is_valid_ether_addr(uint8_t *);
380 static void     iwm_preinit(void *);
381 static int      iwm_detach_local(struct iwm_softc *sc, int);
382 static void     iwm_init_task(void *);
383 static void     iwm_radiotap_attach(struct iwm_softc *);
384 static struct ieee80211vap *
385                 iwm_vap_create(struct ieee80211com *,
386                                const char [IFNAMSIZ], int,
387                                enum ieee80211_opmode, int,
388                                const uint8_t [IEEE80211_ADDR_LEN],
389                                const uint8_t [IEEE80211_ADDR_LEN]);
390 static void     iwm_vap_delete(struct ieee80211vap *);
391 static void     iwm_scan_start(struct ieee80211com *);
392 static void     iwm_scan_end(struct ieee80211com *);
393 static void     iwm_update_mcast(struct ieee80211com *);
394 static void     iwm_set_channel(struct ieee80211com *);
395 static void     iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
396 static void     iwm_scan_mindwell(struct ieee80211_scan_state *);
397 static int      iwm_detach(device_t);
398
399 /*
400  * Firmware parser.
401  */
402
403 static int
404 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
405 {
406         const struct iwm_fw_cscheme_list *l = (const void *)data;
407
408         if (dlen < sizeof(*l) ||
409             dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
410                 return EINVAL;
411
412         /* we don't actually store anything for now, always use s/w crypto */
413
414         return 0;
415 }
416
417 static int
418 iwm_firmware_store_section(struct iwm_softc *sc,
419     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
420 {
421         struct iwm_fw_sects *fws;
422         struct iwm_fw_onesect *fwone;
423
424         if (type >= IWM_UCODE_TYPE_MAX)
425                 return EINVAL;
426         if (dlen < sizeof(uint32_t))
427                 return EINVAL;
428
429         fws = &sc->sc_fw.fw_sects[type];
430         if (fws->fw_count >= IWM_UCODE_SECT_MAX)
431                 return EINVAL;
432
433         fwone = &fws->fw_sect[fws->fw_count];
434
435         /* first 32bit are device load offset */
436         memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
437
438         /* rest is data */
439         fwone->fws_data = data + sizeof(uint32_t);
440         fwone->fws_len = dlen - sizeof(uint32_t);
441
442         fws->fw_count++;
443         fws->fw_totlen += fwone->fws_len;
444
445         return 0;
446 }
447
448 /* iwlwifi: iwl-drv.c */
449 struct iwm_tlv_calib_data {
450         uint32_t ucode_type;
451         struct iwm_tlv_calib_ctrl calib;
452 } __packed;
453
454 static int
455 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
456 {
457         const struct iwm_tlv_calib_data *def_calib = data;
458         uint32_t ucode_type = le32toh(def_calib->ucode_type);
459
460         if (ucode_type >= IWM_UCODE_TYPE_MAX) {
461                 device_printf(sc->sc_dev,
462                     "Wrong ucode_type %u for default "
463                     "calibration.\n", ucode_type);
464                 return EINVAL;
465         }
466
467         sc->sc_default_calib[ucode_type].flow_trigger =
468             def_calib->calib.flow_trigger;
469         sc->sc_default_calib[ucode_type].event_trigger =
470             def_calib->calib.event_trigger;
471
472         return 0;
473 }
474
475 static void
476 iwm_fw_info_free(struct iwm_fw_info *fw)
477 {
478         firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
479         fw->fw_fp = NULL;
480         /* don't touch fw->fw_status */
481         memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
482 }
483
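/*
 * Parse a TLV-format firmware image: a struct iwm_tlv_ucode_header
 * (a leading zero word, the IWM_TLV_UCODE_MAGIC value and a packed
 * version word) followed by a sequence of TLV records, each padded
 * to a 4-byte boundary.
 */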
484 static int
485 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
486 {
487         struct iwm_fw_info *fw = &sc->sc_fw;
488         const struct iwm_tlv_ucode_header *uhdr;
489         struct iwm_ucode_tlv tlv;
490         enum iwm_ucode_tlv_type tlv_type;
491         const struct firmware *fwp;
492         const uint8_t *data;
493         int error = 0;
494         size_t len;
495
496         if (fw->fw_status == IWM_FW_STATUS_DONE &&
497             ucode_type != IWM_UCODE_TYPE_INIT)
498                 return 0;
499
500         while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
501                 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
502         fw->fw_status = IWM_FW_STATUS_INPROGRESS;
503
504         if (fw->fw_fp != NULL)
505                 iwm_fw_info_free(fw);
506
507         /*
508          * Load firmware into driver memory.
509          * fw_fp will be set.
510          */
511         IWM_UNLOCK(sc);
512         fwp = firmware_get(sc->sc_fwname);
513         IWM_LOCK(sc);
514         if (fwp == NULL) {
515                 device_printf(sc->sc_dev,
516                     "could not read firmware %s\n", sc->sc_fwname);
517                 error = ENOENT;
518                 goto out;
519         }
520         fw->fw_fp = fwp;
521
522         /* (Re-)Initialize default values. */
523         sc->sc_capaflags = 0;
524         sc->sc_capa_n_scan_channels = IWM_MAX_NUM_SCAN_CHANNELS;
525         memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
526         memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
527
528         /*
529          * Parse firmware contents
530          */
531
532         uhdr = (const void *)fw->fw_fp->data;
533         if (*(const uint32_t *)fw->fw_fp->data != 0
534             || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
535                 device_printf(sc->sc_dev, "invalid firmware %s\n",
536                     sc->sc_fwname);
537                 error = EINVAL;
538                 goto out;
539         }
540
541         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
542             IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
543             IWM_UCODE_MINOR(le32toh(uhdr->ver)),
544             IWM_UCODE_API(le32toh(uhdr->ver)));
545         data = uhdr->data;
546         len = fw->fw_fp->datasize - sizeof(*uhdr);
547
548         while (len >= sizeof(tlv)) {
549                 size_t tlv_len;
550                 const void *tlv_data;
551
552                 memcpy(&tlv, data, sizeof(tlv));
553                 tlv_len = le32toh(tlv.length);
554                 tlv_type = le32toh(tlv.type);
555
556                 len -= sizeof(tlv);
557                 data += sizeof(tlv);
558                 tlv_data = data;
559
560                 if (len < tlv_len) {
561                         device_printf(sc->sc_dev,
562                             "firmware too short: %zu bytes\n",
563                             len);
564                         error = EINVAL;
565                         goto parse_out;
566                 }
567
568                 switch ((int)tlv_type) {
569                 case IWM_UCODE_TLV_PROBE_MAX_LEN:
570                         if (tlv_len < sizeof(uint32_t)) {
571                                 device_printf(sc->sc_dev,
572                                     "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
573                                     __func__,
574                                     (int) tlv_len);
575                                 error = EINVAL;
576                                 goto parse_out;
577                         }
578                         sc->sc_capa_max_probe_len
579                             = le32toh(*(const uint32_t *)tlv_data);
580                         /* limit it to something sensible */
581                         if (sc->sc_capa_max_probe_len >
582                             IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
583                                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
584                                     "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
585                                     "ridiculous\n", __func__);
586                                 error = EINVAL;
587                                 goto parse_out;
588                         }
589                         break;
590                 case IWM_UCODE_TLV_PAN:
591                         if (tlv_len) {
592                                 device_printf(sc->sc_dev,
593                                     "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
594                                     __func__,
595                                     (int) tlv_len);
596                                 error = EINVAL;
597                                 goto parse_out;
598                         }
599                         sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
600                         break;
601                 case IWM_UCODE_TLV_FLAGS:
602                         if (tlv_len < sizeof(uint32_t)) {
603                                 device_printf(sc->sc_dev,
604                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
605                                     __func__,
606                                     (int) tlv_len);
607                                 error = EINVAL;
608                                 goto parse_out;
609                         }
610                         /*
611                          * Apparently there can be many flags, but Linux driver
612                          * parses only the first one, and so do we.
613                          *
614                          * XXX: why does this override IWM_UCODE_TLV_PAN?
615                          * Intentional or a bug?  Observations from
616                          * current firmware file:
617                          *  1) TLV_PAN is parsed first
618                          *  2) TLV_FLAGS contains TLV_FLAGS_PAN
619                          * ==> this resets TLV_PAN to itself... hnnnk
620                          */
621                         sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
622                         break;
623                 case IWM_UCODE_TLV_CSCHEME:
624                         if ((error = iwm_store_cscheme(sc,
625                             tlv_data, tlv_len)) != 0) {
626                                 device_printf(sc->sc_dev,
627                                     "%s: iwm_store_cscheme(): returned %d\n",
628                                     __func__,
629                                     error);
630                                 goto parse_out;
631                         }
632                         break;
633                 case IWM_UCODE_TLV_NUM_OF_CPU: {
634                         uint32_t num_cpu;
635                         if (tlv_len != sizeof(uint32_t)) {
636                                 device_printf(sc->sc_dev,
637                                     "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
638                                     __func__,
639                                     (int) tlv_len);
640                                 error = EINVAL;
641                                 goto parse_out;
642                         }
643                         num_cpu = le32toh(*(const uint32_t *)tlv_data);
644                         if (num_cpu < 1 || num_cpu > 2) {
645                                 device_printf(sc->sc_dev,
646                                     "%s: Driver supports only 1 or 2 CPUs\n",
647                                     __func__);
648                                 error = EINVAL;
649                                 goto parse_out;
650                         }
651                         break;
652                 }
653                 case IWM_UCODE_TLV_SEC_RT:
654                         if ((error = iwm_firmware_store_section(sc,
655                             IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0) {
656                                 device_printf(sc->sc_dev,
657                                     "%s: IWM_UCODE_TYPE_REGULAR: iwm_firmware_store_section() failed; %d\n",
658                                     __func__,
659                                     error);
660                                 goto parse_out;
661                         }
662                         break;
663                 case IWM_UCODE_TLV_SEC_INIT:
664                         if ((error = iwm_firmware_store_section(sc,
665                             IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0) {
666                                 device_printf(sc->sc_dev,
667                                     "%s: IWM_UCODE_TYPE_INIT: iwm_firmware_store_section() failed; %d\n",
668                                     __func__,
669                                     error);
670                                 goto parse_out;
671                         }
672                         break;
673                 case IWM_UCODE_TLV_SEC_WOWLAN:
674                         if ((error = iwm_firmware_store_section(sc,
675                             IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0) {
676                                 device_printf(sc->sc_dev,
677                                     "%s: IWM_UCODE_TYPE_WOW: iwm_firmware_store_section() failed; %d\n",
678                                     __func__,
679                                     error);
680                                 goto parse_out;
681                         }
682                         break;
683                 case IWM_UCODE_TLV_DEF_CALIB:
684                         if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
685                                 device_printf(sc->sc_dev,
686                                     "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%d) != sizeof(iwm_tlv_calib_data) (%d)\n",
687                                     __func__,
688                                     (int) tlv_len,
689                                     (int) sizeof(struct iwm_tlv_calib_data));
690                                 error = EINVAL;
691                                 goto parse_out;
692                         }
693                         if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
694                                 device_printf(sc->sc_dev,
695                                     "%s: iwm_set_default_calib() failed: %d\n",
696                                     __func__,
697                                     error);
698                                 goto parse_out;
699                         }
700                         break;
701                 case IWM_UCODE_TLV_PHY_SKU:
702                         if (tlv_len != sizeof(uint32_t)) {
703                                 error = EINVAL;
704                                 device_printf(sc->sc_dev,
705                                     "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) != sizeof(uint32_t)\n",
706                                     __func__,
707                                     (int) tlv_len);
708                                 goto parse_out;
709                         }
710                         sc->sc_fw_phy_config =
711                             le32toh(*(const uint32_t *)tlv_data);
712                         break;
713
714                 case IWM_UCODE_TLV_API_CHANGES_SET: {
715                         const struct iwm_ucode_api *api;
716                         if (tlv_len != sizeof(*api)) {
717                                 error = EINVAL;
718                                 goto parse_out;
719                         }
720                         api = (const struct iwm_ucode_api *)tlv_data;
721                         /* Flags may exceed 32 bits in future firmware. */
722                         if (le32toh(api->api_index) > 0) {
723                                 device_printf(sc->sc_dev,
724                                     "unsupported API index %d\n",
725                                     le32toh(api->api_index));
726                                 goto parse_out;
727                         }
728                         sc->sc_ucode_api = le32toh(api->api_flags);
729                         break;
730                 }
731
732                 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
733                         const struct iwm_ucode_capa *capa;
734                         int idx, i;
735                         if (tlv_len != sizeof(*capa)) {
736                                 error = EINVAL;
737                                 goto parse_out;
738                         }
739                         capa = (const struct iwm_ucode_capa *)tlv_data;
740                         idx = le32toh(capa->api_index);
741                         if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
742                                 device_printf(sc->sc_dev,
743                                     "unsupported API index %d\n", idx);
744                                 goto parse_out;
745                         }
746                         for (i = 0; i < 32; i++) {
747                                 if ((le32toh(capa->api_capa) & (1U << i)) == 0)
748                                         continue;
749                                 setbit(sc->sc_enabled_capa, i + (32 * idx));
750                         }
751                         break;
752                 }
753
754                 case 48: /* undocumented TLV */
755                 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
756                 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
757                         /* ignore, not used by current driver */
758                         break;
759
760                 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
761                         if ((error = iwm_firmware_store_section(sc,
762                             IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
763                             tlv_len)) != 0)
764                                 goto parse_out;
765                         break;
766
767                 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
768                         if (tlv_len != sizeof(uint32_t)) {
769                                 error = EINVAL;
770                                 goto parse_out;
771                         }
772                         sc->sc_capa_n_scan_channels =
773                           le32toh(*(const uint32_t *)tlv_data);
774                         break;
775
776                 case IWM_UCODE_TLV_FW_VERSION:
777                         if (tlv_len != sizeof(uint32_t) * 3) {
778                                 error = EINVAL;
779                                 goto parse_out;
780                         }
781                         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
782                             "%d.%d.%d",
783                             le32toh(((const uint32_t *)tlv_data)[0]),
784                             le32toh(((const uint32_t *)tlv_data)[1]),
785                             le32toh(((const uint32_t *)tlv_data)[2]));
786                         break;
787
788                 default:
789                         device_printf(sc->sc_dev,
790                             "%s: unknown firmware section %d, abort\n",
791                             __func__, tlv_type);
792                         error = EINVAL;
793                         goto parse_out;
794                 }
795
796                 len -= roundup(tlv_len, 4);
797                 data += roundup(tlv_len, 4);
798         }
799
800         KASSERT(error == 0, ("unhandled error"));
801
802  parse_out:
803         if (error) {
804                 device_printf(sc->sc_dev, "firmware parse error %d, "
805                     "section type %d\n", error, tlv_type);
806         }
807
808         if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
809                 device_printf(sc->sc_dev,
810                     "device uses unsupported power ops\n");
811                 error = ENOTSUP;
812         }
813
814  out:
815         if (error) {
816                 fw->fw_status = IWM_FW_STATUS_NONE;
817                 if (fw->fw_fp != NULL)
818                         iwm_fw_info_free(fw);
819         } else
820                 fw->fw_status = IWM_FW_STATUS_DONE;
821         wakeup(&sc->sc_fw);
822
823         return error;
824 }
825
826 /*
827  * DMA resource routines
828  */
829
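/*
 * bus_dmamap_load() callback: record the bus address of the single
 * segment in the caller-provided bus_addr_t.
 */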
830 static void
831 iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
832 {
833         if (error != 0)
834                 return;
835         KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
836         *(bus_addr_t *)arg = segs[0].ds_addr;
837 }
838
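/*
 * Allocate a physically contiguous, coherent DMA area: create a
 * single-segment tag with the requested alignment, allocate zeroed
 * memory, and load the map so that dma->paddr holds the bus address.
 */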
839 static int
840 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
841     bus_size_t size, bus_size_t alignment)
842 {
843         int error;
844
845         dma->tag = NULL;
846         dma->map = NULL;
847         dma->size = size;
848         dma->vaddr = NULL;
849
850         error = bus_dma_tag_create(tag, alignment,
851             0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
852             1, size, 0, NULL, NULL, &dma->tag);
853         if (error != 0)
854                 goto fail;
855
856         error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
857             BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
858         if (error != 0)
859                 goto fail;
860
861         error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
862             iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
863         if (error != 0) {
864                 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
865                 dma->vaddr = NULL;
866                 goto fail;
867         }
868
869         bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
870
871         return 0;
872
873 fail:
874         iwm_dma_contig_free(dma);
875
876         return error;
877 }
878
879 static void
880 iwm_dma_contig_free(struct iwm_dma_info *dma)
881 {
882         if (dma->vaddr != NULL) {
883                 bus_dmamap_sync(dma->tag, dma->map,
884                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
885                 bus_dmamap_unload(dma->tag, dma->map);
886                 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
887                 dma->vaddr = NULL;
888         }
889         if (dma->tag != NULL) {
890                 bus_dma_tag_destroy(dma->tag);
891                 dma->tag = NULL;
892         }
893 }
894
895 /* fwmem is used to load firmware onto the card */
896 static int
897 iwm_alloc_fwmem(struct iwm_softc *sc)
898 {
899         /* Must be aligned on a 16-byte boundary. */
900         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
901             sc->sc_fwdmasegsz, 16);
902 }
903
904 static void
905 iwm_free_fwmem(struct iwm_softc *sc)
906 {
907         iwm_dma_contig_free(&sc->fw_dma);
908 }
909
910 /* TX scheduler rings: per-queue byte count tables for the device's TX scheduler. */
911 static int
912 iwm_alloc_sched(struct iwm_softc *sc)
913 {
914         /* TX scheduler rings must be aligned on a 1KB boundary. */
915         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
916             nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
917 }
918
919 static void
920 iwm_free_sched(struct iwm_softc *sc)
921 {
922         iwm_dma_contig_free(&sc->sched_dma);
923 }
924
925 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
926 static int
927 iwm_alloc_kw(struct iwm_softc *sc)
928 {
929         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
930 }
931
932 static void
933 iwm_free_kw(struct iwm_softc *sc)
934 {
935         iwm_dma_contig_free(&sc->kw_dma);
936 }
937
938 /* interrupt cause table */
939 static int
940 iwm_alloc_ict(struct iwm_softc *sc)
941 {
942         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
943             IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
944 }
945
946 static void
947 iwm_free_ict(struct iwm_softc *sc)
948 {
949         iwm_dma_contig_free(&sc->ict_dma);
950 }
951
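/*
 * Allocate the RX ring: a 256-byte-aligned table of receive buffer DMA
 * addresses (one 32-bit entry per slot), the RX status area, a buffer
 * DMA tag plus a spare map, and one mbuf-backed map per slot filled
 * via iwm_rx_addbuf().
 */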
952 static int
953 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
954 {
955         bus_size_t size;
956         int i, error;
957
958         ring->cur = 0;
959
960         /* Allocate RX descriptors (256-byte aligned). */
961         size = IWM_RX_RING_COUNT * sizeof(uint32_t);
962         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
963         if (error != 0) {
964                 device_printf(sc->sc_dev,
965                     "could not allocate RX ring DMA memory\n");
966                 goto fail;
967         }
968         ring->desc = ring->desc_dma.vaddr;
969
970         /* Allocate RX status area (16-byte aligned). */
971         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
972             sizeof(*ring->stat), 16);
973         if (error != 0) {
974                 device_printf(sc->sc_dev,
975                     "could not allocate RX status DMA memory\n");
976                 goto fail;
977         }
978         ring->stat = ring->stat_dma.vaddr;
979
980         /* Create RX buffer DMA tag. */
981         error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
982             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
983             IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
984         if (error != 0) {
985                 device_printf(sc->sc_dev,
986                     "%s: could not create RX buf DMA tag, error %d\n",
987                     __func__, error);
988                 goto fail;
989         }
990
991         /* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
992         error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
993         if (error != 0) {
994                 device_printf(sc->sc_dev,
995                     "%s: could not create RX buf DMA map, error %d\n",
996                     __func__, error);
997                 goto fail;
998         }
999         /*
1000          * Allocate and map RX buffers.
1001          */
1002         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1003                 struct iwm_rx_data *data = &ring->data[i];
1004                 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1005                 if (error != 0) {
1006                         device_printf(sc->sc_dev,
1007                             "%s: could not create RX buf DMA map, error %d\n",
1008                             __func__, error);
1009                         goto fail;
1010                 }
1011                 data->m = NULL;
1012
1013                 if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
1014                         goto fail;
1015                 }
1016         }
1017         return 0;
1018
1019 fail:   iwm_free_rx_ring(sc, ring);
1020         return error;
1021 }
1022
1023 static void
1024 iwm_disable_rx_dma(struct iwm_softc *sc)
1025 {
1026         /* XXX conditional nic locks are stupid */
1027         /* XXX print out if we can't lock the NIC? */
1028         if (iwm_nic_lock(sc)) {
1029                 /* XXX handle if RX stop doesn't finish? */
1030                 (void) iwm_pcie_rx_stop(sc);
1031                 iwm_nic_unlock(sc);
1032         }
1033 }
1034
1035 static void
1036 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1037 {
1038         /* Reset the ring state */
1039         ring->cur = 0;
1040
1041         /*
1042          * The hw rx ring index in shared memory must also be cleared,
1043          * otherwise the discrepancy can cause reprocessing chaos.
1044          */
1045         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1046 }
1047
1048 static void
1049 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1050 {
1051         int i;
1052
1053         iwm_dma_contig_free(&ring->desc_dma);
1054         iwm_dma_contig_free(&ring->stat_dma);
1055
1056         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1057                 struct iwm_rx_data *data = &ring->data[i];
1058
1059                 if (data->m != NULL) {
1060                         bus_dmamap_sync(ring->data_dmat, data->map,
1061                             BUS_DMASYNC_POSTREAD);
1062                         bus_dmamap_unload(ring->data_dmat, data->map);
1063                         m_freem(data->m);
1064                         data->m = NULL;
1065                 }
1066                 if (data->map != NULL) {
1067                         bus_dmamap_destroy(ring->data_dmat, data->map);
1068                         data->map = NULL;
1069                 }
1070         }
1071         if (ring->spare_map != NULL) {
1072                 bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1073                 ring->spare_map = NULL;
1074         }
1075         if (ring->data_dmat != NULL) {
1076                 bus_dma_tag_destroy(ring->data_dmat);
1077                 ring->data_dmat = NULL;
1078         }
1079 }
1080
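/*
 * Allocate a TX ring: the 256-byte-aligned TFD descriptor array, and,
 * for rings up to and including IWM_MVM_CMD_QUEUE, the device command
 * buffers, a buffer DMA tag and one DMA map per slot.
 */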
1081 static int
1082 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1083 {
1084         bus_addr_t paddr;
1085         bus_size_t size;
1086         size_t maxsize;
1087         int nsegments;
1088         int i, error;
1089
1090         ring->qid = qid;
1091         ring->queued = 0;
1092         ring->cur = 0;
1093
1094         /* Allocate TX descriptors (256-byte aligned). */
1095         size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1096         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1097         if (error != 0) {
1098                 device_printf(sc->sc_dev,
1099                     "could not allocate TX ring DMA memory\n");
1100                 goto fail;
1101         }
1102         ring->desc = ring->desc_dma.vaddr;
1103
1104         /*
1105          * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
1106          * to allocate commands space for other rings.
1107          */
1108         if (qid > IWM_MVM_CMD_QUEUE)
1109                 return 0;
1110
1111         size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1112         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1113         if (error != 0) {
1114                 device_printf(sc->sc_dev,
1115                     "could not allocate TX cmd DMA memory\n");
1116                 goto fail;
1117         }
1118         ring->cmd = ring->cmd_dma.vaddr;
1119
1120         /* FW commands may require more mapped space than packets. */
1121         if (qid == IWM_MVM_CMD_QUEUE) {
1122                 maxsize = IWM_RBUF_SIZE;
1123                 nsegments = 1;
1124         } else {
1125                 maxsize = MCLBYTES;
1126                 nsegments = IWM_MAX_SCATTER - 2;
1127         }
1128
1129         error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1130             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
1131             nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
1132         if (error != 0) {
1133                 device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
1134                 goto fail;
1135         }
1136
1137         paddr = ring->cmd_dma.paddr;
1138         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1139                 struct iwm_tx_data *data = &ring->data[i];
1140
1141                 data->cmd_paddr = paddr;
1142                 data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1143                     + offsetof(struct iwm_tx_cmd, scratch);
1144                 paddr += sizeof(struct iwm_device_cmd);
1145
1146                 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1147                 if (error != 0) {
1148                         device_printf(sc->sc_dev,
1149                             "could not create TX buf DMA map\n");
1150                         goto fail;
1151                 }
1152         }
1153         KASSERT(paddr == ring->cmd_dma.paddr + size,
1154             ("invalid physical address"));
1155         return 0;
1156
1157 fail:   iwm_free_tx_ring(sc, ring);
1158         return error;
1159 }
1160
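/*
 * Release any mbufs still queued on the ring and zero the descriptors
 * so the ring can be reused after a device stop/reset.
 */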
1161 static void
1162 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1163 {
1164         int i;
1165
1166         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1167                 struct iwm_tx_data *data = &ring->data[i];
1168
1169                 if (data->m != NULL) {
1170                         bus_dmamap_sync(ring->data_dmat, data->map,
1171                             BUS_DMASYNC_POSTWRITE);
1172                         bus_dmamap_unload(ring->data_dmat, data->map);
1173                         m_freem(data->m);
1174                         data->m = NULL;
1175                 }
1176         }
1177         /* Clear TX descriptors. */
1178         memset(ring->desc, 0, ring->desc_dma.size);
1179         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1180             BUS_DMASYNC_PREWRITE);
1181         sc->qfullmsk &= ~(1 << ring->qid);
1182         ring->queued = 0;
1183         ring->cur = 0;
1184 }
1185
1186 static void
1187 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1188 {
1189         int i;
1190
1191         iwm_dma_contig_free(&ring->desc_dma);
1192         iwm_dma_contig_free(&ring->cmd_dma);
1193
1194         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1195                 struct iwm_tx_data *data = &ring->data[i];
1196
1197                 if (data->m != NULL) {
1198                         bus_dmamap_sync(ring->data_dmat, data->map,
1199                             BUS_DMASYNC_POSTWRITE);
1200                         bus_dmamap_unload(ring->data_dmat, data->map);
1201                         m_freem(data->m);
1202                         data->m = NULL;
1203                 }
1204                 if (data->map != NULL) {
1205                         bus_dmamap_destroy(ring->data_dmat, data->map);
1206                         data->map = NULL;
1207                 }
1208         }
1209         if (ring->data_dmat != NULL) {
1210                 bus_dma_tag_destroy(ring->data_dmat);
1211                 ring->data_dmat = NULL;
1212         }
1213 }
1214
1215 /*
1216  * High-level hardware frobbing routines
1217  */
1218
1219 static void
1220 iwm_enable_interrupts(struct iwm_softc *sc)
1221 {
1222         sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1223         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1224 }
1225
1226 static void
1227 iwm_restore_interrupts(struct iwm_softc *sc)
1228 {
1229         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1230 }
1231
1232 static void
1233 iwm_disable_interrupts(struct iwm_softc *sc)
1234 {
1235         /* disable interrupts */
1236         IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1237
1238         /* acknowledge all interrupts */
1239         IWM_WRITE(sc, IWM_CSR_INT, ~0);
1240         IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1241 }
1242
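/*
 * Reset and re-arm the interrupt cause table: zero the DMA-backed
 * table, program its physical address into the NIC, and switch the
 * driver back to ICT interrupt handling.
 */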
1243 static void
1244 iwm_ict_reset(struct iwm_softc *sc)
1245 {
1246         iwm_disable_interrupts(sc);
1247
1248         /* Reset ICT table. */
1249         memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1250         sc->ict_cur = 0;
1251
1252         /* Set physical address of ICT table (4KB aligned). */
1253         IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1254             IWM_CSR_DRAM_INT_TBL_ENABLE
1255             | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1256             | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1257             | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1258
1259         /* Switch to ICT interrupt mode in driver. */
1260         sc->sc_flags |= IWM_FLAG_USE_ICT;
1261
1262         /* Re-enable interrupts. */
1263         IWM_WRITE(sc, IWM_CSR_INT, ~0);
1264         iwm_enable_interrupts(sc);
1265 }
1266
1267 /* iwlwifi pcie/trans.c */
1268
1269 /*
1270  * Since this .. hard-resets things, it's time to actually
1271  * mark the first vap (if any) as having no mac context.
1272  * It's annoying, but since the driver is potentially being
1273  * stop/start'ed whilst active (thanks openbsd port!) we
1274  * have to correctly track this.
1275  */
1276 static void
1277 iwm_stop_device(struct iwm_softc *sc)
1278 {
1279         struct ieee80211com *ic = &sc->sc_ic;
1280         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1281         int chnl, qid;
1282         uint32_t mask = 0;
1283
1284         /* tell the device to stop sending interrupts */
1285         iwm_disable_interrupts(sc);
1286
1287         /*
1288          * FreeBSD-local: mark the first vap as not-uploaded,
1289          * so the next transition through auth/assoc
1290          * will correctly populate the MAC context.
1291          */
1292         if (vap) {
1293                 struct iwm_vap *iv = IWM_VAP(vap);
1294                 iv->is_uploaded = 0;
1295         }
1296
1297         /* device going down, Stop using ICT table */
1298         sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1299
1300         /* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
1301
1302         iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1303
1304         if (iwm_nic_lock(sc)) {
1305                 /* Stop each Tx DMA channel */
1306                 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1307                         IWM_WRITE(sc,
1308                             IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1309                         mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1310                 }
1311
1312                 /* Wait for DMA channels to be idle */
1313                 if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1314                     5000)) {
1315                         device_printf(sc->sc_dev,
1316                             "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1317                             IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1318                 }
1319                 iwm_nic_unlock(sc);
1320         }
1321         iwm_disable_rx_dma(sc);
1322
1323         /* Stop RX ring. */
1324         iwm_reset_rx_ring(sc, &sc->rxq);
1325
1326         /* Reset all TX rings. */
1327         for (qid = 0; qid < nitems(sc->txq); qid++)
1328                 iwm_reset_tx_ring(sc, &sc->txq[qid]);
1329
1330         /*
1331          * Power-down device's busmaster DMA clocks
1332          */
1333         iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1334         DELAY(5);
1335
1336         /* Make sure (redundantly) that we've released our request to stay awake */
1337         IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1338             IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1339
1340         /* Stop the device, and put it in low power state */
1341         iwm_apm_stop(sc);
1342
1343         /* Upon stop, the APM issues an interrupt if HW RF kill is set.
1344          * Clear that interrupt again here.
1345          */
1346         iwm_disable_interrupts(sc);
1347         /* stop and reset the on-board processor */
1348         IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1349
1350         /*
1351          * Even if we stop the HW, we still want the RF kill
1352          * interrupt
1353          */
1354         iwm_enable_rfkill_int(sc);
1355         iwm_check_rfkill(sc);
1356 }
1357
1358 /* iwlwifi: mvm/ops.c */
1359 static void
1360 iwm_mvm_nic_config(struct iwm_softc *sc)
1361 {
1362         uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1363         uint32_t reg_val = 0;
1364
1365         radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1366             IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1367         radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1368             IWM_FW_PHY_CFG_RADIO_STEP_POS;
1369         radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1370             IWM_FW_PHY_CFG_RADIO_DASH_POS;
1371
1372         /* SKU control */
1373         reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1374             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1375         reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1376             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1377
1378         /* radio configuration */
1379         reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1380         reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1381         reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1382
1383         IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1384
1385         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1386             "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1387             radio_cfg_step, radio_cfg_dash);
1388
1389         /*
1390          * W/A: the NIC gets stuck in a reset state after an early PCIe
1391          * power-off (PCIe power is lost before PERST# is asserted),
1392          * causing the ME FW to lose ownership and be unable to regain it.
1393          */
1394         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1395                 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1396                     IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1397                     ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1398         }
1399 }
1400
1401 static int
1402 iwm_nic_rx_init(struct iwm_softc *sc)
1403 {
1404         if (!iwm_nic_lock(sc))
1405                 return EBUSY;
1406
1407         /*
1408          * Initialize RX ring.  This is from the iwn driver.
1409          */
1410         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1411
1412         /* stop DMA */
1413         iwm_disable_rx_dma(sc);
1414         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1415         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1416         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1417         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1418
1419         /* Set physical address of RX ring (256-byte aligned). */
1420         IWM_WRITE(sc,
1421             IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1422
1423         /* Set physical address of RX status (16-byte aligned). */
1424         IWM_WRITE(sc,
1425             IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1426
1427         /* Enable RX. */
1428         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1429             IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
1430             IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |  /* HW bug */
1431             IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
1432             IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK        |
1433             (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1434             IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
1435             IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1436
1437         IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1438
1439         /* W/A for interrupt coalescing bug in 7260 and 3160 */
1440         if (sc->host_interrupt_operation_mode)
1441                 IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1442
1443         /*
1444          * Thus sayeth el jefe (iwlwifi) via a comment:
1445          *
1446          * This value should initially be 0 (before preparing any
1447          * RBs), and 8 after preparing the first 8 RBs, for example.
1448          */
1449         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1450
1451         iwm_nic_unlock(sc);
1452
1453         return 0;
1454 }
1455
1456 static int
1457 iwm_nic_tx_init(struct iwm_softc *sc)
1458 {
1459         int qid;
1460
1461         if (!iwm_nic_lock(sc))
1462                 return EBUSY;
1463
1464         /* Deactivate TX scheduler. */
1465         iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1466
1467         /* Set physical address of "keep warm" page (16-byte aligned). */
1468         IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1469
1470         /* Initialize TX rings. */
1471         for (qid = 0; qid < nitems(sc->txq); qid++) {
1472                 struct iwm_tx_ring *txq = &sc->txq[qid];
1473
1474                 /* Set physical address of TX ring (256-byte aligned). */
1475                 IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1476                     txq->desc_dma.paddr >> 8);
1477                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1478                     "%s: loading ring %d descriptors (%p) at %lx\n",
1479                     __func__,
1480                     qid, txq->desc,
1481                     (unsigned long) (txq->desc_dma.paddr >> 8));
1482         }
1483
1484         iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1485
1486         iwm_nic_unlock(sc);
1487
1488         return 0;
1489 }
1490
1491 static int
1492 iwm_nic_init(struct iwm_softc *sc)
1493 {
1494         int error;
1495
1496         iwm_apm_init(sc);
1497         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1498                 iwm_set_pwr(sc);
1499
1500         iwm_mvm_nic_config(sc);
1501
1502         if ((error = iwm_nic_rx_init(sc)) != 0)
1503                 return error;
1504
1505         /*
1506          * Ditto for TX, from iwn
1507          */
1508         if ((error = iwm_nic_tx_init(sc)) != 0)
1509                 return error;
1510
1511         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1512             "%s: shadow registers enabled\n", __func__);
1513         IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1514
1515         return 0;
1516 }
1517
1518 const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
1519         IWM_MVM_TX_FIFO_VO,
1520         IWM_MVM_TX_FIFO_VI,
1521         IWM_MVM_TX_FIFO_BE,
1522         IWM_MVM_TX_FIFO_BK,
1523 };
1524
1525 static int
1526 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1527 {
1528         if (!iwm_nic_lock(sc)) {
1529                 device_printf(sc->sc_dev,
1530                     "%s: cannot enable txq %d\n",
1531                     __func__,
1532                     qid);
1533                 return EBUSY;
1534         }
1535
1536         IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1537
1538         if (qid == IWM_MVM_CMD_QUEUE) {
1539                 /* Deactivate before configuration. */
1540                 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1541                     (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1542                     | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1543
1544                 iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1545
1546                 iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1547
1548                 iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1549                 /* Set scheduler window size and frame limit. */
1550                 iwm_write_mem32(sc,
1551                     sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1552                     sizeof(uint32_t),
1553                     ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1554                     IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1555                     ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1556                     IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1557
1558                 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1559                     (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1560                     (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1561                     (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1562                     IWM_SCD_QUEUE_STTS_REG_MSK);
1563         } else {
1564                 struct iwm_scd_txq_cfg_cmd cmd;
1565                 int error;
1566
1567                 iwm_nic_unlock(sc);
1568
1569                 memset(&cmd, 0, sizeof(cmd));
1570                 cmd.scd_queue = qid;
1571                 cmd.enable = 1;
1572                 cmd.sta_id = sta_id;
1573                 cmd.tx_fifo = fifo;
1574                 cmd.aggregate = 0;
1575                 cmd.window = IWM_FRAME_LIMIT;
1576
1577                 error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1578                     sizeof(cmd), &cmd);
1579                 if (error) {
1580                         device_printf(sc->sc_dev,
1581                             "cannot enable txq %d\n", qid);
1582                         return error;
1583                 }
1584
1585                 if (!iwm_nic_lock(sc))
1586                         return EBUSY;
1587         }
1588
1589         iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1590             iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1591
1592         iwm_nic_unlock(sc);
1593
1594         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1595             __func__, qid, fifo);
1596
1597         return 0;
1598 }
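
/*
 * Illustrative sketch only (not compiled): the scheduler context "REG2"
 * word written for the command queue above packs the window size and the
 * frame limit into one 32-bit value.  Assuming the iwlwifi layout (window
 * size in bits 0..6, frame limit in bits 16..22) and a frame limit of 64,
 * the packed value is 0x00400040; the authoritative field definitions are
 * in if_iwmreg.h.
 */
#if 0
static uint32_t
iwm_scd_reg2_sketch(void)
{
	uint32_t limit = 64;	/* hypothetical IWM_FRAME_LIMIT */

	/* (64 << 0) | (64 << 16) == 0x00400040 */
	return ((limit << 0) & 0x0000007f) | ((limit << 16) & 0x007f0000);
}
#endif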
1599
1600 static int
1601 iwm_post_alive(struct iwm_softc *sc)
1602 {
1603         int nwords;
1604         int error, chnl;
1605         uint32_t base;
1606
1607         if (!iwm_nic_lock(sc))
1608                 return EBUSY;
1609
1610         base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1611         if (sc->sched_base != base) {
1612                 device_printf(sc->sc_dev,
1613                     "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1614                     __func__, sc->sched_base, base);
1615         }
1616
1617         iwm_ict_reset(sc);
1618
1619         /* Clear TX scheduler state in SRAM. */
1620         nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1621             IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
1622             / sizeof(uint32_t);
1623         error = iwm_write_mem(sc,
1624             sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1625             NULL, nwords);
1626         if (error)
1627                 goto out;
1628
1629         /* Set physical address of TX scheduler rings (1KB aligned). */
1630         iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1631
1632         iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1633
1634         iwm_nic_unlock(sc);
1635
1636         /* enable command channel */
1637         error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
1638         if (error)
1639                 return error;
1640
1641         if (!iwm_nic_lock(sc))
1642                 return EBUSY;
1643
1644         iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1645
1646         /* Enable DMA channels. */
1647         for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1648                 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1649                     IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1650                     IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1651         }
1652
1653         IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1654             IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1655
1656         /* Enable L1-Active */
1657         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
1658                 iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1659                     IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1660         }
1661
1662  out:
1663         iwm_nic_unlock(sc);
1664         return error;
1665 }
1666
1667 /*
1668  * NVM read access and content parsing.  We do not support
1669  * external NVM or writing NVM.
1670  * iwlwifi/mvm/nvm.c
1671  */
1672
1673 /* list of NVM sections we are allowed/need to read */
1674 const int nvm_to_read[] = {
1675         IWM_NVM_SECTION_TYPE_HW,
1676         IWM_NVM_SECTION_TYPE_SW,
1677         IWM_NVM_SECTION_TYPE_REGULATORY,
1678         IWM_NVM_SECTION_TYPE_CALIBRATION,
1679         IWM_NVM_SECTION_TYPE_PRODUCTION,
1680         IWM_NVM_SECTION_TYPE_HW_8000,
1681         IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
1682         IWM_NVM_SECTION_TYPE_PHY_SKU,
1683 };
1684
1685 /* Default NVM chunk size to read, and maximum NVM section size */
1686 #define IWM_NVM_DEFAULT_CHUNK_SIZE      (2*1024)
1687 #define IWM_MAX_NVM_SECTION_SIZE        8192
1688
1689 #define IWM_NVM_WRITE_OPCODE 1
1690 #define IWM_NVM_READ_OPCODE 0
1691
1692 /* load nvm chunk response */
1693 #define IWM_READ_NVM_CHUNK_SUCCEED              0
1694 #define IWM_READ_NVM_CHUNK_INVALID_ADDRESS      1
1695
1696 static int
1697 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1698         uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1699 {
1700
1701         struct iwm_nvm_access_cmd nvm_access_cmd = {
1702                 .offset = htole16(offset),
1703                 .length = htole16(length),
1704                 .type = htole16(section),
1705                 .op_code = IWM_NVM_READ_OPCODE,
1706         };
1707         struct iwm_nvm_access_resp *nvm_resp;
1708         struct iwm_rx_packet *pkt;
1709         struct iwm_host_cmd cmd = {
1710                 .id = IWM_NVM_ACCESS_CMD,
1711                 .flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
1712                     IWM_CMD_SEND_IN_RFKILL,
1713                 .data = { &nvm_access_cmd, },
1714         };
1715         int ret, offset_read;
1716         size_t bytes_read;
1717         uint8_t *resp_data;
1718
1719         cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1720
1721         ret = iwm_send_cmd(sc, &cmd);
1722         if (ret) {
1723                 device_printf(sc->sc_dev,
1724                     "Could not send NVM_ACCESS command (error=%d)\n", ret);
1725                 return ret;
1726         }
1727
1728         pkt = cmd.resp_pkt;
1729         if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
1730                 device_printf(sc->sc_dev,
1731                     "Bad return from IWM_NVM_ACCES_COMMAND (0x%08X)\n",
1732                     pkt->hdr.flags);
1733                 ret = EIO;
1734                 goto exit;
1735         }
1736
1737         /* Extract NVM response */
1738         nvm_resp = (void *)pkt->data;
1739
1740         ret = le16toh(nvm_resp->status);
1741         bytes_read = le16toh(nvm_resp->length);
1742         offset_read = le16toh(nvm_resp->offset);
1743         resp_data = nvm_resp->data;
1744         if (ret) {
1745                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1746                     "NVM access command failed with status %d\n", ret);
1747                 ret = EINVAL;
1748                 goto exit;
1749         }
1750
1751         if (offset_read != offset) {
1752                 device_printf(sc->sc_dev,
1753                     "NVM ACCESS response with invalid offset %d\n",
1754                     offset_read);
1755                 ret = EINVAL;
1756                 goto exit;
1757         }
1758
1759         if (bytes_read > length) {
1760                 device_printf(sc->sc_dev,
1761                     "NVM ACCESS response with too much data "
1762                     "(%d bytes requested, %zd bytes received)\n",
1763                     length, bytes_read);
1764                 ret = EINVAL;
1765                 goto exit;
1766         }
1767
1768         memcpy(data + offset, resp_data, bytes_read);
1769         *len = bytes_read;
1770
1771  exit:
1772         iwm_free_resp(sc, &cmd);
1773         return ret;
1774 }
1775
1776 /*
1777  * Reads an NVM section completely.
1778  * NICs prior to the 7000 family don't have a real NVM, but just read
1779  * section 0, which is the EEPROM.  Because EEPROM reads are not bounded
1780  * by the uCode, we have to check manually in that case that we don't
1781  * overflow and try to read more than the EEPROM size.
1782  * For 7000 family NICs, we supply the maximal size we can read, and
1783  * the uCode fills the response with as much data as it can,
1784  * without overflowing, so no check is needed.
1785  */
1786 static int
1787 iwm_nvm_read_section(struct iwm_softc *sc,
1788         uint16_t section, uint8_t *data, uint16_t *len, size_t max_len)
1789 {
1790         uint16_t chunklen, seglen;
1791         int error = 0;
1792
1793         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1794             "reading NVM section %d\n", section);
1795
1796         chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
1797         *len = 0;
1798
1799         /* Read NVM chunks until exhausted (reading less than requested) */
1800         while (seglen == chunklen && *len < max_len) {
1801                 error = iwm_nvm_read_chunk(sc,
1802                     section, *len, chunklen, data, &seglen);
1803                 if (error) {
1804                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1805                             "Cannot read from NVM section "
1806                             "%d at offset %d\n", section, *len);
1807                         return error;
1808                 }
1809                 *len += seglen;
1810         }
1811
1812         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1813             "NVM section %d read completed (%d bytes, error=%d)\n",
1814             section, *len, error);
1815         return error;
1816 }
1817
1818 /*
1819  * BEGIN IWM_NVM_PARSE
1820  */
1821
1822 /* iwlwifi/iwl-nvm-parse.c */
1823
1824 /* NVM offsets (in words) definitions */
1825 enum iwm_nvm_offsets {
1826         /* NVM HW-Section offset (in words) definitions */
1827         IWM_HW_ADDR = 0x15,
1828
1829 /* NVM SW-Section offset (in words) definitions */
1830         IWM_NVM_SW_SECTION = 0x1C0,
1831         IWM_NVM_VERSION = 0,
1832         IWM_RADIO_CFG = 1,
1833         IWM_SKU = 2,
1834         IWM_N_HW_ADDRS = 3,
1835         IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1836
1837 /* NVM calibration section offset (in words) definitions */
1838         IWM_NVM_CALIB_SECTION = 0x2B8,
1839         IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1840 };
1841
1842 enum iwm_8000_nvm_offsets {
1843         /* NVM HW-Section offset (in words) definitions */
1844         IWM_HW_ADDR0_WFPM_8000 = 0x12,
1845         IWM_HW_ADDR1_WFPM_8000 = 0x16,
1846         IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1847         IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1848         IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1849
1850         /* NVM SW-Section offset (in words) definitions */
1851         IWM_NVM_SW_SECTION_8000 = 0x1C0,
1852         IWM_NVM_VERSION_8000 = 0,
1853         IWM_RADIO_CFG_8000 = 0,
1854         IWM_SKU_8000 = 2,
1855         IWM_N_HW_ADDRS_8000 = 3,
1856
1857         /* NVM REGULATORY -Section offset (in words) definitions */
1858         IWM_NVM_CHANNELS_8000 = 0,
1859         IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1860         IWM_NVM_LAR_OFFSET_8000 = 0x507,
1861         IWM_NVM_LAR_ENABLED_8000 = 0x7,
1862
1863         /* NVM calibration section offset (in words) definitions */
1864         IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1865         IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1866 };
1867
1868 /* SKU Capabilities (actual values from NVM definition) */
1869 enum nvm_sku_bits {
1870         IWM_NVM_SKU_CAP_BAND_24GHZ      = (1 << 0),
1871         IWM_NVM_SKU_CAP_BAND_52GHZ      = (1 << 1),
1872         IWM_NVM_SKU_CAP_11N_ENABLE      = (1 << 2),
1873         IWM_NVM_SKU_CAP_11AC_ENABLE     = (1 << 3),
1874 };
1875
1876 /* radio config bits (actual values from NVM definition) */
1877 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1878 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1879 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1880 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1881 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1882 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1883
1884 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)       (x & 0xF)
1885 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x)         ((x >> 4) & 0xF)
1886 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x)         ((x >> 8) & 0xF)
1887 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)         ((x >> 12) & 0xFFF)
1888 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)       ((x >> 24) & 0xF)
1889 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)       ((x >> 28) & 0xF)
1890
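
/*
 * Worked example (illustrative only, not compiled): decoding a made-up
 * 7000-family radio configuration word with the masks above.  The value
 * 0x0124 is purely hypothetical.
 */
#if 0
static void
iwm_radio_cfg_decode_example(void)
{
	uint16_t radio_cfg = 0x0124;	/* hypothetical NVM value */

	/* type = 2, step = 1, dash = 0, pnum = 0, tx_ant = 0x1, rx_ant = 0x0 */
	printf("type %d step %d dash %d pnum %d tx_ant 0x%x rx_ant 0x%x\n",
	    IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg),
	    IWM_NVM_RF_CFG_STEP_MSK(radio_cfg),
	    IWM_NVM_RF_CFG_DASH_MSK(radio_cfg),
	    IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg),
	    IWM_NVM_RF_CFG_TX_ANT_MSK(radio_cfg),
	    IWM_NVM_RF_CFG_RX_ANT_MSK(radio_cfg));
}
#endif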
1891 #define DEFAULT_MAX_TX_POWER 16
1892
1893 /**
1894  * enum iwm_nvm_channel_flags - channel flags in NVM
1895  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1896  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1897  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1898  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1899  * XXX cannot find this (DFS) flag in iwl-nvm-parse.c
1900  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1901  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1902  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1903  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1904  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1905  */
1906 enum iwm_nvm_channel_flags {
1907         IWM_NVM_CHANNEL_VALID = (1 << 0),
1908         IWM_NVM_CHANNEL_IBSS = (1 << 1),
1909         IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1910         IWM_NVM_CHANNEL_RADAR = (1 << 4),
1911         IWM_NVM_CHANNEL_DFS = (1 << 7),
1912         IWM_NVM_CHANNEL_WIDE = (1 << 8),
1913         IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1914         IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1915         IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1916 };
1917
1918 /*
1919  * Translate EEPROM flags to net80211.
1920  */
1921 static uint32_t
1922 iwm_eeprom_channel_flags(uint16_t ch_flags)
1923 {
1924         uint32_t nflags;
1925
1926         nflags = 0;
1927         if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1928                 nflags |= IEEE80211_CHAN_PASSIVE;
1929         if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1930                 nflags |= IEEE80211_CHAN_NOADHOC;
1931         if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1932                 nflags |= IEEE80211_CHAN_DFS;
1933                 /* Just in case. */
1934                 nflags |= IEEE80211_CHAN_NOADHOC;
1935         }
1936
1937         return (nflags);
1938 }
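
/*
 * Illustrative examples only (not compiled): how two hypothetical NVM
 * channel flag words translate through iwm_eeprom_channel_flags() above.
 */
#if 0
static void
iwm_eeprom_channel_flags_example(void)
{
	/* VALID | IBSS | ACTIVE: no restrictions, so nflags == 0. */
	uint32_t nflags_open = iwm_eeprom_channel_flags(
	    IWM_NVM_CHANNEL_VALID | IWM_NVM_CHANNEL_IBSS |
	    IWM_NVM_CHANNEL_ACTIVE);

	/* VALID | RADAR only: passive scanning, no adhoc, DFS required. */
	uint32_t nflags_dfs = iwm_eeprom_channel_flags(
	    IWM_NVM_CHANNEL_VALID | IWM_NVM_CHANNEL_RADAR);

	printf("open 0x%x dfs 0x%x\n", nflags_open, nflags_dfs);
}
#endif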
1939
1940 static void
1941 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1942     int maxchans, int *nchans, int ch_idx, size_t ch_num,
1943     const uint8_t bands[])
1944 {
1945         const uint16_t * const nvm_ch_flags = sc->sc_nvm.nvm_ch_flags;
1946         uint32_t nflags;
1947         uint16_t ch_flags;
1948         uint8_t ieee;
1949         int error;
1950
1951         for (; ch_idx < ch_num; ch_idx++) {
1952                 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1953                 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1954                         ieee = iwm_nvm_channels[ch_idx];
1955                 else
1956                         ieee = iwm_nvm_channels_8000[ch_idx];
1957
1958                 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1959                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1960                             "Ch. %d Flags %x [%sGHz] - No traffic\n",
1961                             ieee, ch_flags,
1962                             (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1963                             "5.2" : "2.4");
1964                         continue;
1965                 }
1966
1967                 nflags = iwm_eeprom_channel_flags(ch_flags);
1968                 error = ieee80211_add_channel(chans, maxchans, nchans,
1969                     ieee, 0, 0, nflags, bands);
1970                 if (error != 0)
1971                         break;
1972
1973                 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1974                     "Ch. %d Flags %x [%sGHz] - Added\n",
1975                     ieee, ch_flags,
1976                     (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1977                     "5.2" : "2.4");
1978         }
1979 }
1980
1981 static void
1982 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
1983     struct ieee80211_channel chans[])
1984 {
1985         struct iwm_softc *sc = ic->ic_softc;
1986         struct iwm_nvm_data *data = &sc->sc_nvm;
1987         uint8_t bands[IEEE80211_MODE_BYTES];
1988         size_t ch_num;
1989
1990         memset(bands, 0, sizeof(bands));
1991         /* 1-13: 11b/g channels. */
1992         setbit(bands, IEEE80211_MODE_11B);
1993         setbit(bands, IEEE80211_MODE_11G);
1994         iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
1995             IWM_NUM_2GHZ_CHANNELS - 1, bands);
1996
1997         /* 14: 11b channel only. */
1998         clrbit(bands, IEEE80211_MODE_11G);
1999         iwm_add_channel_band(sc, chans, maxchans, nchans,
2000             IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2001
2002         if (data->sku_cap_band_52GHz_enable) {
2003                 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2004                         ch_num = nitems(iwm_nvm_channels);
2005                 else
2006                         ch_num = nitems(iwm_nvm_channels_8000);
2007                 memset(bands, 0, sizeof(bands));
2008                 setbit(bands, IEEE80211_MODE_11A);
2009                 iwm_add_channel_band(sc, chans, maxchans, nchans,
2010                     IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2011         }
2012 }
2013
2014 static void
2015 iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2016         const uint16_t *mac_override, const uint16_t *nvm_hw)
2017 {
2018         const uint8_t *hw_addr;
2019
2020         if (mac_override) {
2021                 static const uint8_t reserved_mac[] = {
2022                         0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2023                 };
2024
2025                 hw_addr = (const uint8_t *)(mac_override +
2026                                  IWM_MAC_ADDRESS_OVERRIDE_8000);
2027
2028                 /*
2029                  * Store the MAC address from the MAC address override (MAO)
2030                  * section.  No byte swapping is required in the MAO section.
2031                  */
2032                 IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2033
2034                 /*
2035                  * Force the use of the OTP MAC address in case of reserved MAC
2036                  * address in the NVM, or if address is given but invalid.
2037                  */
2038                 if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2039                     !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2040                     iwm_is_valid_ether_addr(data->hw_addr) &&
2041                     !IEEE80211_IS_MULTICAST(data->hw_addr))
2042                         return;
2043
2044                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2045                     "%s: mac address from nvm override section invalid\n",
2046                     __func__);
2047         }
2048
2049         if (nvm_hw) {
2050                 /* read the mac address from WFMP registers */
2051                 uint32_t mac_addr0 =
2052                     htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2053                 uint32_t mac_addr1 =
2054                     htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2055
2056                 hw_addr = (const uint8_t *)&mac_addr0;
2057                 data->hw_addr[0] = hw_addr[3];
2058                 data->hw_addr[1] = hw_addr[2];
2059                 data->hw_addr[2] = hw_addr[1];
2060                 data->hw_addr[3] = hw_addr[0];
2061
2062                 hw_addr = (const uint8_t *)&mac_addr1;
2063                 data->hw_addr[4] = hw_addr[1];
2064                 data->hw_addr[5] = hw_addr[0];
2065
2066                 return;
2067         }
2068
2069         device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2070         memset(data->hw_addr, 0, sizeof(data->hw_addr));
2071 }
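
/*
 * Worked example (illustrative only, not compiled): with hypothetical
 * register contents IWM_WFMP_MAC_ADDR_0 == 0xaabbccdd and
 * IWM_WFMP_MAC_ADDR_1 == 0x0000eeff, the byte shuffling above yields the
 * MAC address aa:bb:cc:dd:ee:ff regardless of host endianness.
 */
#if 0
static void
iwm_wfmp_mac_addr_example(void)
{
	uint32_t mac_addr0 = htole32(0xaabbccdd);
	uint32_t mac_addr1 = htole32(0x0000eeff);
	const uint8_t *p;
	uint8_t mac[6];

	p = (const uint8_t *)&mac_addr0;
	mac[0] = p[3]; mac[1] = p[2]; mac[2] = p[1]; mac[3] = p[0];
	p = (const uint8_t *)&mac_addr1;
	mac[4] = p[1]; mac[5] = p[0];
	/* mac[] now holds aa:bb:cc:dd:ee:ff. */
}
#endif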
2072
2073 static int
2074 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2075             const uint16_t *phy_sku)
2076 {
2077         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2078                 return le16_to_cpup(nvm_sw + IWM_SKU);
2079
2080         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2081 }
2082
2083 static int
2084 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2085 {
2086         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2087                 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2088         else
2089                 return le32_to_cpup((const uint32_t *)(nvm_sw +
2090                                                 IWM_NVM_VERSION_8000));
2091 }
2092
2093 static int
2094 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2095                   const uint16_t *phy_sku)
2096 {
2097         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2098                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2099
2100         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2101 }
2102
2103 static int
2104 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2105 {
2106         int n_hw_addr;
2107
2108         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2109                 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2110
2111         n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2112
2113         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2114 }
2115
2116 static void
2117 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2118                   uint32_t radio_cfg)
2119 {
2120         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
2121                 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2122                 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2123                 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2124                 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2125                 return;
2126         }
2127
2128         /* set the radio configuration for family 8000 */
2129         data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2130         data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2131         data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2132         data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2133         data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2134         data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2135 }
2136
2137 static int
2138 iwm_parse_nvm_data(struct iwm_softc *sc,
2139                    const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2140                    const uint16_t *nvm_calib, const uint16_t *mac_override,
2141                    const uint16_t *phy_sku, const uint16_t *regulatory)
2142 {
2143         struct iwm_nvm_data *data = &sc->sc_nvm;
2144         uint8_t hw_addr[IEEE80211_ADDR_LEN];
2145         uint32_t sku, radio_cfg;
2146
2147         data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2148
2149         radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2150         iwm_set_radio_cfg(sc, data, radio_cfg);
2151
2152         sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2153         data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2154         data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2155         data->sku_cap_11n_enable = 0;
2156
2157         data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2158
2159         /* Stored as little-endian 16-bit words, i.e. byte order 2-1 4-3 6-5. */
2160         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2161                 IEEE80211_ADDR_COPY(hw_addr, nvm_hw + IWM_HW_ADDR);
2162                 data->hw_addr[0] = hw_addr[1];
2163                 data->hw_addr[1] = hw_addr[0];
2164                 data->hw_addr[2] = hw_addr[3];
2165                 data->hw_addr[3] = hw_addr[2];
2166                 data->hw_addr[4] = hw_addr[5];
2167                 data->hw_addr[5] = hw_addr[4];
2168         } else {
2169                 iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
2170         }
2171
2172         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2173                 memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2174                     IWM_NUM_CHANNELS * sizeof(uint16_t));
2175         } else {
2176                 memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2177                     IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2178         }
2179         data->calib_version = 255;   /* TODO:
2180                                         this value keeps some checks from
2181                                         failing; we need to check whether this
2182                                         field is still needed, and if so,
2183                                         where it lives in the NVM */
2184
2185         return 0;
2186 }
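
/*
 * Worked example (illustrative only, not compiled): the 7000-family NVM
 * stores the MAC address as little-endian 16-bit words, so an address of
 * aa:bb:cc:dd:ee:ff sits in the HW section as the bytes bb aa dd cc ff ee.
 * The pairwise swap above restores the natural order.
 */
#if 0
static void
iwm_nvm_hw_addr_order_example(void)
{
	/* Raw bytes as they would appear at nvm_hw + IWM_HW_ADDR. */
	const uint8_t raw[6] = { 0xbb, 0xaa, 0xdd, 0xcc, 0xff, 0xee };
	uint8_t mac[6];

	mac[0] = raw[1]; mac[1] = raw[0];
	mac[2] = raw[3]; mac[3] = raw[2];
	mac[4] = raw[5]; mac[5] = raw[4];
	/* mac[] now holds aa:bb:cc:dd:ee:ff. */
}
#endif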
2187
2188 /*
2189  * END NVM PARSE
2190  */
2191
2192 static int
2193 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2194 {
2195         const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2196
2197         /* Checking for required sections */
2198         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2199                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2200                     !sections[IWM_NVM_SECTION_TYPE_HW].data) {
2201                         device_printf(sc->sc_dev,
2202                             "Can't parse empty OTP/NVM sections\n");
2203                         return ENOENT;
2204                 }
2205
2206                 hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
2207         } else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
2208                 /* SW and REGULATORY sections are mandatory */
2209                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2210                     !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2211                         device_printf(sc->sc_dev,
2212                             "Can't parse empty OTP/NVM sections\n");
2213                         return ENOENT;
2214                 }
2215                 /* MAC_OVERRIDE or at least HW section must exist */
2216                 if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
2217                     !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2218                         device_printf(sc->sc_dev,
2219                             "Can't parse mac_address, empty sections\n");
2220                         return ENOENT;
2221                 }
2222
2223                 /* PHY_SKU section is mandatory in B0 */
2224                 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2225                         device_printf(sc->sc_dev,
2226                             "Can't parse phy_sku in B0, empty sections\n");
2227                         return ENOENT;
2228                 }
2229
2230                 hw = (const uint16_t *)
2231                     sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
2232         } else {
2233                 panic("unknown device family %d\n", sc->sc_device_family);
2234         }
2235
2236         sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2237         calib = (const uint16_t *)
2238             sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2239         regulatory = (const uint16_t *)
2240             sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2241         mac_override = (const uint16_t *)
2242             sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2243         phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2244
2245         return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2246             phy_sku, regulatory);
2247 }
2248
2249 static int
2250 iwm_nvm_init(struct iwm_softc *sc)
2251 {
2252         struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
2253         int i, section, error;
2254         uint16_t len;
2255         uint8_t *buf;
2256         const size_t bufsz = IWM_MAX_NVM_SECTION_SIZE;
2257
2258         memset(nvm_sections, 0, sizeof(nvm_sections));
2259
2260         buf = malloc(bufsz, M_DEVBUF, M_NOWAIT);
2261         if (buf == NULL)
2262                 return ENOMEM;
2263
2264         for (i = 0; i < nitems(nvm_to_read); i++) {
2265                 section = nvm_to_read[i];
2266                 KASSERT(section < nitems(nvm_sections),
2267                     ("too many sections"));
2268
2269                 error = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
2270                 if (error) {
2271                         error = 0;
2272                         continue;
2273                 }
2274                 nvm_sections[section].data = malloc(len, M_DEVBUF, M_NOWAIT);
2275                 if (nvm_sections[section].data == NULL) {
2276                         error = ENOMEM;
2277                         break;
2278                 }
2279                 memcpy(nvm_sections[section].data, buf, len);
2280                 nvm_sections[section].length = len;
2281         }
2282         free(buf, M_DEVBUF);
2283         if (error == 0)
2284                 error = iwm_parse_nvm_sections(sc, nvm_sections);
2285
2286         for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
2287                 if (nvm_sections[i].data != NULL)
2288                         free(nvm_sections[i].data, M_DEVBUF);
2289         }
2290
2291         return error;
2292 }
2293
2294 /*
2295  * Firmware loading gunk.  This is kind of a weird hybrid between the
2296  * iwn driver and the Linux iwlwifi driver.
2297  */
2298
2299 static int
2300 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
2301         const uint8_t *section, uint32_t byte_cnt)
2302 {
2303         int error = EINVAL;
2304         uint32_t chunk_sz, offset;
2305
2306         chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
2307
2308         for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
2309                 uint32_t addr, len;
2310                 const uint8_t *data;
2311
2312                 addr = dst_addr + offset;
2313                 len = MIN(chunk_sz, byte_cnt - offset);
2314                 data = section + offset;
2315
2316                 error = iwm_firmware_load_chunk(sc, addr, data, len);
2317                 if (error)
2318                         break;
2319         }
2320
2321         return error;
2322 }
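
/*
 * Worked example (illustrative only): with a hypothetical maximum chunk
 * size of 0x4000 bytes and byte_cnt == 0x9000, the loop above issues
 * chunks at offsets 0x0000, 0x4000 and 0x8000 with lengths 0x4000, 0x4000
 * and 0x1000 respectively; the real per-chunk limit is
 * IWM_FH_MEM_TB_MAX_LENGTH.
 */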
2323
2324 static int
2325 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2326         const uint8_t *chunk, uint32_t byte_cnt)
2327 {
2328         struct iwm_dma_info *dma = &sc->fw_dma;
2329         int error;
2330
2331         /* Copy firmware chunk into pre-allocated DMA-safe memory. */
2332         memcpy(dma->vaddr, chunk, byte_cnt);
2333         bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2334
2335         if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2336             dst_addr <= IWM_FW_MEM_EXTENDED_END) {
2337                 iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2338                     IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2339         }
2340
2341         sc->sc_fw_chunk_done = 0;
2342
2343         if (!iwm_nic_lock(sc))
2344                 return EBUSY;
2345
2346         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2347             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2348         IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2349             dst_addr);
2350         IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2351             dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2352         IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2353             (iwm_get_dma_hi_addr(dma->paddr)
2354               << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2355         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2356             1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2357             1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2358             IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2359         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2360             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2361             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2362             IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2363
2364         iwm_nic_unlock(sc);
2365
2366         /* wait 1s for this segment to load */
2367         while (!sc->sc_fw_chunk_done)
2368                 if ((error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz)) != 0)
2369                         break;
2370
2371         if (!sc->sc_fw_chunk_done) {
2372                 device_printf(sc->sc_dev,
2373                     "fw chunk addr 0x%x len %d failed to load\n",
2374                     dst_addr, byte_cnt);
2375         }
2376
2377         if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2378             dst_addr <= IWM_FW_MEM_EXTENDED_END && iwm_nic_lock(sc)) {
2379                 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2380                     IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2381                 iwm_nic_unlock(sc);
2382         }
2383
2384         return error;
2385 }
2386
2387 int
2388 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
2389     int cpu, int *first_ucode_section)
2390 {
2391         int shift_param;
2392         int i, error = 0, sec_num = 0x1;
2393         uint32_t val, last_read_idx = 0;
2394         const void *data;
2395         uint32_t dlen;
2396         uint32_t offset;
2397
2398         if (cpu == 1) {
2399                 shift_param = 0;
2400                 *first_ucode_section = 0;
2401         } else {
2402                 shift_param = 16;
2403                 (*first_ucode_section)++;
2404         }
2405
2406         for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
2407                 last_read_idx = i;
2408                 data = fws->fw_sect[i].fws_data;
2409                 dlen = fws->fw_sect[i].fws_len;
2410                 offset = fws->fw_sect[i].fws_devoff;
2411
2412                 /*
2413                  * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the
2414                  * CPU1 sections from the CPU2 sections.
2415                  * PAGING_SEPARATOR_SECTION delimiter - separates the
2416                  * CPU2 non-paged sections from the CPU2 paging sections.
2417                  */
2418                 if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2419                     offset == IWM_PAGING_SEPARATOR_SECTION)
2420                         break;
2421
2422                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2423                     "LOAD FIRMWARE chunk %d offset 0x%x len %d for cpu %d\n",
2424                     i, offset, dlen, cpu);
2425
2426                 if (dlen > sc->sc_fwdmasegsz) {
2427                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2428                             "chunk %d too large (%d bytes)\n", i, dlen);
2429                         error = EFBIG;
2430                 } else {
2431                         error = iwm_firmware_load_sect(sc, offset, data, dlen);
2432                 }
2433                 if (error) {
2434                         device_printf(sc->sc_dev,
2435                             "could not load firmware chunk %d (error %d)\n",
2436                             i, error);
2437                         return error;
2438                 }
2439
2440                 /* Notify the ucode of the loaded section number and status */
2441                 if (iwm_nic_lock(sc)) {
2442                         val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2443                         val = val | (sec_num << shift_param);
2444                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2445                         sec_num = (sec_num << 1) | 0x1;
2446                         iwm_nic_unlock(sc);
2447
2448                         /*
2449                          * The firmware won't load correctly without this delay.
2450                          */
2451                         DELAY(8000);
2452                 }
2453         }
2454
2455         *first_ucode_section = last_read_idx;
2456
2457         if (iwm_nic_lock(sc)) {
2458                 if (cpu == 1)
2459                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2460                 else
2461                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2462                 iwm_nic_unlock(sc);
2463         }
2464
2465         return 0;
2466 }
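
/*
 * Illustrative sketch only (not compiled): the section-number bookkeeping
 * in the loop above.  For CPU1 (shift_param == 0) the values written to
 * IWM_FH_UCODE_LOAD_STATUS accumulate as 0x1, 0x3, 0x7, ..., i.e.
 * (2^n - 1) after n sections; CPU2 does the same in the upper 16 bits
 * (shift_param == 16).  The final 0xFFFF / 0xFFFFFFFF writes tell the
 * uCode that the respective CPU image is fully loaded.
 */
#if 0
static uint32_t
iwm_sec_num_mask_sketch(int nsections, int shift_param)
{
	uint32_t val = 0;
	int sec_num = 0x1;
	int i;

	/* Mirrors the status accumulation in iwm_load_cpu_sections_8000(). */
	for (i = 0; i < nsections; i++) {
		val |= (sec_num << shift_param);
		sec_num = (sec_num << 1) | 0x1;
	}
	return val;	/* (2^nsections - 1) << shift_param */
}
#endif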
2467
2468 int
2469 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2470 {
2471         struct iwm_fw_sects *fws;
2472         int error = 0;
2473         int first_ucode_section;
2474
2475         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "loading ucode type %d\n",
2476             ucode_type);
2477
2478         fws = &sc->sc_fw.fw_sects[ucode_type];
2479
2480         /* configure the ucode to be ready to get the secured image */
2481         /* release CPU reset */
2482         iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
2483
2484         /* Load the CPU1 secured binary sections to the device. */
2485         error = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
2486         if (error)
2487                 return error;
2488
2489         /* Load the CPU2 binary sections to the device. */
2490         return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
2491 }
2492
2493 static int
2494 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2495 {
2496         struct iwm_fw_sects *fws;
2497         int error, i;
2498         const void *data;
2499         uint32_t dlen;
2500         uint32_t offset;
2501
2502         sc->sc_uc.uc_intr = 0;
2503
2504         fws = &sc->sc_fw.fw_sects[ucode_type];
2505         for (i = 0; i < fws->fw_count; i++) {
2506                 data = fws->fw_sect[i].fws_data;
2507                 dlen = fws->fw_sect[i].fws_len;
2508                 offset = fws->fw_sect[i].fws_devoff;
2509                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2510                     "LOAD FIRMWARE type %d offset %u len %d\n",
2511                     ucode_type, offset, dlen);
2512                 if (dlen > sc->sc_fwdmasegsz) {
2513                         IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2514                             "chunk %d too large (%d bytes)\n", i, dlen);
2515                         error = EFBIG;
2516                 } else {
2517                         error = iwm_firmware_load_sect(sc, offset, data, dlen);
2518                 }
2519                 if (error) {
2520                         device_printf(sc->sc_dev,
2521                             "could not load firmware chunk %u of %u "
2522                             "(error=%d)\n", i, fws->fw_count, error);
2523                         return error;
2524                 }
2525         }
2526
2527         IWM_WRITE(sc, IWM_CSR_RESET, 0);
2528
2529         return 0;
2530 }
2531
2532 static int
2533 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2534 {
2535         int error, w;
2536
2537         if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
2538                 error = iwm_load_firmware_8000(sc, ucode_type);
2539         else
2540                 error = iwm_load_firmware_7000(sc, ucode_type);
2541         if (error)
2542                 return error;
2543
2544         /* wait for the firmware to load */
2545         for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
2546                 error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
2547         }
2548         if (error || !sc->sc_uc.uc_ok) {
2549                 device_printf(sc->sc_dev, "could not load firmware\n");
2550                 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
2551                         device_printf(sc->sc_dev, "cpu1 status: 0x%x\n",
2552                             iwm_read_prph(sc, IWM_SB_CPU_1_STATUS));
2553                         device_printf(sc->sc_dev, "cpu2 status: 0x%x\n",
2554                             iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
2555                 }
2556         }
2557
2558         /*
2559          * Give the firmware some time to initialize.
2560          * Accessing it too early causes errors.
2561          */
2562         msleep(&w, &sc->sc_mtx, 0, "iwmfwinit", hz);
2563
2564         return error;
2565 }
2566
2567 /* iwlwifi: pcie/trans.c */
2568 static int
2569 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2570 {
2571         int error;
2572
2573         IWM_WRITE(sc, IWM_CSR_INT, ~0);
2574
2575         if ((error = iwm_nic_init(sc)) != 0) {
2576                 device_printf(sc->sc_dev, "unable to init nic\n");
2577                 return error;
2578         }
2579
2580         /* make sure rfkill handshake bits are cleared */
2581         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2582         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2583             IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2584
2585         /* clear (again), then enable host interrupts */
2586         IWM_WRITE(sc, IWM_CSR_INT, ~0);
2587         iwm_enable_interrupts(sc);
2588
2589         /* really make sure rfkill handshake bits are cleared */
2590         /* maybe we should write a few times more?  just to make sure */
2591         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2592         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2593
2594         /* Load the given image to the HW */
2595         return iwm_load_firmware(sc, ucode_type);
2596 }
2597
2598 static int
2599 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2600 {
2601         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2602                 .valid = htole32(valid_tx_ant),
2603         };
2604
2605         return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2606             IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2607 }
2608
2609 /* iwlwifi: mvm/fw.c */
2610 static int
2611 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2612 {
2613         struct iwm_phy_cfg_cmd phy_cfg_cmd;
2614         enum iwm_ucode_type ucode_type = sc->sc_uc_current;
2615
2616         /* Set parameters */
2617         phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
2618         phy_cfg_cmd.calib_control.event_trigger =
2619             sc->sc_default_calib[ucode_type].event_trigger;
2620         phy_cfg_cmd.calib_control.flow_trigger =
2621             sc->sc_default_calib[ucode_type].flow_trigger;
2622
2623         IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2624             "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2625         return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2626             sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2627 }
2628
2629 static int
2630 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2631         enum iwm_ucode_type ucode_type)
2632 {
2633         enum iwm_ucode_type old_type = sc->sc_uc_current;
2634         int error;
2635
2636         if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2637                 device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
2638                         error);
2639                 return error;
2640         }
2641
2642         sc->sc_uc_current = ucode_type;
2643         error = iwm_start_fw(sc, ucode_type);
2644         if (error) {
2645                 device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2646                 sc->sc_uc_current = old_type;
2647                 return error;
2648         }
2649
2650         error = iwm_post_alive(sc);
2651         if (error) {
2652                 device_printf(sc->sc_dev, "iwm_fw_alive: failed %d\n", error);
2653         }
2654         return error;
2655 }
2656
2657 /*
2658  * mvm misc bits
2659  */
2660
2661 /*
2662  * follows iwlwifi/fw.c
2663  */
2664 static int
2665 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2666 {
2667         int error;
2668
2669         /* do not operate with rfkill switch turned on */
2670         if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2671                 device_printf(sc->sc_dev,
2672                     "radio is disabled by hardware switch\n");
2673                 return EPERM;
2674         }
2675
2676         sc->sc_init_complete = 0;
2677         if ((error = iwm_mvm_load_ucode_wait_alive(sc,
2678             IWM_UCODE_TYPE_INIT)) != 0) {
2679                 device_printf(sc->sc_dev, "failed to load init firmware\n");
2680                 return error;
2681         }
2682
2683         if (justnvm) {
2684                 if ((error = iwm_nvm_init(sc)) != 0) {
2685                         device_printf(sc->sc_dev, "failed to read nvm\n");
2686                         return error;
2687                 }
2688                 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->sc_nvm.hw_addr);
2689
2690                 return 0;
2691         }
2692
2693         if ((error = iwm_send_bt_init_conf(sc)) != 0) {
2694                 device_printf(sc->sc_dev,
2695                     "failed to send bt coex configuration: %d\n", error);
2696                 return error;
2697         }
2698
2699         /* Init Smart FIFO. */
2700         error = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
2701         if (error != 0)
2702                 return error;
2703
2704         /* Send TX valid antennas before triggering calibrations */
2705         if ((error = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc))) != 0) {
2706                 device_printf(sc->sc_dev,
2707                     "failed to send antennas before calibration: %d\n", error);
2708                 return error;
2709         }
2710
2711         /*
2712          * Send the PHY configuration command to the init uCode
2713          * to start the 16.0 uCode init image internal calibrations.
2714          */
2715         if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
2716                 device_printf(sc->sc_dev,
2717                     "%s: failed to run internal calibration: %d\n",
2718                     __func__, error);
2719                 return error;
2720         }
2721
2722         /*
2723          * Nothing to do but wait for the init complete notification
2724          * from the firmware
2725          */
2726         while (!sc->sc_init_complete) {
2727                 error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
2728                                  0, "iwminit", 2*hz);
2729                 if (error) {
2730                         device_printf(sc->sc_dev, "init complete failed: %d\n",
2731                                 sc->sc_init_complete);
2732                         break;
2733                 }
2734         }
2735
2736         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "init %scomplete\n",
2737             sc->sc_init_complete ? "" : "not ");
2738
2739         return error;
2740 }
2741
2742 /*
2743  * receive side
2744  */
2745
2746 /* (re)stock rx ring, called at init-time and at runtime */
2747 static int
2748 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
2749 {
2750         struct iwm_rx_ring *ring = &sc->rxq;
2751         struct iwm_rx_data *data = &ring->data[idx];
2752         struct mbuf *m;
2753         bus_dmamap_t dmamap = NULL;
2754         bus_dma_segment_t seg;
2755         int nsegs, error;
2756
2757         m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
2758         if (m == NULL)
2759                 return ENOBUFS;
2760
2761         m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2762         error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
2763             &seg, &nsegs, BUS_DMA_NOWAIT);
2764         if (error != 0) {
2765                 device_printf(sc->sc_dev,
2766                     "%s: can't map mbuf, error %d\n", __func__, error);
2767                 goto fail;
2768         }
2769
2770         if (data->m != NULL)
2771                 bus_dmamap_unload(ring->data_dmat, data->map);
2772
2773         /* Swap ring->spare_map with data->map */
2774         dmamap = data->map;
2775         data->map = ring->spare_map;
2776         ring->spare_map = dmamap;
2777
2778         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
2779         data->m = m;
2780
2781         /* Update RX descriptor. */
2782         KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
2783         ring->desc[idx] = htole32(seg.ds_addr >> 8);
2784         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2785             BUS_DMASYNC_PREWRITE);
2786
2787         return 0;
2788 fail:
2789         m_freem(m);
2790         return error;
2791 }
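
/*
 * Note on the RX descriptor encoding in iwm_rx_addbuf() above: the
 * hardware is given the buffer's bus address with the low 8 bits
 * stripped, which is why the KASSERT insists on 256-byte alignment.
 * A minimal sketch of the encoding (illustrative only; "paddr" is a
 * hypothetical, suitably aligned bus address, not a driver variable):
 */
#if 0
        bus_addr_t paddr = 0x12345600;          /* 256-byte aligned */
        uint32_t encoded = htole32(paddr >> 8); /* 0x00123456, the value
                                                 * stored in ring->desc[idx] */
#endif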
2792
2793 /* iwlwifi: mvm/rx.c */
2794 #define IWM_RSSI_OFFSET 50
2795 static int
2796 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2797 {
2798         int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
2799         uint32_t agc_a, agc_b;
2800         uint32_t val;
2801
2802         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
2803         agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
2804         agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
2805
2806         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
2807         rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
2808         rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
2809
2810         /*
2811          * dBm = rssi dB - agc dB - constant.
2812          * Higher AGC (higher radio gain) means lower signal.
2813          */
2814         rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
2815         rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
2816         max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
2817
2818         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2819             "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
2820             rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
2821
2822         return max_rssi_dbm;
2823 }
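
/*
 * Worked example for the legacy RSSI path above (illustrative numbers
 * only): with rssi_a = 40, agc_a = 25, rssi_b = 35 and agc_b = 30, the
 * per-chain values are 40 - 50 - 25 = -35 dBm and 35 - 50 - 30 = -45 dBm,
 * and the stronger chain, -35 dBm, is returned.
 */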
2824
2825 /* iwlwifi: mvm/rx.c */
2826 /*
2827  * iwm_mvm_get_signal_strength - use the new RX PHY INFO API.
2828  * Values are reported by the fw as positive and need to be negated
2829  * to obtain dBm.  Account for missing antennas by replacing 0
2830  * values with -256 dBm: practically zero power and an infeasible 8-bit value.
2831  */
2832 static int
2833 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2834 {
2835         int energy_a, energy_b, energy_c, max_energy;
2836         uint32_t val;
2837
2838         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
2839         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
2840             IWM_RX_INFO_ENERGY_ANT_A_POS;
2841         energy_a = energy_a ? -energy_a : -256;
2842         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
2843             IWM_RX_INFO_ENERGY_ANT_B_POS;
2844         energy_b = energy_b ? -energy_b : -256;
2845         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
2846             IWM_RX_INFO_ENERGY_ANT_C_POS;
2847         energy_c = energy_c ? -energy_c : -256;
2848         max_energy = MAX(energy_a, energy_b);
2849         max_energy = MAX(max_energy, energy_c);
2850
2851         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2852             "energy In A %d B %d C %d , and max %d\n",
2853             energy_a, energy_b, energy_c, max_energy);
2854
2855         return max_energy;
2856 }
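
/*
 * Worked example for the energy-API path above (illustrative numbers
 * only): raw energies of 45, 52 and 0 become -45, -52 and -256 dBm
 * (the 0 marks a missing antenna C), so the reported signal strength
 * is the maximum of the three, -45 dBm.
 */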
2857
2858 static void
2859 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
2860         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2861 {
2862         struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
2863
2864         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
2865         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2866
2867         memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
2868 }
2869
2870 /*
2871  * Retrieve the average noise (in dBm) among receivers.
2872  */
2873 static int
2874 iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
2875 {
2876         int i, total, nbant, noise;
2877
2878         total = nbant = noise = 0;
2879         for (i = 0; i < 3; i++) {
2880                 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
2881                 if (noise) {
2882                         total += noise;
2883                         nbant++;
2884                 }
2885         }
2886
2887         /* There should be at least one antenna but check anyway. */
2888         return (nbant == 0) ? -127 : (total / nbant) - 107;
2889 }
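
/*
 * Worked example for iwm_get_noise() above (illustrative numbers only):
 * beacon silence RSSI bytes of 60, 64 and 0 give total = 124 over
 * nbant = 2 reporting antennas, so the returned noise floor is
 * 124 / 2 - 107 = -45 dBm.  If all three bytes were zero, the -127 dBm
 * fallback would be returned instead.
 */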
2890
2891 /*
2892  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
2893  *
2894  * Handles the actual data of the Rx packet from the fw
2895  */
2896 static void
2897 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
2898         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2899 {
2900         struct ieee80211com *ic = &sc->sc_ic;
2901         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2902         struct ieee80211_frame *wh;
2903         struct ieee80211_node *ni;
2904         struct ieee80211_rx_stats rxs;
2905         struct mbuf *m;
2906         struct iwm_rx_phy_info *phy_info;
2907         struct iwm_rx_mpdu_res_start *rx_res;
2908         uint32_t len;
2909         uint32_t rx_pkt_status;
2910         int rssi;
2911
2912         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2913
2914         phy_info = &sc->sc_last_phy_info;
2915         rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
2916         wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
2917         len = le16toh(rx_res->byte_count);
2918         rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
2919
2920         m = data->m;
2921         m->m_data = pkt->data + sizeof(*rx_res);
2922         m->m_pkthdr.len = m->m_len = len;
2923
2924         if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
2925                 device_printf(sc->sc_dev,
2926                     "dsp size out of range [0,20]: %d\n",
2927                     phy_info->cfg_phy_cnt);
2928                 return;
2929         }
2930
2931         if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
2932             !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
2933                 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2934                     "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
2935                 return; /* drop */
2936         }
2937
2938         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
2939                 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
2940         } else {
2941                 rssi = iwm_mvm_calc_rssi(sc, phy_info);
2942         }
2943         rssi = (0 - IWM_MIN_DBM) + rssi;        /* normalize */
2944         rssi = MIN(rssi, sc->sc_max_rssi);      /* clip to max. 100% */
2945
2946         /* replenish ring for the buffer we're going to feed to the sharks */
2947         if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
2948                 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
2949                     __func__);
2950                 return;
2951         }
2952
2953         ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
2954
2955         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2956             "%s: phy_info: channel=%d, flags=0x%08x\n",
2957             __func__,
2958             le16toh(phy_info->channel),
2959             le16toh(phy_info->phy_flags));
2960
2961         /*
2962          * Populate an RX state struct with the provided information.
2963          */
2964         bzero(&rxs, sizeof(rxs));
2965         rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
2966         rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
2967         rxs.c_ieee = le16toh(phy_info->channel);
2968         if (le16toh(phy_info->phy_flags) & IWM_RX_RES_PHY_FLAGS_BAND_24) {
2969                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
2970         } else {
2971                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
2972         }
2973         rxs.rssi = rssi - sc->sc_noise;
2974         rxs.nf = sc->sc_noise;
2975
2976         if (ieee80211_radiotap_active_vap(vap)) {
2977                 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
2978
2979                 tap->wr_flags = 0;
2980                 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
2981                         tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2982                 tap->wr_chan_freq = htole16(rxs.c_freq);
2983                 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
2984                 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
2985                 tap->wr_dbm_antsignal = (int8_t)rssi;
2986                 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
2987                 tap->wr_tsft = phy_info->system_timestamp;
2988                 switch (phy_info->rate) {
2989                 /* CCK rates. */
2990                 case  10: tap->wr_rate =   2; break;
2991                 case  20: tap->wr_rate =   4; break;
2992                 case  55: tap->wr_rate =  11; break;
2993                 case 110: tap->wr_rate =  22; break;
2994                 /* OFDM rates. */
2995                 case 0xd: tap->wr_rate =  12; break;
2996                 case 0xf: tap->wr_rate =  18; break;
2997                 case 0x5: tap->wr_rate =  24; break;
2998                 case 0x7: tap->wr_rate =  36; break;
2999                 case 0x9: tap->wr_rate =  48; break;
3000                 case 0xb: tap->wr_rate =  72; break;
3001                 case 0x1: tap->wr_rate =  96; break;
3002                 case 0x3: tap->wr_rate = 108; break;
3003                 /* Unknown rate: should not happen. */
3004                 default:  tap->wr_rate =   0;
3005                 }
3006         }
3007
3008         IWM_UNLOCK(sc);
3009         if (ni != NULL) {
3010                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3011                 ieee80211_input_mimo(ni, m, &rxs);
3012                 ieee80211_free_node(ni);
3013         } else {
3014                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3015                 ieee80211_input_mimo_all(ic, m, &rxs);
3016         }
3017         IWM_LOCK(sc);
3018 }
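
/*
 * Note on the rssi normalization in iwm_mvm_rx_rx_mpdu() above: the dBm
 * value is shifted into a 0-based range with (0 - IWM_MIN_DBM) + rssi
 * and then clipped to sc_max_rssi before being handed to net80211.
 * Illustrative example only, assuming IWM_MIN_DBM were -100: a -45 dBm
 * frame would map to 100 + (-45) = 55 on that scale.
 */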
3019
3020 static int
3021 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3022         struct iwm_node *in)
3023 {
3024         struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3025         struct ieee80211_node *ni = &in->in_ni;
3026         struct ieee80211vap *vap = ni->ni_vap;
3027         int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3028         int failack = tx_resp->failure_frame;
3029
3030         KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3031
3032         /* Update rate control statistics. */
3033         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3034             __func__,
3035             (int) le16toh(tx_resp->status.status),
3036             (int) le16toh(tx_resp->status.sequence),
3037             tx_resp->frame_count,
3038             tx_resp->bt_kill_count,
3039             tx_resp->failure_rts,
3040             tx_resp->failure_frame,
3041             le32toh(tx_resp->initial_rate),
3042             (int) le16toh(tx_resp->wireless_media_time));
3043
3044         if (status != IWM_TX_STATUS_SUCCESS &&
3045             status != IWM_TX_STATUS_DIRECT_DONE) {
3046                 ieee80211_ratectl_tx_complete(vap, ni,
3047                     IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3048                 return (1);
3049         } else {
3050                 ieee80211_ratectl_tx_complete(vap, ni,
3051                     IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3052                 return (0);
3053         }
3054 }
3055
3056 static void
3057 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
3058         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3059 {
3060         struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3061         int idx = cmd_hdr->idx;
3062         int qid = cmd_hdr->qid;
3063         struct iwm_tx_ring *ring = &sc->txq[qid];
3064         struct iwm_tx_data *txd = &ring->data[idx];
3065         struct iwm_node *in = txd->in;
3066         struct mbuf *m = txd->m;
3067         int status;
3068
3069         KASSERT(txd->done == 0, ("txd not done"));
3070         KASSERT(txd->in != NULL, ("txd without node"));
3071         KASSERT(txd->m != NULL, ("txd without mbuf"));
3072
3073         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3074
3075         sc->sc_tx_timer = 0;
3076
3077         status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3078
3079         /* Unmap and free mbuf. */
3080         bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3081         bus_dmamap_unload(ring->data_dmat, txd->map);
3082
3083         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3084             "free txd %p, in %p\n", txd, txd->in);
3085         txd->done = 1;
3086         txd->m = NULL;
3087         txd->in = NULL;
3088
3089         ieee80211_tx_complete(&in->in_ni, m, status);
3090
3091         if (--ring->queued < IWM_TX_RING_LOMARK) {
3092                 sc->qfullmsk &= ~(1 << ring->qid);
3093                 if (sc->qfullmsk == 0) {
3094                         /*
3095                          * Well, we're in interrupt context, but then again
3096                          * I guess net80211 does all sorts of stunts in
3097                          * interrupt context, so maybe this is no biggie.
3098                          */
3099                         iwm_start(sc);
3100                 }
3101         }
3102 }
3103
3104 /*
3105  * transmit side
3106  */
3107
3108 /*
3109  * Process a "command done" firmware notification.  This is where we wake up
3110  * processes waiting for a synchronous command completion.
3111  * (Adapted from if_iwn.)
3112  */
3113 static void
3114 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3115 {
3116         struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3117         struct iwm_tx_data *data;
3118
3119         if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3120                 return; /* Not a command ack. */
3121         }
3122
3123         data = &ring->data[pkt->hdr.idx];
3124
3125         /* If the command was mapped in an mbuf, free it. */
3126         if (data->m != NULL) {
3127                 bus_dmamap_sync(ring->data_dmat, data->map,
3128                     BUS_DMASYNC_POSTWRITE);
3129                 bus_dmamap_unload(ring->data_dmat, data->map);
3130                 m_freem(data->m);
3131                 data->m = NULL;
3132         }
3133         wakeup(&ring->desc[pkt->hdr.idx]);
3134 }
3135
3136 #if 0
3137 /*
3138  * necessary only for block ack mode
3139  */
3140 void
3141 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3142         uint16_t len)
3143 {
3144         struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3145         uint16_t w_val;
3146
3147         scd_bc_tbl = sc->sched_dma.vaddr;
3148
3149         len += 8; /* magic numbers came naturally from paris */
3150         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
3151                 len = roundup(len, 4) / 4;
3152
3153         w_val = htole16(sta_id << 12 | len);
3154
3155         /* Update TX scheduler. */
3156         scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3157         bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3158             BUS_DMASYNC_PREWRITE);
3159
3160         /* I really wonder what this is ?!? */
3161         if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3162                 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3163                 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3164                     BUS_DMASYNC_PREWRITE);
3165         }
3166 }
3167 #endif
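
/*
 * Worked example for the (disabled) scheduler byte-count update above,
 * with illustrative numbers only: for a 100-byte frame, len becomes
 * 100 + 8 = 108; with IWM_UCODE_TLV_FLAGS_DW_BC_TABLE set that is
 * converted to a dword count, roundup(108, 4) / 4 = 27, and the table
 * entry written is sta_id << 12 | 27.
 */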
3168
3169 /*
3170  * Take an 802.11 (non-n) rate and find the relevant rate
3171  * table entry.  Return the index into in_ridx[].
3172  *
3173  * The caller then uses that index back into in_ridx[]
3174  * to figure out the rate index programmed /into/
3175  * the firmware for this node.
3176  */
3177 static int
3178 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3179     uint8_t rate)
3180 {
3181         int i;
3182         uint8_t r;
3183
3184         for (i = 0; i < nitems(in->in_ridx); i++) {
3185                 r = iwm_rates[in->in_ridx[i]].rate;
3186                 if (rate == r)
3187                         return (i);
3188         }
3189         /* XXX Return the first */
3190         /* XXX TODO: have it return the /lowest/ */
3191         return (0);
3192 }
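
/*
 * The two-step mapping used by the TX path (a reading aid; the lines
 * below simply mirror what iwm_tx_fill_cmd() does for data frames):
 * ni->ni_txrate is matched against the rates programmed into in_ridx[]
 * to get an index i, and in_ridx[i] is then the index into iwm_rates[]
 * that supplies the PLCP value sent to the firmware.
 */
#if 0
        i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
        ridx = in->in_ridx[i];          /* hardware rate table index */
        rinfo = &iwm_rates[ridx];       /* PLCP/rate description */
#endif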
3193
3194 /*
3195  * Fill in the rate related information for a transmit command.
3196  */
3197 static const struct iwm_rate *
3198 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3199         struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
3200 {
3201         struct ieee80211com *ic = &sc->sc_ic;
3202         struct ieee80211_node *ni = &in->in_ni;
3203         const struct iwm_rate *rinfo;
3204         int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3205         int ridx, rate_flags;
3206
3207         tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3208         tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3209
3210         /*
3211          * XXX TODO: everything about the rate selection here is terrible!
3212          */
3213
3214         if (type == IEEE80211_FC0_TYPE_DATA) {
3215                 int i;
3216                 /* for data frames, use RS table */
3217                 (void) ieee80211_ratectl_rate(ni, NULL, 0);
3218                 i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3219                 ridx = in->in_ridx[i];
3220
3221                 /* This is the index into the programmed table */
3222                 tx->initial_rate_index = i;
3223                 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3224                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3225                     "%s: start with i=%d, txrate %d\n",
3226                     __func__, i, iwm_rates[ridx].rate);
3227         } else {
3228                 /*
3229                  * For non-data, use the lowest supported rate for the given
3230                  * operational mode.
3231                  *
3232                  * Note: there may not be any rate control information available.
3233                  * This driver currently assumes that if we're transmitting data
3234                  * frames, the rate control table should be used.  Grr.
3235                  *
3236                  * XXX TODO: use the configured rate for the traffic type!
3237                  * XXX TODO: this should be per-vap, not curmode; later on
3238                  * we'll want to handle off-channel stuff (eg TDLS).
3239                  */
3240                 if (ic->ic_curmode == IEEE80211_MODE_11A) {
3241                         /*
3242                          * XXX this assumes the mode is either 11a or not 11a;
3243                          * definitely won't work for 11n.
3244                          */
3245                         ridx = IWM_RIDX_OFDM;
3246                 } else {
3247                         ridx = IWM_RIDX_CCK;
3248                 }
3249         }
3250
3251         rinfo = &iwm_rates[ridx];
3252
3253         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3254             __func__, ridx,
3255             rinfo->rate,
3256             !! (IWM_RIDX_IS_CCK(ridx))
3257             );
3258
3259         /* XXX TODO: hard-coded TX antenna? */
3260         rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3261         if (IWM_RIDX_IS_CCK(ridx))
3262                 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3263         tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3264
3265         return rinfo;
3266 }
3267
3268 #define TB0_SIZE 16
3269 static int
3270 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3271 {
3272         struct ieee80211com *ic = &sc->sc_ic;
3273         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3274         struct iwm_node *in = IWM_NODE(ni);
3275         struct iwm_tx_ring *ring;
3276         struct iwm_tx_data *data;
3277         struct iwm_tfd *desc;
3278         struct iwm_device_cmd *cmd;
3279         struct iwm_tx_cmd *tx;
3280         struct ieee80211_frame *wh;
3281         struct ieee80211_key *k = NULL;
3282         struct mbuf *m1;
3283         const struct iwm_rate *rinfo;
3284         uint32_t flags;
3285         u_int hdrlen;
3286         bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3287         int nsegs;
3288         uint8_t tid, type;
3289         int i, totlen, error, pad;
3290
3291         wh = mtod(m, struct ieee80211_frame *);
3292         hdrlen = ieee80211_anyhdrsize(wh);
3293         type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3294         tid = 0;
3295         ring = &sc->txq[ac];
3296         desc = &ring->desc[ring->cur];
3297         memset(desc, 0, sizeof(*desc));
3298         data = &ring->data[ring->cur];
3299
3300         /* Fill out iwm_tx_cmd to send to the firmware */
3301         cmd = &ring->cmd[ring->cur];
3302         cmd->hdr.code = IWM_TX_CMD;
3303         cmd->hdr.flags = 0;
3304         cmd->hdr.qid = ring->qid;
3305         cmd->hdr.idx = ring->cur;
3306
3307         tx = (void *)cmd->data;
3308         memset(tx, 0, sizeof(*tx));
3309
3310         rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
3311
3312         /* Encrypt the frame if need be. */
3313         if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3314                 /* Retrieve key for TX && do software encryption. */
3315                 k = ieee80211_crypto_encap(ni, m);
3316                 if (k == NULL) {
3317                         m_freem(m);
3318                         return (ENOBUFS);
3319                 }
3320                 /* 802.11 header may have moved. */
3321                 wh = mtod(m, struct ieee80211_frame *);
3322         }
3323
3324         if (ieee80211_radiotap_active_vap(vap)) {
3325                 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3326
3327                 tap->wt_flags = 0;
3328                 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3329                 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3330                 tap->wt_rate = rinfo->rate;
3331                 if (k != NULL)
3332                         tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3333                 ieee80211_radiotap_tx(vap, m);
3334         }
3335
3336
3337         totlen = m->m_pkthdr.len;
3338
3339         flags = 0;
3340         if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3341                 flags |= IWM_TX_CMD_FLG_ACK;
3342         }
3343
3344         if (type == IEEE80211_FC0_TYPE_DATA
3345             && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3346             && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3347                 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3348         }
3349
3350         if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3351             type != IEEE80211_FC0_TYPE_DATA)
3352                 tx->sta_id = sc->sc_aux_sta.sta_id;
3353         else
3354                 tx->sta_id = IWM_STATION_ID;
3355
3356         if (type == IEEE80211_FC0_TYPE_MGT) {
3357                 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3358
3359                 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3360                     subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3361                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3362                 } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3363                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3364                 } else {
3365                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3366                 }
3367         } else {
3368                 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3369         }
3370
3371         if (hdrlen & 3) {
3372                 /* First segment length must be a multiple of 4. */
3373                 flags |= IWM_TX_CMD_FLG_MH_PAD;
3374                 pad = 4 - (hdrlen & 3);
3375         } else
3376                 pad = 0;
3377
3378         tx->driver_txop = 0;
3379         tx->next_frame_len = 0;
3380
3381         tx->len = htole16(totlen);
3382         tx->tid_tspec = tid;
3383         tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3384
3385         /* Set physical address of "scratch area". */
3386         tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3387         tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3388
3389         /* Copy 802.11 header in TX command. */
3390         memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3391
3392         flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3393
3394         tx->sec_ctl = 0;
3395         tx->tx_flags |= htole32(flags);
3396
3397         /* Trim 802.11 header. */
3398         m_adj(m, hdrlen);
3399         error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3400             segs, &nsegs, BUS_DMA_NOWAIT);
3401         if (error != 0) {
3402                 if (error != EFBIG) {
3403                         device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3404                             error);
3405                         m_freem(m);
3406                         return error;
3407                 }
3408                 /* Too many DMA segments, linearize mbuf. */
3409                 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3410                 if (m1 == NULL) {
3411                         device_printf(sc->sc_dev,
3412                             "%s: could not defrag mbuf\n", __func__);
3413                         m_freem(m);
3414                         return (ENOBUFS);
3415                 }
3416                 m = m1;
3417
3418                 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3419                     segs, &nsegs, BUS_DMA_NOWAIT);
3420                 if (error != 0) {
3421                         device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3422                             error);
3423                         m_freem(m);
3424                         return error;
3425                 }
3426         }
3427         data->m = m;
3428         data->in = in;
3429         data->done = 0;
3430
3431         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3432             "sending txd %p, in %p\n", data, data->in);
3433         KASSERT(data->in != NULL, ("node is NULL"));
3434
3435         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3436             "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3437             ring->qid, ring->cur, totlen, nsegs,
3438             le32toh(tx->tx_flags),
3439             le32toh(tx->rate_n_flags),
3440             tx->initial_rate_index
3441             );
3442
3443         /* Fill TX descriptor. */
3444         desc->num_tbs = 2 + nsegs;
3445
3446         desc->tbs[0].lo = htole32(data->cmd_paddr);
3447         desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3448             (TB0_SIZE << 4);
3449         desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3450         desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3451             ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3452               + hdrlen + pad - TB0_SIZE) << 4);
3453
3454         /* Other DMA segments are for data payload. */
3455         for (i = 0; i < nsegs; i++) {
3456                 seg = &segs[i];
3457                 desc->tbs[i+2].lo = htole32(seg->ds_addr);
3458                 desc->tbs[i+2].hi_n_len =
3459                     htole16(iwm_get_dma_hi_addr(seg->ds_addr))
3460                     | ((seg->ds_len) << 4);
3461         }
3462
3463         bus_dmamap_sync(ring->data_dmat, data->map,
3464             BUS_DMASYNC_PREWRITE);
3465         bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3466             BUS_DMASYNC_PREWRITE);
3467         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3468             BUS_DMASYNC_PREWRITE);
3469
3470 #if 0
3471         iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3472 #endif
3473
3474         /* Kick TX ring. */
3475         ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3476         IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3477
3478         /* Mark TX ring as full if we reach a certain threshold. */
3479         if (++ring->queued > IWM_TX_RING_HIMARK) {
3480                 sc->qfullmsk |= 1 << ring->qid;
3481         }
3482
3483         return 0;
3484 }
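
/*
 * Reading aid for the TFD layout built in iwm_tx() above (no additional
 * driver logic): TB0 maps the first TB0_SIZE (16) bytes of the device
 * command, TB1 maps the remainder of the command header, the TX command
 * and the (possibly padded) copied 802.11 header, and TB2..TBn map the
 * mbuf payload segments.  Each hi_n_len field packs the upper address
 * bits into its low nibble and the segment length shifted left by four
 * into the remaining bits.
 */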
3485
3486 static int
3487 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3488     const struct ieee80211_bpf_params *params)
3489 {
3490         struct ieee80211com *ic = ni->ni_ic;
3491         struct iwm_softc *sc = ic->ic_softc;
3492         int error = 0;
3493
3494         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3495             "->%s begin\n", __func__);
3496
3497         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3498                 m_freem(m);
3499                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3500                     "<-%s not RUNNING\n", __func__);
3501                 return (ENETDOWN);
3502         }
3503
3504         IWM_LOCK(sc);
3505         /* XXX fix this */
3506         if (params == NULL) {
3507                 error = iwm_tx(sc, m, ni, 0);
3508         } else {
3509                 error = iwm_tx(sc, m, ni, 0);
3510         }
3511         sc->sc_tx_timer = 5;
3512         IWM_UNLOCK(sc);
3513
3514         return (error);
3515 }
3516
3517 /*
3518  * mvm/tx.c
3519  */
3520
3521 #if 0
3522 /*
3523  * Note that there are transports that buffer frames before they reach
3524  * the firmware. This means that after flush_tx_path is called, the
3525  * queue might not be empty. The race-free way to handle this is to:
3526  * 1) set the station as draining
3527  * 2) flush the Tx path
3528  * 3) wait for the transport queues to be empty
3529  */
3530 int
3531 iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
3532 {
3533         struct iwm_tx_path_flush_cmd flush_cmd = {
3534                 .queues_ctl = htole32(tfd_msk),
3535                 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3536         };
3537         int ret;
3538
3539         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
3540             sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
3541             sizeof(flush_cmd), &flush_cmd);
3542         if (ret)
3543                 device_printf(sc->sc_dev,
3544                     "Flushing tx queue failed: %d\n", ret);
3545         return ret;
3546 }
3547 #endif
3548
3549 /*
3550  * BEGIN mvm/sta.c
3551  */
3552
3553 static int
3554 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3555         struct iwm_mvm_add_sta_cmd_v7 *cmd, int *status)
3556 {
3557         return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
3558             cmd, status);
3559 }
3560
3561 /* send station add/update command to firmware */
3562 static int
3563 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3564 {
3565         struct iwm_mvm_add_sta_cmd_v7 add_sta_cmd;
3566         int ret;
3567         uint32_t status;
3568
3569         memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3570
3571         add_sta_cmd.sta_id = IWM_STATION_ID;
3572         add_sta_cmd.mac_id_n_color
3573             = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3574                 IWM_DEFAULT_COLOR));
3575         if (!update) {
3576                 int ac;
3577                 for (ac = 0; ac < WME_NUM_AC; ac++) {
3578                         add_sta_cmd.tfd_queue_msk |=
3579                             htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
3580                 }
3581                 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3582         }
3583         add_sta_cmd.add_modify = update ? 1 : 0;
3584         add_sta_cmd.station_flags_msk
3585             |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3586         add_sta_cmd.tid_disable_tx = htole16(0xffff);
3587         if (update)
3588                 add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
3589
3590         status = IWM_ADD_STA_SUCCESS;
3591         ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3592         if (ret)
3593                 return ret;
3594
3595         switch (status) {
3596         case IWM_ADD_STA_SUCCESS:
3597                 break;
3598         default:
3599                 ret = EIO;
3600                 device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3601                 break;
3602         }
3603
3604         return ret;
3605 }
3606
3607 static int
3608 iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
3609 {
3610         return iwm_mvm_sta_send_to_fw(sc, in, 0);
3611 }
3612
3613 static int
3614 iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
3615 {
3616         return iwm_mvm_sta_send_to_fw(sc, in, 1);
3617 }
3618
3619 static int
3620 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3621         const uint8_t *addr, uint16_t mac_id, uint16_t color)
3622 {
3623         struct iwm_mvm_add_sta_cmd_v7 cmd;
3624         int ret;
3625         uint32_t status;
3626
3627         memset(&cmd, 0, sizeof(cmd));
3628         cmd.sta_id = sta->sta_id;
3629         cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3630
3631         cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3632         cmd.tid_disable_tx = htole16(0xffff);
3633
3634         if (addr)
3635                 IEEE80211_ADDR_COPY(cmd.addr, addr);
3636
3637         ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3638         if (ret)
3639                 return ret;
3640
3641         switch (status) {
3642         case IWM_ADD_STA_SUCCESS:
3643                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3644                     "%s: Internal station added.\n", __func__);
3645                 return 0;
3646         default:
3647                 device_printf(sc->sc_dev,
3648                     "%s: Add internal station failed, status=0x%x\n",
3649                     __func__, status);
3650                 ret = EIO;
3651                 break;
3652         }
3653         return ret;
3654 }
3655
3656 static int
3657 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3658 {
3659         int ret;
3660
3661         sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
3662         sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
3663
3664         ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
3665         if (ret)
3666                 return ret;
3667
3668         ret = iwm_mvm_add_int_sta_common(sc,
3669             &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
3670
3671         if (ret)
3672                 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
3673         return ret;
3674 }
3675
3676 /*
3677  * END mvm/sta.c
3678  */
3679
3680 /*
3681  * BEGIN mvm/quota.c
3682  */
3683
3684 static int
3685 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
3686 {
3687         struct iwm_time_quota_cmd cmd;
3688         int i, idx, ret, num_active_macs, quota, quota_rem;
3689         int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3690         int n_ifs[IWM_MAX_BINDINGS] = {0, };
3691         uint16_t id;
3692
3693         memset(&cmd, 0, sizeof(cmd));
3694
3695         /* currently, PHY ID == binding ID */
3696         if (in) {
3697                 id = in->in_phyctxt->id;
3698                 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3699                 colors[id] = in->in_phyctxt->color;
3700
3701                 if (1)
3702                         n_ifs[id] = 1;
3703         }
3704
3705         /*
3706          * The FW's scheduling session consists of
3707          * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3708          * equally between all the bindings that require quota
3709          */
3710         num_active_macs = 0;
3711         for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3712                 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3713                 num_active_macs += n_ifs[i];
3714         }
3715
3716         quota = 0;
3717         quota_rem = 0;
3718         if (num_active_macs) {
3719                 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3720                 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3721         }
3722
3723         for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3724                 if (colors[i] < 0)
3725                         continue;
3726
3727                 cmd.quotas[idx].id_and_color =
3728                         htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3729
3730                 if (n_ifs[i] <= 0) {
3731                         cmd.quotas[idx].quota = htole32(0);
3732                         cmd.quotas[idx].max_duration = htole32(0);
3733                 } else {
3734                         cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3735                         cmd.quotas[idx].max_duration = htole32(0);
3736                 }
3737                 idx++;
3738         }
3739
3740         /* Give the remainder of the session to the first binding */
3741         cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3742
3743         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3744             sizeof(cmd), &cmd);
3745         if (ret)
3746                 device_printf(sc->sc_dev,
3747                     "%s: Failed to send quota: %d\n", __func__, ret);
3748         return ret;
3749 }
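
/*
 * Worked example for the quota split above (illustrative only; the value
 * of IWM_MVM_MAX_QUOTA is taken as 128 purely for the arithmetic): with
 * three active MACs each binding would receive 128 / 3 = 42 fragments,
 * and the remainder, 128 % 3 = 2, is added to the first binding, giving
 * 44 / 42 / 42.
 */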
3750
3751 /*
3752  * END mvm/quota.c
3753  */
3754
3755 /*
3756  * ieee80211 routines
3757  */
3758
3759 /*
3760  * Change to AUTH state in 80211 state machine.  Roughly matches what
3761  * Linux does in bss_info_changed().
3762  */
3763 static int
3764 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3765 {
3766         struct ieee80211_node *ni;
3767         struct iwm_node *in;
3768         struct iwm_vap *iv = IWM_VAP(vap);
3769         uint32_t duration;
3770         int error;
3771
3772         /*
3773          * XXX I have a feeling that the vap node is being
3774          * freed from underneath us. Grr.
3775          */
3776         ni = ieee80211_ref_node(vap->iv_bss);
3777         in = IWM_NODE(ni);
3778         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3779             "%s: called; vap=%p, bss ni=%p\n",
3780             __func__,
3781             vap,
3782             ni);
3783
3784         in->in_assoc = 0;
3785
3786         error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
3787         if (error != 0)
3788                 return error;
3789
3790         error = iwm_allow_mcast(vap, sc);
3791         if (error) {
3792                 device_printf(sc->sc_dev,
3793                     "%s: failed to set multicast\n", __func__);
3794                 goto out;
3795         }
3796
3797         /*
3798          * This is where it deviates from what Linux does.
3799          *
3800          * Linux iwlwifi doesn't reset the nic each time, nor does it
3801          * call ctxt_add() here.  Instead, it adds it during vap creation,
3802          * and always does a mac_ctx_changed().
3803          *
3804          * The OpenBSD port doesn't attempt to do that - it resets things
3805          * at odd states and does the add here.
3806          *
3807          * So, until the state handling is fixed (ie, we never reset
3808          * the NIC except for a firmware failure, which should drag
3809          * the NIC back to IDLE, re-setup and re-add all the mac/phy
3810          * contexts that are required), let's do a dirty hack here.
3811          */
3812         if (iv->is_uploaded) {
3813                 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3814                         device_printf(sc->sc_dev,
3815                             "%s: failed to update MAC\n", __func__);
3816                         goto out;
3817                 }
3818                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3819                     in->in_ni.ni_chan, 1, 1)) != 0) {
3820                         device_printf(sc->sc_dev,
3821                             "%s: failed update phy ctxt\n", __func__);
3822                         goto out;
3823                 }
3824                 in->in_phyctxt = &sc->sc_phyctxt[0];
3825
3826                 if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
3827                         device_printf(sc->sc_dev,
3828                             "%s: binding update cmd\n", __func__);
3829                         goto out;
3830                 }
3831                 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3832                         device_printf(sc->sc_dev,
3833                             "%s: failed to update sta\n", __func__);
3834                         goto out;
3835                 }
3836         } else {
3837                 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
3838                         device_printf(sc->sc_dev,
3839                             "%s: failed to add MAC\n", __func__);
3840                         goto out;
3841                 }
3842                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3843                     in->in_ni.ni_chan, 1, 1)) != 0) {
3844                         device_printf(sc->sc_dev,
3845                             "%s: failed add phy ctxt!\n", __func__);
3846                         error = ETIMEDOUT;
3847                         goto out;
3848                 }
3849                 in->in_phyctxt = &sc->sc_phyctxt[0];
3850
3851                 if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
3852                         device_printf(sc->sc_dev,
3853                             "%s: binding add cmd\n", __func__);
3854                         goto out;
3855                 }
3856                 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
3857                         device_printf(sc->sc_dev,
3858                             "%s: failed to add sta\n", __func__);
3859                         goto out;
3860                 }
3861         }
3862
3863         /*
3864          * Prevent the FW from wandering off channel during association
3865          * by "protecting" the session with a time event.
3866          */
3867         /* XXX duration is in units of TU, not MS */
3868         duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
3869         iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
3870         DELAY(100);
3871
3872         error = 0;
3873 out:
3874         ieee80211_free_node(ni);
3875         return (error);
3876 }
3877
3878 static int
3879 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
3880 {
3881         struct iwm_node *in = IWM_NODE(vap->iv_bss);
3882         int error;
3883
3884         if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3885                 device_printf(sc->sc_dev,
3886                     "%s: failed to update STA\n", __func__);
3887                 return error;
3888         }
3889
3890         in->in_assoc = 1;
3891         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3892                 device_printf(sc->sc_dev,
3893                     "%s: failed to update MAC\n", __func__);
3894                 return error;
3895         }
3896
3897         return 0;
3898 }
3899
3900 static int
3901 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
3902 {
3903         /*
3904          * Ok, so *technically* the proper set of calls for going
3905          * from RUN back to SCAN is:
3906          *
3907          * iwm_mvm_power_mac_disable(sc, in);
3908          * iwm_mvm_mac_ctxt_changed(sc, in);
3909          * iwm_mvm_rm_sta(sc, in);
3910          * iwm_mvm_update_quotas(sc, NULL);
3911          * iwm_mvm_mac_ctxt_changed(sc, in);
3912          * iwm_mvm_binding_remove_vif(sc, in);
3913          * iwm_mvm_mac_ctxt_remove(sc, in);
3914          *
3915          * However, that freezes the device no matter which permutations
3916          * and modifications are attempted.  Obviously, this driver is missing
3917          * something since it works in the Linux driver, but figuring out what
3918          * is missing is a little more complicated.  Now, since we're going
3919          * back to nothing anyway, we'll just do a complete device reset.
3920          * Up yours, device!
3921          */
3922         /* iwm_mvm_flush_tx_path(sc, 0xf, 1); */
3923         iwm_stop_device(sc);
3924         iwm_init_hw(sc);
3925         if (in)
3926                 in->in_assoc = 0;
3927         return 0;
3928
3929 #if 0
3930         int error;
3931
3932         iwm_mvm_power_mac_disable(sc, in);
3933
3934         if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
3935                 device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
3936                 return error;
3937         }
3938
3939         if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
3940                 device_printf(sc->sc_dev, "sta remove fail %d\n", error);
3941                 return error;
3942         }
3943         error = iwm_mvm_rm_sta(sc, in);
3944         in->in_assoc = 0;
3945         iwm_mvm_update_quotas(sc, NULL);
3946         if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
3947                 device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
3948                 return error;
3949         }
3950         iwm_mvm_binding_remove_vif(sc, in);
3951
3952         iwm_mvm_mac_ctxt_remove(sc, in);
3953
3954         return error;
3955 #endif
3956 }
3957
3958 static struct ieee80211_node *
3959 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3960 {
3961         return malloc(sizeof (struct iwm_node), M_80211_NODE,
3962             M_NOWAIT | M_ZERO);
3963 }
3964
3965 static void
3966 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
3967 {
3968         struct ieee80211_node *ni = &in->in_ni;
3969         struct iwm_lq_cmd *lq = &in->in_lq;
3970         int nrates = ni->ni_rates.rs_nrates;
3971         int i, ridx, tab = 0;
3972         int txant = 0;
3973
3974         if (nrates > nitems(lq->rs_table)) {
3975                 device_printf(sc->sc_dev,
3976                     "%s: node supports %d rates, driver handles "
3977                     "only %zu\n", __func__, nrates, nitems(lq->rs_table));
3978                 return;
3979         }
3980         if (nrates == 0) {
3981                 device_printf(sc->sc_dev,
3982                     "%s: node supports 0 rates, odd!\n", __func__);
3983                 return;
3984         }
3985
3986         /*
3987          * XXX .. and most of iwm_node is not initialised explicitly;
3988          * it's all just 0x0 passed to the firmware.
3989          */
3990
3991         /* first figure out which rates we should support */
3992         /* XXX TODO: this isn't 11n aware /at all/ */
3993         memset(&in->in_ridx, -1, sizeof(in->in_ridx));
3994         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3995             "%s: nrates=%d\n", __func__, nrates);
3996
3997         /*
3998          * Loop over nrates and populate in_ridx from the highest
3999          * rate to the lowest rate.  Remember, in_ridx[] has
4000          * IEEE80211_RATE_MAXSIZE entries!
4001          */
4002         for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
4003                 int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;
4004
4005                 /* Map 802.11 rate to HW rate index. */
4006                 for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
4007                         if (iwm_rates[ridx].rate == rate)
4008                                 break;
4009                 if (ridx > IWM_RIDX_MAX) {
4010                         device_printf(sc->sc_dev,
4011                             "%s: WARNING: device rate for %d not found!\n",
4012                             __func__, rate);
4013                 } else {
4014                         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4015                             "%s: rate: i: %d, rate=%d, ridx=%d\n",
4016                             __func__,
4017                             i,
4018                             rate,
4019                             ridx);
4020                         in->in_ridx[i] = ridx;
4021                 }
4022         }
4023
4024         /* then construct a lq_cmd based on those */
4025         memset(lq, 0, sizeof(*lq));
4026         lq->sta_id = IWM_STATION_ID;
4027
4028         /* For HT, always enable RTS/CTS to avoid excessive retries. */
4029         if (ni->ni_flags & IEEE80211_NODE_HT)
4030                 lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4031
4032         /*
4033          * are these used? (we don't do SISO or MIMO)
4034          * need to set them to non-zero, though, or we get an error.
4035          */
4036         lq->single_stream_ant_msk = 1;
4037         lq->dual_stream_ant_msk = 1;
4038
4039         /*
4040          * Build the actual rate selection table.
4041          * The lowest bits are the rates.  Additionally,
4042          * CCK needs bit 9 to be set.  The rest of the bits
4043          * we add to the table select the TX antenna.
4044          * Note that we add the rates with the highest rate first
4045          * (opposite of ni_rates); a sketch of one entry follows this function.
4046          */
4047         /*
4048          * XXX TODO: this should be looping over the min of nrates
4049          * and LQ_MAX_RETRY_NUM.  Sigh.
4050          */
4051         for (i = 0; i < nrates; i++) {
4052                 int nextant;
4053
4054                 if (txant == 0)
4055                         txant = iwm_fw_valid_tx_ant(sc);
4056                 nextant = 1<<(ffs(txant)-1);
4057                 txant &= ~nextant;
4058
4059                 /*
4060                  * Map the rate id into a rate index into
4061                  * our hardware table containing the
4062                  * configuration to use for this rate.
4063                  */
4064                 ridx = in->in_ridx[i];
4065                 tab = iwm_rates[ridx].plcp;
4066                 tab |= nextant << IWM_RATE_MCS_ANT_POS;
4067                 if (IWM_RIDX_IS_CCK(ridx))
4068                         tab |= IWM_RATE_MCS_CCK_MSK;
4069                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4070                     "station rate i=%d, rate=%d, hw=%x\n",
4071                     i, iwm_rates[ridx].rate, tab);
4072                 lq->rs_table[i] = htole32(tab);
4073         }
4074         /* then fill the rest with the lowest possible rate */
4075         for (i = nrates; i < nitems(lq->rs_table); i++) {
4076                 KASSERT(tab != 0, ("invalid tab"));
4077                 lq->rs_table[i] = htole32(tab);
4078         }
4079 }
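
/*
 * Shape of a single rs_table[] entry built in iwm_setrates() above (the
 * sketch mirrors the loop body; it is not additional driver logic): the
 * low bits carry the PLCP value for the rate, the chosen TX antenna is
 * shifted into IWM_RATE_MCS_ANT_POS, and CCK rates additionally set
 * IWM_RATE_MCS_CCK_MSK.
 */
#if 0
        tab = iwm_rates[ridx].plcp
            | (nextant << IWM_RATE_MCS_ANT_POS)
            | (IWM_RIDX_IS_CCK(ridx) ? IWM_RATE_MCS_CCK_MSK : 0);
        lq->rs_table[i] = htole32(tab);
#endif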
4080
4081 static int
4082 iwm_media_change(struct ifnet *ifp)
4083 {
4084         struct ieee80211vap *vap = ifp->if_softc;
4085         struct ieee80211com *ic = vap->iv_ic;
4086         struct iwm_softc *sc = ic->ic_softc;
4087         int error;
4088
4089         error = ieee80211_media_change(ifp);
4090         if (error != ENETRESET)
4091                 return error;
4092
4093         IWM_LOCK(sc);
4094         if (ic->ic_nrunning > 0) {
4095                 iwm_stop(sc);
4096                 iwm_init(sc);
4097         }
4098         IWM_UNLOCK(sc);
4099         return error;
4100 }
4101
4102
4103 static int
4104 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4105 {
4106         struct iwm_vap *ivp = IWM_VAP(vap);
4107         struct ieee80211com *ic = vap->iv_ic;
4108         struct iwm_softc *sc = ic->ic_softc;
4109         struct iwm_node *in;
4110         int error;
4111
4112         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4113             "switching state %s -> %s\n",
4114             ieee80211_state_name[vap->iv_state],
4115             ieee80211_state_name[nstate]);
4116         IEEE80211_UNLOCK(ic);
4117         IWM_LOCK(sc);
4118
4119         if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
4120                 iwm_led_blink_stop(sc);
4121
4122         /* disable beacon filtering if we're hopping out of RUN */
4123         if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
4124                 iwm_mvm_disable_beacon_filter(sc);
4125
4126                 if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4127                         in->in_assoc = 0;
4128
4129                 iwm_release(sc, NULL);
4130
4131                 /*
4132                  * It's impossible to directly go RUN->SCAN. If we iwm_release()
4133                  * above then the card will be completely reinitialized,
4134                  * so the driver must do everything necessary to bring the card
4135                  * from INIT to SCAN.
4136                  *
4137                  * Additionally, upon receiving deauth frame from AP,
4138                  * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
4139                  * state. This will also fail with this driver, so bring the FSM
4140                  * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
4141                  *
4142                  * XXX TODO: fix this for FreeBSD!
4143                  */
4144                 if (nstate == IEEE80211_S_SCAN ||
4145                     nstate == IEEE80211_S_AUTH ||
4146                     nstate == IEEE80211_S_ASSOC) {
4147                         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4148                             "Force transition to INIT; MGT=%d\n", arg);
4149                         IWM_UNLOCK(sc);
4150                         IEEE80211_LOCK(ic);
4151                         /* Always pass arg as -1 since we can't Tx right now. */
4152                         /*
4153                          * XXX arg is just ignored anyway when transitioning
4154                          *     to IEEE80211_S_INIT.
4155                          */
4156                         vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
4157                         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4158                             "Going INIT->SCAN\n");
4159                         nstate = IEEE80211_S_SCAN;
4160                         IEEE80211_UNLOCK(ic);
4161                         IWM_LOCK(sc);
4162                 }
4163         }
4164
4165         switch (nstate) {
4166         case IEEE80211_S_INIT:
4167                 break;
4168
4169         case IEEE80211_S_AUTH:
4170                 if ((error = iwm_auth(vap, sc)) != 0) {
4171                         device_printf(sc->sc_dev,
4172                             "%s: could not move to auth state: %d\n",
4173                             __func__, error);
4174                         break;
4175                 }
4176                 break;
4177
4178         case IEEE80211_S_ASSOC:
4179                 if ((error = iwm_assoc(vap, sc)) != 0) {
4180                         device_printf(sc->sc_dev,
4181                             "%s: failed to associate: %d\n", __func__,
4182                             error);
4183                         break;
4184                 }
4185                 break;
4186
4187         case IEEE80211_S_RUN:
4188         {
4189                 struct iwm_host_cmd cmd = {
4190                         .id = IWM_LQ_CMD,
4191                         .len = { sizeof(in->in_lq), },
4192                         .flags = IWM_CMD_SYNC,
4193                 };
4194
4195                 /* Update the association state, now we have it all */
4196                 /* (eg associd comes in at this point) */
4197                 error = iwm_assoc(vap, sc);
4198                 if (error != 0) {
4199                         device_printf(sc->sc_dev,
4200                             "%s: failed to update association state: %d\n",
4201                             __func__,
4202                             error);
4203                         break;
4204                 }
4205
4206                 in = IWM_NODE(vap->iv_bss);
4207                 iwm_mvm_power_mac_update_mode(sc, in);
4208                 iwm_mvm_enable_beacon_filter(sc, in);
4209                 iwm_mvm_update_quotas(sc, in);
4210                 iwm_setrates(sc, in);
4211
4212                 cmd.data[0] = &in->in_lq;
4213                 if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
4214                         device_printf(sc->sc_dev,
4215                             "%s: IWM_LQ_CMD failed\n", __func__);
4216                 }
4217
4218                 iwm_mvm_led_enable(sc);
4219                 break;
4220         }
4221
4222         default:
4223                 break;
4224         }
4225         IWM_UNLOCK(sc);
4226         IEEE80211_LOCK(ic);
4227
4228         return (ivp->iv_newstate(vap, nstate, arg));
4229 }
4230
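/*
 * Scan-end task callback: notify net80211 that the scan on the first
 * vap has completed.
 */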
4231 void
4232 iwm_endscan_cb(void *arg, int pending)
4233 {
4234         struct iwm_softc *sc = arg;
4235         struct ieee80211com *ic = &sc->sc_ic;
4236
4237         IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4238             "%s: scan ended\n",
4239             __func__);
4240
4241         ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4242 }
4243
4244 /*
4245  * Aging and idle timeouts for the different possible scenarios
4246  * in default configuration
4247  */
4248 static const uint32_t
4249 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4250         {
4251                 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
4252                 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
4253         },
4254         {
4255                 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
4256                 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
4257         },
4258         {
4259                 htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
4260                 htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
4261         },
4262         {
4263                 htole32(IWM_SF_BA_AGING_TIMER_DEF),
4264                 htole32(IWM_SF_BA_IDLE_TIMER_DEF)
4265         },
4266         {
4267                 htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
4268                 htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
4269         },
4270 };
4271
4272 /*
4273  * Aging and idle timeouts for the different possible scenarios
4274  * in single BSS MAC configuration.
4275  */
4276 static const uint32_t
4277 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4278         {
4279                 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
4280                 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
4281         },
4282         {
4283                 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
4284                 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
4285         },
4286         {
4287                 htole32(IWM_SF_MCAST_AGING_TIMER),
4288                 htole32(IWM_SF_MCAST_IDLE_TIMER)
4289         },
4290         {
4291                 htole32(IWM_SF_BA_AGING_TIMER),
4292                 htole32(IWM_SF_BA_IDLE_TIMER)
4293         },
4294         {
4295                 htole32(IWM_SF_TX_RE_AGING_TIMER),
4296                 htole32(IWM_SF_TX_RE_IDLE_TIMER)
4297         },
4298 };
4299
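/*
 * Fill a Smart Fifo command: pick the full-on watermark from the AP's
 * capabilities (SISO for HT, legacy otherwise; MIMO2/MIMO3 selection is
 * still "notyet", and MIMO2 is the default when not associated), set all
 * long-delay timeouts, and install either the associated or the default
 * full-on timeout table.
 */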
4300 static void
4301 iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
4302     struct ieee80211_node *ni)
4303 {
4304         int i, j, watermark;
4305
4306         sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
4307
4308         /*
4309          * If we are in the association flow, check the antenna configuration
4310          * capabilities of the AP station and choose the watermark accordingly.
4311          */
4312         if (ni) {
4313                 if (ni->ni_flags & IEEE80211_NODE_HT) {
4314 #ifdef notyet
4315                         if (ni->ni_rxmcs[2] != 0)
4316                                 watermark = IWM_SF_W_MARK_MIMO3;
4317                         else if (ni->ni_rxmcs[1] != 0)
4318                                 watermark = IWM_SF_W_MARK_MIMO2;
4319                         else
4320 #endif
4321                                 watermark = IWM_SF_W_MARK_SISO;
4322                 } else {
4323                         watermark = IWM_SF_W_MARK_LEGACY;
4324                 }
4325         /* default watermark value for unassociated mode. */
4326         } else {
4327                 watermark = IWM_SF_W_MARK_MIMO2;
4328         }
4329         sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
4330
4331         for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
4332                 for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
4333                         sf_cmd->long_delay_timeouts[i][j] =
4334                                         htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
4335                 }
4336         }
4337
4338         if (ni) {
4339                 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
4340                        sizeof(iwm_sf_full_timeout));
4341         } else {
4342                 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
4343                        sizeof(iwm_sf_full_timeout_def));
4344         }
4345 }
4346
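/*
 * Configure the firmware's Smart Fifo.  For IWM_SF_FULL_ON the timeouts
 * are derived from the current BSS node; IWM_SF_UNINIT/IWM_SF_INIT_OFF
 * fall back to the default (unassociated) tables.  The command is sent
 * asynchronously.
 */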
4347 static int
4348 iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4349 {
4350         struct ieee80211com *ic = &sc->sc_ic;
4351         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4352         struct iwm_sf_cfg_cmd sf_cmd = {
4353                 .state = htole32(IWM_SF_FULL_ON),
4354         };
4355         int ret = 0;
4356
4357         if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
4358                 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4359
4360         switch (new_state) {
4361         case IWM_SF_UNINIT:
4362         case IWM_SF_INIT_OFF:
4363                 iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4364                 break;
4365         case IWM_SF_FULL_ON:
4366                 iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4367                 break;
4368         default:
4369                 IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4370                     "Invalid state: %d. not sending Smart Fifo cmd\n",
4371                     new_state);
4372                 return EINVAL;
4373         }
4374
4375         ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4376                                    sizeof(sf_cmd), &sf_cmd);
4377         return ret;
4378 }
4379
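/*
 * Send the initial Bluetooth coexistence configuration to the firmware
 * (IWM_BT_COEX_WIFI mode with IWM_BT_COEX_HIGH_BAND_RET enabled).
 */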
4380 static int
4381 iwm_send_bt_init_conf(struct iwm_softc *sc)
4382 {
4383         struct iwm_bt_coex_cmd bt_cmd;
4384
4385         bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4386         bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4387
4388         return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4389             &bt_cmd);
4390 }
4391
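/*
 * Ask the firmware to update its regulatory (MCC) information for the
 * given ISO alpha2 country code; the code is packed into a 16-bit MCC,
 * e.g. "ZZ" becomes ('Z' << 8) | 'Z' = 0x5a5a.  With IWM_DEBUG, the
 * regulatory domain and channel count returned by the firmware are
 * logged as well.
 */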
4392 static int
4393 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4394 {
4395         struct iwm_mcc_update_cmd mcc_cmd;
4396         struct iwm_host_cmd hcmd = {
4397                 .id = IWM_MCC_UPDATE_CMD,
4398                 .flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4399                 .data = { &mcc_cmd },
4400         };
4401         int ret;
4402 #ifdef IWM_DEBUG
4403         struct iwm_rx_packet *pkt;
4404         struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4405         struct iwm_mcc_update_resp *mcc_resp;
4406         int n_channels;
4407         uint16_t mcc;
4408 #endif
4409         int resp_v2 = isset(sc->sc_enabled_capa,
4410             IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4411
4412         memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4413         mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4414         if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4415             isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
4416                 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4417         else
4418                 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4419
4420         if (resp_v2)
4421                 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4422         else
4423                 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4424
4425         IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4426             "send MCC update to FW with '%c%c' src = %d\n",
4427             alpha2[0], alpha2[1], mcc_cmd.source_id);
4428
4429         ret = iwm_send_cmd(sc, &hcmd);
4430         if (ret)
4431                 return ret;
4432
4433 #ifdef IWM_DEBUG
4434         pkt = hcmd.resp_pkt;
4435
4436         /* Extract MCC response */
4437         if (resp_v2) {
4438                 mcc_resp = (void *)pkt->data;
4439                 mcc = mcc_resp->mcc;
4440                 n_channels = le32toh(mcc_resp->n_channels);
4441         } else {
4442                 mcc_resp_v1 = (void *)pkt->data;
4443                 mcc = mcc_resp_v1->mcc;
4444                 n_channels = le32toh(mcc_resp_v1->n_channels);
4445         }
4446
4447         /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4448         if (mcc == 0)
4449                 mcc = 0x3030;  /* "00" - world */
4450
4451         IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4452             "regulatory domain '%c%c' (%d channels available)\n",
4453             mcc >> 8, mcc & 0xff, n_channels);
4454 #endif
4455         iwm_free_resp(sc, &hcmd);
4456
4457         return 0;
4458 }
4459
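/*
 * Thermal throttling: ask the firmware to apply the given transmit
 * backoff; a failure is only logged.
 */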
4460 static void
4461 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4462 {
4463         struct iwm_host_cmd cmd = {
4464                 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4465                 .len = { sizeof(uint32_t), },
4466                 .data = { &backoff, },
4467         };
4468
4469         if (iwm_send_cmd(sc, &cmd) != 0) {
4470                 device_printf(sc->sc_dev,
4471                     "failed to change thermal tx backoff\n");
4472         }
4473 }
4474
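/*
 * Bring the NIC all the way up: start the hardware, run the INIT
 * firmware image, restart with the regular image and then push the
 * initial configuration: BT coex, TX antennas, PHY DB, the auxiliary
 * station, the PHY contexts, power management, regulatory and scan
 * configuration where supported, the per-AC TX queues, and finally
 * disable beacon filtering.
 */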
4475 static int
4476 iwm_init_hw(struct iwm_softc *sc)
4477 {
4478         struct ieee80211com *ic = &sc->sc_ic;
4479         int error, i, ac;
4480
4481         if ((error = iwm_start_hw(sc)) != 0) {
4482                 printf("iwm_start_hw: failed %d\n", error);
4483                 return error;
4484         }
4485
4486         if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4487                 printf("iwm_run_init_mvm_ucode: failed %d\n", error);
4488                 return error;
4489         }
4490
4491         /*
4492          * We should stop and restart the HW since the INIT
4493          * image has just been loaded.
4494          */
4495         iwm_stop_device(sc);
4496         if ((error = iwm_start_hw(sc)) != 0) {
4497                 device_printf(sc->sc_dev, "could not initialize hardware\n");
4498                 return error;
4499         }
4500
4501         /* Restart, this time with the regular firmware. */
4502         error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
4503         if (error) {
4504                 device_printf(sc->sc_dev, "could not load firmware\n");
4505                 goto error;
4506         }
4507
4508         if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4509                 device_printf(sc->sc_dev, "bt init conf failed\n");
4510                 goto error;
4511         }
4512
4513         if ((error = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc))) != 0) {
4514                 device_printf(sc->sc_dev, "antenna config failed\n");
4515                 goto error;
4516         }
4517
4518         /* Send the PHY DB control command and then the PHY DB calibration. */
4519         if ((error = iwm_send_phy_db_data(sc)) != 0) {
4520                 device_printf(sc->sc_dev, "phy_db_data failed\n");
4521                 goto error;
4522         }
4523
4524         if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4525                 device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4526                 goto error;
4527         }
4528
4529         /* Add auxiliary station for scanning */
4530         if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4531                 device_printf(sc->sc_dev, "add_aux_sta failed\n");
4532                 goto error;
4533         }
4534
4535         for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4536                 /*
4537                  * The channel used here isn't relevant as it's
4538                  * going to be overwritten in the other flows.
4539                  * For now use the first channel we have.
4540                  */
4541                 if ((error = iwm_mvm_phy_ctxt_add(sc,
4542                     &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4543                         goto error;
4544         }
4545
4546         /* Initialize tx backoffs to the minimum. */
4547         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
4548                 iwm_mvm_tt_tx_backoff(sc, 0);
4549
4550         error = iwm_mvm_power_update_device(sc);
4551         if (error)
4552                 goto error;
4553
4554         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
4555                 if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4556                         goto error;
4557         }
4558
4559         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4560                 if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4561                         goto error;
4562         }
4563
4564         /* Enable Tx queues. */
4565         for (ac = 0; ac < WME_NUM_AC; ac++) {
4566                 error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4567                     iwm_mvm_ac_to_tx_fifo[ac]);
4568                 if (error)
4569                         goto error;
4570         }
4571
4572         if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4573                 device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4574                 goto error;
4575         }
4576
4577         return 0;
4578
4579  error:
4580         iwm_stop_device(sc);
4581         return error;
4582 }
4583
4584 /* Allow multicast from our BSSID. */
4585 static int
4586 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4587 {
4588         struct ieee80211_node *ni = vap->iv_bss;
4589         struct iwm_mcast_filter_cmd *cmd;
4590         size_t size;
4591         int error;
4592
4593         size = roundup(sizeof(*cmd), 4);
4594         cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4595         if (cmd == NULL)
4596                 return ENOMEM;
4597         cmd->filter_own = 1;
4598         cmd->port_id = 0;
4599         cmd->count = 0;
4600         cmd->pass_all = 1;
4601         IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4602
4603         error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4604             IWM_CMD_SYNC, size, cmd);
4605         free(cmd, M_DEVBUF);
4606
4607         return (error);
4608 }
4609
4610 /*
4611  * ifnet interfaces
4612  */
4613
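/*
 * Bring the interface up: run the full hardware init and, on success,
 * mark the device initialized and arm the watchdog callout.
 */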
4614 static void
4615 iwm_init(struct iwm_softc *sc)
4616 {
4617         int error;
4618
4619         if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4620                 return;
4621         }
4622         sc->sc_generation++;
4623         sc->sc_flags &= ~IWM_FLAG_STOPPED;
4624
4625         if ((error = iwm_init_hw(sc)) != 0) {
4626                 printf("iwm_init_hw failed %d\n", error);
4627                 iwm_stop(sc);
4628                 return;
4629         }
4630
4631         /*
4632          * Ok, the firmware is loaded and we are up and running.
4633          */
4634         sc->sc_flags |= IWM_FLAG_HW_INITED;
4635         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4636 }
4637
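/*
 * net80211 transmit entry point: queue the frame on the driver's send
 * queue and kick iwm_start(), provided the hardware has been brought up.
 */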
4638 static int
4639 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4640 {
4641         struct iwm_softc *sc;
4642         int error;
4643
4644         sc = ic->ic_softc;
4645
4646         IWM_LOCK(sc);
4647         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4648                 IWM_UNLOCK(sc);
4649                 return (ENXIO);
4650         }
4651         error = mbufq_enqueue(&sc->sc_snd, m);
4652         if (error) {
4653                 IWM_UNLOCK(sc);
4654                 return (error);
4655         }
4656         iwm_start(sc);
4657         IWM_UNLOCK(sc);
4658         return (0);
4659 }
4660
4661 /*
4662  * Dequeue packets from sendq and call send.
4663  */
4664 static void
4665 iwm_start(struct iwm_softc *sc)
4666 {
4667         struct ieee80211_node *ni;
4668         struct mbuf *m;
4669         int ac = 0;
4670
4671         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4672         while (sc->qfullmsk == 0 &&
4673                 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4674                 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4675                 if (iwm_tx(sc, m, ni, ac) != 0) {
4676                         if_inc_counter(ni->ni_vap->iv_ifp,
4677                             IFCOUNTER_OERRORS, 1);
4678                         ieee80211_free_node(ni);
4679                         continue;
4680                 }
4681                 sc->sc_tx_timer = 15;
4682         }
4683         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4684 }
4685
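/*
 * Stop the interface: clear the running state, stop LED blinking and the
 * TX timer, and shut the device down.
 */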
4686 static void
4687 iwm_stop(struct iwm_softc *sc)
4688 {
4689
4690         sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4691         sc->sc_flags |= IWM_FLAG_STOPPED;
4692         sc->sc_generation++;
4693         iwm_led_blink_stop(sc);
4694         sc->sc_tx_timer = 0;
4695         iwm_stop_device(sc);
4696 }
4697
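/*
 * Watchdog callout, rescheduled every second: if a transmission has been
 * pending for too long, log a device timeout (plus the firmware error log
 * when IWM_DEBUG is set) and restart the interface.
 */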
4698 static void
4699 iwm_watchdog(void *arg)
4700 {
4701         struct iwm_softc *sc = arg;
4702         struct ieee80211com *ic = &sc->sc_ic;
4703
4704         if (sc->sc_tx_timer > 0) {
4705                 if (--sc->sc_tx_timer == 0) {
4706                         device_printf(sc->sc_dev, "device timeout\n");
4707 #ifdef IWM_DEBUG
4708                         iwm_nic_error(sc);
4709 #endif
4710                         ieee80211_restart_all(ic);
4711                         counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4712                         return;
4713                 }
4714         }
4715         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4716 }
4717
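/*
 * net80211 "parent" hook: bring the hardware up when a vap starts running
 * (and then start all vaps), and stop it when no vaps are running.
 */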
4718 static void
4719 iwm_parent(struct ieee80211com *ic)
4720 {
4721         struct iwm_softc *sc = ic->ic_softc;
4722         int startall = 0;
4723
4724         IWM_LOCK(sc);
4725         if (ic->ic_nrunning > 0) {
4726                 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4727                         iwm_init(sc);
4728                         startall = 1;
4729                 }
4730         } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4731                 iwm_stop(sc);
4732         IWM_UNLOCK(sc);
4733         if (startall)
4734                 ieee80211_start_all(ic);
4735 }
4736
4737 /*
4738  * The interrupt side of things
4739  */
4740
4741 /*
4742  * error dumping routines are from iwlwifi/mvm/utils.c
4743  */
4744
4745 /*
4746  * Note: This structure is read from the device with IO accesses,
4747  * and the reading already does the endian conversion. As it is
4748  * read with uint32_t-sized accesses, any members with a different size
4749  * need to be ordered correctly though!
4750  */
4751 struct iwm_error_event_table {
4752         uint32_t valid;         /* (nonzero) valid, (0) log is empty */
4753         uint32_t error_id;              /* type of error */
4754         uint32_t trm_hw_status0;        /* TRM HW status */
4755         uint32_t trm_hw_status1;        /* TRM HW status */
4756         uint32_t blink2;                /* branch link */
4757         uint32_t ilink1;                /* interrupt link */
4758         uint32_t ilink2;                /* interrupt link */
4759         uint32_t data1;         /* error-specific data */
4760         uint32_t data2;         /* error-specific data */
4761         uint32_t data3;         /* error-specific data */
4762         uint32_t bcon_time;             /* beacon timer */
4763         uint32_t tsf_low;               /* network timestamp function timer */
4764         uint32_t tsf_hi;                /* network timestamp function timer */
4765         uint32_t gp1;           /* GP1 timer register */
4766         uint32_t gp2;           /* GP2 timer register */
4767         uint32_t fw_rev_type;   /* firmware revision type */
4768         uint32_t major;         /* uCode version major */
4769         uint32_t minor;         /* uCode version minor */
4770         uint32_t hw_ver;                /* HW Silicon version */
4771         uint32_t brd_ver;               /* HW board version */
4772         uint32_t log_pc;                /* log program counter */
4773         uint32_t frame_ptr;             /* frame pointer */
4774         uint32_t stack_ptr;             /* stack pointer */
4775         uint32_t hcmd;          /* last host command header */
4776         uint32_t isr0;          /* isr status register LMPM_NIC_ISR0:
4777                                  * rxtx_flag */
4778         uint32_t isr1;          /* isr status register LMPM_NIC_ISR1:
4779                                  * host_flag */
4780         uint32_t isr2;          /* isr status register LMPM_NIC_ISR2:
4781                                  * enc_flag */
4782         uint32_t isr3;          /* isr status register LMPM_NIC_ISR3:
4783                                  * time_flag */
4784         uint32_t isr4;          /* isr status register LMPM_NIC_ISR4:
4785                                  * wico interrupt */
4786         uint32_t last_cmd_id;   /* last HCMD id handled by the firmware */
4787         uint32_t wait_event;            /* wait event() caller address */
4788         uint32_t l2p_control;   /* L2pControlField */
4789         uint32_t l2p_duration;  /* L2pDurationField */
4790         uint32_t l2p_mhvalid;   /* L2pMhValidBits */
4791         uint32_t l2p_addr_match;        /* L2pAddrMatchStat */
4792         uint32_t lmpm_pmg_sel;  /* indicate which clocks are turned on
4793                                  * (LMPM_PMG_SEL) */
4794         uint32_t u_timestamp;   /* indicates the date and time of the
4795                                  * compilation */
4796         uint32_t flow_handler;  /* FH read/write pointers, RX credit */
4797 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
4798
4799 /*
4800  * UMAC error struct - relevant starting from family 8000 chip.
4801  * Note: This structure is read from the device with IO accesses,
4802  * and the reading already does the endian conversion. As it is
4803  * read with u32-sized accesses, any members with a different size
4804  * need to be ordered correctly though!
4805  */
4806 struct iwm_umac_error_event_table {
4807         uint32_t valid;         /* (nonzero) valid, (0) log is empty */
4808         uint32_t error_id;      /* type of error */
4809         uint32_t blink1;        /* branch link */
4810         uint32_t blink2;        /* branch link */
4811         uint32_t ilink1;        /* interrupt link */
4812         uint32_t ilink2;        /* interrupt link */
4813         uint32_t data1;         /* error-specific data */
4814         uint32_t data2;         /* error-specific data */
4815         uint32_t data3;         /* error-specific data */
4816         uint32_t umac_major;
4817         uint32_t umac_minor;
4818         uint32_t frame_pointer; /* core register 27*/
4819         uint32_t stack_pointer; /* core register 28 */
4820         uint32_t cmd_header;    /* latest host cmd sent to UMAC */
4821         uint32_t nic_isr_pref;  /* ISR status register */
4822 } __packed;
4823
4824 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
4825 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
4826
4827 #ifdef IWM_DEBUG
4828 struct {
4829         const char *name;
4830         uint8_t num;
4831 } advanced_lookup[] = {
4832         { "NMI_INTERRUPT_WDG", 0x34 },
4833         { "SYSASSERT", 0x35 },
4834         { "UCODE_VERSION_MISMATCH", 0x37 },
4835         { "BAD_COMMAND", 0x38 },
4836         { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
4837         { "FATAL_ERROR", 0x3D },
4838         { "NMI_TRM_HW_ERR", 0x46 },
4839         { "NMI_INTERRUPT_TRM", 0x4C },
4840         { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
4841         { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
4842         { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
4843         { "NMI_INTERRUPT_HOST", 0x66 },
4844         { "NMI_INTERRUPT_ACTION_PT", 0x7C },
4845         { "NMI_INTERRUPT_UNKNOWN", 0x84 },
4846         { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
4847         { "ADVANCED_SYSASSERT", 0 },
4848 };
4849
4850 static const char *
4851 iwm_desc_lookup(uint32_t num)
4852 {
4853         int i;
4854
4855         for (i = 0; i < nitems(advanced_lookup) - 1; i++)
4856                 if (advanced_lookup[i].num == num)
4857                         return advanced_lookup[i].name;
4858
4859         /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
4860         return advanced_lookup[i].name;
4861 }
4862
4863 static void
4864 iwm_nic_umac_error(struct iwm_softc *sc)
4865 {
4866         struct iwm_umac_error_event_table table;
4867         uint32_t base;
4868
4869         base = sc->sc_uc.uc_umac_error_event_table;
4870
4871         if (base < 0x800000) {
4872                 device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
4873                     base);
4874                 return;
4875         }
4876
4877         if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
4878                 device_printf(sc->sc_dev, "reading errlog failed\n");
4879                 return;
4880         }
4881
4882         if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
4883                 device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
4884                 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
4885                     sc->sc_flags, table.valid);
4886         }
4887
4888         device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
4889                 iwm_desc_lookup(table.error_id));
4890         device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
4891         device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
4892         device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
4893             table.ilink1);
4894         device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
4895             table.ilink2);
4896         device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
4897         device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
4898         device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
4899         device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
4900         device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
4901         device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
4902             table.frame_pointer);
4903         device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
4904             table.stack_pointer);
4905         device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
4906         device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
4907             table.nic_isr_pref);
4908 }
4909
4910 /*
4911  * Support for dumping the error log seemed like a good idea ...
4912  * but it's mostly hex junk and the only sensible thing is the
4913  * hw/ucode revision (which we know anyway).  Since it's here,
4914  * I'll just leave it in, just in case e.g. the Intel guys want to
4915  * help us decipher some "ADVANCED_SYSASSERT" later.
4916  */
4917 static void
4918 iwm_nic_error(struct iwm_softc *sc)
4919 {
4920         struct iwm_error_event_table table;
4921         uint32_t base;
4922
4923         device_printf(sc->sc_dev, "dumping device error log\n");
4924         base = sc->sc_uc.uc_error_event_table;
4925         if (base < 0x800000) {
4926                 device_printf(sc->sc_dev,
4927                     "Invalid error log pointer 0x%08x\n", base);
4928                 return;
4929         }
4930
4931         if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
4932                 device_printf(sc->sc_dev, "reading errlog failed\n");
4933                 return;
4934         }
4935
4936         if (!table.valid) {
4937                 device_printf(sc->sc_dev, "errlog not found, skipping\n");
4938                 return;
4939         }
4940
4941         if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
4942                 device_printf(sc->sc_dev, "Start Error Log Dump:\n");
4943                 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
4944                     sc->sc_flags, table.valid);
4945         }
4946
4947         device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
4948             iwm_desc_lookup(table.error_id));
4949         device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
4950             table.trm_hw_status0);
4951         device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
4952             table.trm_hw_status1);
4953         device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
4954         device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
4955         device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
4956         device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
4957         device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
4958         device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
4959         device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
4960         device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
4961         device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
4962         device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
4963         device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
4964         device_printf(sc->sc_dev, "%08X | uCode revision type\n",
4965             table.fw_rev_type);
4966         device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
4967         device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
4968         device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
4969         device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
4970         device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
4971         device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
4972         device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
4973         device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
4974         device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
4975         device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
4976         device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
4977         device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
4978         device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
4979         device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
4980         device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
4981         device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
4982         device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
4983         device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
4984         device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
4985
4986         if (sc->sc_uc.uc_umac_error_event_table)
4987                 iwm_nic_umac_error(sc);
4988 }
4989 #endif
4990
4991 #define SYNC_RESP_STRUCT(_var_, _pkt_)                                  \
4992 do {                                                                    \
4993         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
4994         _var_ = (void *)((_pkt_)+1);                                    \
4995 } while (/*CONSTCOND*/0)
4996
4997 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)                              \
4998 do {                                                                    \
4999         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
5000         _ptr_ = (void *)((_pkt_)+1);                                    \
5001 } while (/*CONSTCOND*/0)
5002
5003 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
5004
5005 /*
5006  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5007  * Basic structure from if_iwn
5008  */
5009 static void
5010 iwm_notif_intr(struct iwm_softc *sc)
5011 {
5012         struct ieee80211com *ic = &sc->sc_ic;
5013         uint16_t hw;
5014
5015         bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5016             BUS_DMASYNC_POSTREAD);
5017
5018         hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5019
5020         /*
5021          * Process responses
5022          */
5023         while (sc->rxq.cur != hw) {
5024                 struct iwm_rx_ring *ring = &sc->rxq;
5025                 struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
5026                 struct iwm_rx_packet *pkt;
5027                 struct iwm_cmd_response *cresp;
5028                 int qid, idx, code;
5029
5030                 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
5031                     BUS_DMASYNC_POSTREAD);
5032                 pkt = mtod(data->m, struct iwm_rx_packet *);
5033
5034                 qid = pkt->hdr.qid & ~0x80;
5035                 idx = pkt->hdr.idx;
5036
5037                 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5038                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5039                     "rx packet qid=%d idx=%d type=%x %d %d\n",
5040                     pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, sc->rxq.cur, hw);
5041
5042                 /*
5043                  * We randomly get these from the firmware; no idea why.
5044                  * They at least seem harmless, so just ignore them for now.
5045                  */
5046                 if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
5047                     || pkt->len_n_flags == htole32(0x55550000))) {
5048                         ADVANCE_RXQ(sc);
5049                         continue;
5050                 }
5051
5052                 switch (code) {
5053                 case IWM_REPLY_RX_PHY_CMD:
5054                         iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
5055                         break;
5056
5057                 case IWM_REPLY_RX_MPDU_CMD:
5058                         iwm_mvm_rx_rx_mpdu(sc, pkt, data);
5059                         break;
5060
5061                 case IWM_TX_CMD:
5062                         iwm_mvm_rx_tx_cmd(sc, pkt, data);
5063                         break;
5064
5065                 case IWM_MISSED_BEACONS_NOTIFICATION: {
5066                         struct iwm_missed_beacons_notif *resp;
5067                         int missed;
5068
5069                         /* XXX look at mac_id to determine interface ID */
5070                         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5071
5072                         SYNC_RESP_STRUCT(resp, pkt);
5073                         missed = le32toh(resp->consec_missed_beacons);
5074
5075                         IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5076                             "%s: MISSED_BEACON: mac_id=%d, "
5077                             "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5078                             "num_rx=%d\n",
5079                             __func__,
5080                             le32toh(resp->mac_id),
5081                             le32toh(resp->consec_missed_beacons_since_last_rx),
5082                             le32toh(resp->consec_missed_beacons),
5083                             le32toh(resp->num_expected_beacons),
5084                             le32toh(resp->num_recvd_beacons));
5085
5086                         /* Be paranoid */
5087                         if (vap == NULL)
5088                                 break;
5089
5090                         /* XXX no net80211 locking? */
5091                         if (vap->iv_state == IEEE80211_S_RUN &&
5092                             (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5093                                 if (missed > vap->iv_bmissthreshold) {
5094                                         /* XXX bad locking; turn into task */
5095                                         IWM_UNLOCK(sc);
5096                                         ieee80211_beacon_miss(ic);
5097                                         IWM_LOCK(sc);
5098                                 }
5099                         }
5100
5101                         break; }
5102
5103                 case IWM_MFUART_LOAD_NOTIFICATION:
5104                         break;
5105
5106                 case IWM_MVM_ALIVE: {
5107                         struct iwm_mvm_alive_resp_v1 *resp1;
5108                         struct iwm_mvm_alive_resp_v2 *resp2;
5109                         struct iwm_mvm_alive_resp_v3 *resp3;
5110
5111                         if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
5112                                 SYNC_RESP_STRUCT(resp1, pkt);
5113                                 sc->sc_uc.uc_error_event_table
5114                                     = le32toh(resp1->error_event_table_ptr);
5115                                 sc->sc_uc.uc_log_event_table
5116                                     = le32toh(resp1->log_event_table_ptr);
5117                                 sc->sched_base = le32toh(resp1->scd_base_ptr);
5118                                 if (resp1->status == IWM_ALIVE_STATUS_OK)
5119                                         sc->sc_uc.uc_ok = 1;
5120                                 else
5121                                         sc->sc_uc.uc_ok = 0;
5122                         }
5123
5124                         if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
5125                                 SYNC_RESP_STRUCT(resp2, pkt);
5126                                 sc->sc_uc.uc_error_event_table
5127                                     = le32toh(resp2->error_event_table_ptr);
5128                                 sc->sc_uc.uc_log_event_table
5129                                     = le32toh(resp2->log_event_table_ptr);
5130                                 sc->sched_base = le32toh(resp2->scd_base_ptr);
5131                                 sc->sc_uc.uc_umac_error_event_table
5132                                     = le32toh(resp2->error_info_addr);
5133                                 if (resp2->status == IWM_ALIVE_STATUS_OK)
5134                                         sc->sc_uc.uc_ok = 1;
5135                                 else
5136                                         sc->sc_uc.uc_ok = 0;
5137                         }
5138
5139                         if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
5140                                 SYNC_RESP_STRUCT(resp3, pkt);
5141                                 sc->sc_uc.uc_error_event_table
5142                                     = le32toh(resp3->error_event_table_ptr);
5143                                 sc->sc_uc.uc_log_event_table
5144                                     = le32toh(resp3->log_event_table_ptr);
5145                                 sc->sched_base = le32toh(resp3->scd_base_ptr);
5146                                 sc->sc_uc.uc_umac_error_event_table
5147                                     = le32toh(resp3->error_info_addr);
5148                                 if (resp3->status == IWM_ALIVE_STATUS_OK)
5149                                         sc->sc_uc.uc_ok = 1;
5150                                 else
5151                                         sc->sc_uc.uc_ok = 0;
5152                         }
5153
5154                         sc->sc_uc.uc_intr = 1;
5155                         wakeup(&sc->sc_uc);
5156                         break; }
5157
5158                 case IWM_CALIB_RES_NOTIF_PHY_DB: {
5159                         struct iwm_calib_res_notif_phy_db *phy_db_notif;
5160                         SYNC_RESP_STRUCT(phy_db_notif, pkt);
5161
5162                         iwm_phy_db_set_section(sc, phy_db_notif);
5163
5164                         break; }
5165
5166                 case IWM_STATISTICS_NOTIFICATION: {
5167                         struct iwm_notif_statistics *stats;
5168                         SYNC_RESP_STRUCT(stats, pkt);
5169                         memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5170                         sc->sc_noise = iwm_get_noise(&stats->rx.general);
5171                         break; }
5172
5173                 case IWM_NVM_ACCESS_CMD:
5174                 case IWM_MCC_UPDATE_CMD:
5175                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
5176                                 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
5177                                     BUS_DMASYNC_POSTREAD);
5178                                 memcpy(sc->sc_cmd_resp,
5179                                     pkt, sizeof(sc->sc_cmd_resp));
5180                         }
5181                         break;
5182
5183                 case IWM_MCC_CHUB_UPDATE_CMD: {
5184                         struct iwm_mcc_chub_notif *notif;
5185                         SYNC_RESP_STRUCT(notif, pkt);
5186
5187                         sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5188                         sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5189                         sc->sc_fw_mcc[2] = '\0';
5190                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
5191                             "fw source %d sent CC '%s'\n",
5192                             notif->source_id, sc->sc_fw_mcc);
5193                         break; }
5194
5195                 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5196                         break;
5197
5198                 case IWM_PHY_CONFIGURATION_CMD:
5199                 case IWM_TX_ANT_CONFIGURATION_CMD:
5200                 case IWM_ADD_STA:
5201                 case IWM_MAC_CONTEXT_CMD:
5202                 case IWM_REPLY_SF_CFG_CMD:
5203                 case IWM_POWER_TABLE_CMD:
5204                 case IWM_PHY_CONTEXT_CMD:
5205                 case IWM_BINDING_CONTEXT_CMD:
5206                 case IWM_TIME_EVENT_CMD:
5207                 case IWM_SCAN_REQUEST_CMD:
5208                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5209                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5210                 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5211                 case IWM_REPLY_BEACON_FILTERING_CMD:
5212                 case IWM_MAC_PM_POWER_TABLE:
5213                 case IWM_TIME_QUOTA_CMD:
5214                 case IWM_REMOVE_STA:
5215                 case IWM_TXPATH_FLUSH:
5216                 case IWM_LQ_CMD:
5217                 case IWM_BT_CONFIG:
5218                 case IWM_REPLY_THERMAL_MNG_BACKOFF:
5219                         SYNC_RESP_STRUCT(cresp, pkt);
5220                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
5221                                 memcpy(sc->sc_cmd_resp,
5222                                     pkt, sizeof(*pkt)+sizeof(*cresp));
5223                         }
5224                         break;
5225
5226                 /* ignore */
5227                 case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
5228                         break;
5229
5230                 case IWM_INIT_COMPLETE_NOTIF:
5231                         sc->sc_init_complete = 1;
5232                         wakeup(&sc->sc_init_complete);
5233                         break;
5234
5235                 case IWM_SCAN_OFFLOAD_COMPLETE: {
5236                         struct iwm_periodic_scan_complete *notif;
5237                         SYNC_RESP_STRUCT(notif, pkt);
5238                         break;
5239                 }
5240
5241                 case IWM_SCAN_ITERATION_COMPLETE: {
5242                         struct iwm_lmac_scan_complete_notif *notif;
5243                         SYNC_RESP_STRUCT(notif, pkt);
5244                         ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5245                         break;
5246                 }
5247  
5248                 case IWM_SCAN_COMPLETE_UMAC: {
5249                         struct iwm_umac_scan_complete *notif;
5250                         SYNC_RESP_STRUCT(notif, pkt);
5251
5252                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
5253                             "UMAC scan complete, status=0x%x\n",
5254                             notif->status);
5255 #if 0   /* XXX This would be a duplicate scan end call */
5256                         taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
5257 #endif
5258                         break;
5259                 }
5260
5261                 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5262                         struct iwm_umac_scan_iter_complete_notif *notif;
5263                         SYNC_RESP_STRUCT(notif, pkt);
5264
5265                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5266                             "complete, status=0x%x, %d channels scanned\n",
5267                             notif->status, notif->scanned_channels);
5268                         ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5269                         break;
5270                 }
5271
5272                 case IWM_REPLY_ERROR: {
5273                         struct iwm_error_resp *resp;
5274                         SYNC_RESP_STRUCT(resp, pkt);
5275
5276                         device_printf(sc->sc_dev,
5277                             "firmware error 0x%x, cmd 0x%x\n",
5278                             le32toh(resp->error_type),
5279                             resp->cmd_id);
5280                         break;
5281                 }
5282
5283                 case IWM_TIME_EVENT_NOTIFICATION: {
5284                         struct iwm_time_event_notif *notif;
5285                         SYNC_RESP_STRUCT(notif, pkt);
5286
5287                         IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5288                             "TE notif status = 0x%x action = 0x%x\n",
5289                             notif->status, notif->action);
5290                         break;
5291                 }
5292
5293                 case IWM_MCAST_FILTER_CMD:
5294                         break;
5295
5296                 case IWM_SCD_QUEUE_CFG: {
5297                         struct iwm_scd_txq_cfg_rsp *rsp;
5298                         SYNC_RESP_STRUCT(rsp, pkt);
5299
5300                         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5301                             "queue cfg token=0x%x sta_id=%d "
5302                             "tid=%d scd_queue=%d\n",
5303                             rsp->token, rsp->sta_id, rsp->tid,
5304                             rsp->scd_queue);
5305                         break;
5306                 }
5307
5308                 default:
5309                         device_printf(sc->sc_dev,
5310                             "frame %d/%d %x UNHANDLED (this should "
5311                             "not happen)\n", qid, idx,
5312                             pkt->len_n_flags);
5313                         break;
5314                 }
5315
5316                 /*
5317                  * Why test bit 0x80?  The Linux driver:
5318                  *
5319                  * There is one exception:  uCode sets bit 15 when it
5320                  * originates the response/notification, i.e. when the
5321                  * response/notification is not a direct response to a
5322                  * command sent by the driver.  For example, uCode issues
5323                  * IWM_REPLY_RX when it sends a received frame to the driver;
5324                  * it is not a direct response to any driver command.
5325                  *
5326                  * Ok, so since when is 7 == 15?  Well, the Linux driver
5327                  * uses a slightly different format for pkt->hdr, and "qid"
5328                  * is actually the upper byte of a two-byte field.
5329                  */
5330                 if (!(pkt->hdr.qid & (1 << 7))) {
5331                         iwm_cmd_done(sc, pkt);
5332                 }
5333
5334                 ADVANCE_RXQ(sc);
5335         }
5336
5337         IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
5338             IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
5339
5340         /*
5341          * Tell the firmware what we have processed.
5342          * Seems like the hardware gets upset unless we align
5343          * the write by 8??
5344          */
5345         hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5346         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
5347 }
5348
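/*
 * Main interrupt handler.  Interrupt causes are collected either from the
 * ICT table (when IWM_FLAG_USE_ICT is set) or straight from the CSR
 * registers, then dispatched: firmware errors, hardware errors, firmware
 * chunk completion, rfkill, and the RX/periodic interrupts which feed
 * iwm_notif_intr().
 */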
5349 static void
5350 iwm_intr(void *arg)
5351 {
5352         struct iwm_softc *sc = arg;
5353         int handled = 0;
5354         int r1, r2, rv = 0;
5355         int isperiodic = 0;
5356
5357         IWM_LOCK(sc);
5358         IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5359
5360         if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5361                 uint32_t *ict = sc->ict_dma.vaddr;
5362                 int tmp;
5363
5364                 tmp = htole32(ict[sc->ict_cur]);
5365                 if (!tmp)
5366                         goto out_ena;
5367
5368                 /*
5369                  * ok, there was something.  keep plowing until we have all.
5370                  */
5371                 r1 = r2 = 0;
5372                 while (tmp) {
5373                         r1 |= tmp;
5374                         ict[sc->ict_cur] = 0;
5375                         sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5376                         tmp = htole32(ict[sc->ict_cur]);
5377                 }
5378
5379                 /* this is where the fun begins.  don't ask */
5380                 if (r1 == 0xffffffff)
5381                         r1 = 0;
5382
5383                 /* i am not expected to understand this */
5384                 if (r1 & 0xc0000)
5385                         r1 |= 0x8000;
5386                 r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5387         } else {
5388                 r1 = IWM_READ(sc, IWM_CSR_INT);
5389                 /* "hardware gone" (where, fishing?) */
5390                 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5391                         goto out;
5392                 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5393         }
5394         if (r1 == 0 && r2 == 0) {
5395                 goto out_ena;
5396         }
5397
5398         IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5399
5400         /* ignored */
5401         handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
5402
5403         if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5404                 int i;
5405                 struct ieee80211com *ic = &sc->sc_ic;
5406                 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5407
5408 #ifdef IWM_DEBUG
5409                 iwm_nic_error(sc);
5410 #endif
5411                 /* Dump driver status (TX and RX rings) while we're here. */
5412                 device_printf(sc->sc_dev, "driver status:\n");
5413                 for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5414                         struct iwm_tx_ring *ring = &sc->txq[i];
5415                         device_printf(sc->sc_dev,
5416                             "  tx ring %2d: qid=%-2d cur=%-3d "
5417                             "queued=%-3d\n",
5418                             i, ring->qid, ring->cur, ring->queued);
5419                 }
5420                 device_printf(sc->sc_dev,
5421                     "  rx ring: cur=%d\n", sc->rxq.cur);
5422                 device_printf(sc->sc_dev,
5423                     "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5424
5425                 /* Don't stop the device; just do a VAP restart */
5426                 IWM_UNLOCK(sc);
5427
5428                 if (vap == NULL) {
5429                         printf("%s: null vap\n", __func__);
5430                         return;
5431                 }
5432
5433                 device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5434                     "restarting\n", __func__, vap->iv_state);
5435
5436                 /* XXX TODO: turn this into a callout/taskqueue */
5437                 ieee80211_restart_all(ic);
5438                 return;
5439         }
5440
5441         if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5442                 handled |= IWM_CSR_INT_BIT_HW_ERR;
5443                 device_printf(sc->sc_dev, "hardware error, stopping device\n");
5444                 iwm_stop(sc);
5445                 rv = 1;
5446                 goto out;
5447         }
5448
5449         /* firmware chunk loaded */
5450         if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5451                 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5452                 handled |= IWM_CSR_INT_BIT_FH_TX;
5453                 sc->sc_fw_chunk_done = 1;
5454                 wakeup(&sc->sc_fw);
5455         }
5456
5457         if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5458                 handled |= IWM_CSR_INT_BIT_RF_KILL;
5459                 if (iwm_check_rfkill(sc)) {
5460                         device_printf(sc->sc_dev,
5461                             "%s: rfkill switch, disabling interface\n",
5462                             __func__);
5463                         iwm_stop(sc);
5464                 }
5465         }
5466
5467         /*
5468          * The Linux driver uses periodic interrupts to avoid races.
5469          * We cargo-cult like it's going out of fashion.
5470          */
5471         if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5472                 handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5473                 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5474                 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5475                         IWM_WRITE_1(sc,
5476                             IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5477                 isperiodic = 1;
5478         }
5479
5480         if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5481                 handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5482                 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5483
5484                 iwm_notif_intr(sc);
5485
5486                 /* enable periodic interrupt, see above */
5487                 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5488                         IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5489                             IWM_CSR_INT_PERIODIC_ENA);
5490         }
5491
5492         if (__predict_false(r1 & ~handled))
5493                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5494                     "%s: unhandled interrupts: %x\n", __func__, r1);
5495         rv = 1;
5496
5497  out_ena:
5498         iwm_restore_interrupts(sc);
5499  out:
5500         IWM_UNLOCK(sc);
5501         return;
5502 }
5503
5504 /*
5505  * Autoconf glue-sniffing
5506  */
5507 #define PCI_VENDOR_INTEL                0x8086
5508 #define PCI_PRODUCT_INTEL_WL_3160_1     0x08b3
5509 #define PCI_PRODUCT_INTEL_WL_3160_2     0x08b4
5510 #define PCI_PRODUCT_INTEL_WL_3165_1     0x3165
5511 #define PCI_PRODUCT_INTEL_WL_3165_2     0x3166
5512 #define PCI_PRODUCT_INTEL_WL_7260_1     0x08b1
5513 #define PCI_PRODUCT_INTEL_WL_7260_2     0x08b2
5514 #define PCI_PRODUCT_INTEL_WL_7265_1     0x095a
5515 #define PCI_PRODUCT_INTEL_WL_7265_2     0x095b
5516 #define PCI_PRODUCT_INTEL_WL_8260_1     0x24f3
5517 #define PCI_PRODUCT_INTEL_WL_8260_2     0x24f4
5518
5519 static const struct iwm_devices {
5520         uint16_t        device;
5521         const char      *name;
5522 } iwm_devices[] = {
5523         { PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
5524         { PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
5525         { PCI_PRODUCT_INTEL_WL_3165_1, "Intel Dual Band Wireless AC 3165" },
5526         { PCI_PRODUCT_INTEL_WL_3165_2, "Intel Dual Band Wireless AC 3165" },
5527         { PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
5528         { PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
5529         { PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
5530         { PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
5531         { PCI_PRODUCT_INTEL_WL_8260_1, "Intel Dual Band Wireless AC 8260" },
5532         { PCI_PRODUCT_INTEL_WL_8260_2, "Intel Dual Band Wireless AC 8260" },
5533 };
5534
5535 static int
5536 iwm_probe(device_t dev)
5537 {
5538         int i;
5539
5540         for (i = 0; i < nitems(iwm_devices); i++) {
5541                 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5542                     pci_get_device(dev) == iwm_devices[i].device) {
5543                         device_set_desc(dev, iwm_devices[i].name);
5544                         return (BUS_PROBE_DEFAULT);
5545                 }
5546         }
5547
5548         return (ENXIO);
5549 }
5550
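/*
 * Map the PCI device ID onto the firmware image name, device family and
 * firmware DMA segment size, and latch the hardware revision register
 * for later use.
 */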
5551 static int
5552 iwm_dev_check(device_t dev)
5553 {
5554         struct iwm_softc *sc;
5555
5556         sc = device_get_softc(dev);
5557
5558         sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
5559         switch (pci_get_device(dev)) {
5560         case PCI_PRODUCT_INTEL_WL_3160_1:
5561         case PCI_PRODUCT_INTEL_WL_3160_2:
5562                 sc->sc_fwname = "iwm3160fw";
5563                 sc->host_interrupt_operation_mode = 1;
5564                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5565                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5566                 return (0);
5567         case PCI_PRODUCT_INTEL_WL_3165_1:
5568         case PCI_PRODUCT_INTEL_WL_3165_2:
5569                 sc->sc_fwname = "iwm7265fw";
5570                 sc->host_interrupt_operation_mode = 0;
5571                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5572                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5573                 return (0);
5574         case PCI_PRODUCT_INTEL_WL_7260_1:
5575         case PCI_PRODUCT_INTEL_WL_7260_2:
5576                 sc->sc_fwname = "iwm7260fw";
5577                 sc->host_interrupt_operation_mode = 1;
5578                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5579                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5580                 return (0);
5581         case PCI_PRODUCT_INTEL_WL_7265_1:
5582         case PCI_PRODUCT_INTEL_WL_7265_2:
5583                 sc->sc_fwname = "iwm7265fw";
5584                 sc->host_interrupt_operation_mode = 0;
5585                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5586                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5587                 return (0);
5588         case PCI_PRODUCT_INTEL_WL_8260_1:
5589         case PCI_PRODUCT_INTEL_WL_8260_2:
5590                 sc->sc_fwname = "iwm8000Cfw";
5591                 sc->host_interrupt_operation_mode = 0;
5592                 sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
5593                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
5594                 return (0);
5595         default:
5596                 device_printf(dev, "unknown adapter type\n");
5597                 return (ENXIO);
5598         }
5599 }
5600
5601 static int
5602 iwm_pci_attach(device_t dev)
5603 {
5604         struct iwm_softc *sc;
5605         int count, error, rid;
5606         uint16_t reg;
5607
5608         sc = device_get_softc(dev);
5609
5610         /* Clear device-specific "PCI retry timeout" register (41h). */
5611         reg = pci_read_config(dev, 0x40, sizeof(reg));
5612         pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
5613
5614         /* Enable bus-mastering and hardware bug workaround. */
5615         pci_enable_busmaster(dev);
5616         reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5617         /* if !MSI */
5618         if (reg & PCIM_STATUS_INTxSTATE) {
5619                 reg &= ~PCIM_STATUS_INTxSTATE;
5620         }
5621         pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5622
5623         rid = PCIR_BAR(0);
5624         sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5625             RF_ACTIVE);
5626         if (sc->sc_mem == NULL) {
5627                 device_printf(sc->sc_dev, "can't map mem space\n");
5628                 return (ENXIO);
5629         }
5630         sc->sc_st = rman_get_bustag(sc->sc_mem);
5631         sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5632
5633         /* Install interrupt handler. */
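        /*
         * Prefer a single MSI vector (resource id 1); if MSI allocation
         * fails, fall back to the legacy INTx line (resource id 0),
         * which must be marked shareable.
         */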
5634         count = 1;
5635         rid = 0;
5636         if (pci_alloc_msi(dev, &count) == 0)
5637                 rid = 1;
5638         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5639             (rid != 0 ? 0 : RF_SHAREABLE));
5640         if (sc->sc_irq == NULL) {
5641                 device_printf(dev, "can't map interrupt\n");
5642                 return (ENXIO);
5643         }
5644         error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5645             NULL, iwm_intr, sc, &sc->sc_ih);
5646         if (error != 0 || sc->sc_ih == NULL) {
5647                 device_printf(dev, "can't establish interrupt\n");
5648                 return (ENXIO);
5649         }
5650         sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5651
5652         return (0);
5653 }
5654
5655 static void
5656 iwm_pci_detach(device_t dev)
5657 {
5658         struct iwm_softc *sc = device_get_softc(dev);
5659
5660         if (sc->sc_irq != NULL) {
5661                 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5662                 bus_release_resource(dev, SYS_RES_IRQ,
5663                     rman_get_rid(sc->sc_irq), sc->sc_irq);
5664                 pci_release_msi(dev);
5665         }
5666         if (sc->sc_mem != NULL)
5667                 bus_release_resource(dev, SYS_RES_MEMORY,
5668                     rman_get_rid(sc->sc_mem), sc->sc_mem);
5669 }
5670
5671
5672
5673 static int
5674 iwm_attach(device_t dev)
5675 {
5676         struct iwm_softc *sc = device_get_softc(dev);
5677         struct ieee80211com *ic = &sc->sc_ic;
5678         int error;
5679         int txq_i, i;
5680
5681         sc->sc_dev = dev;
5682         IWM_LOCK_INIT(sc);
5683         mbufq_init(&sc->sc_snd, ifqmaxlen);
5684         callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5685         callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
5686         TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5687
5688         /* PCI attach */
5689         error = iwm_pci_attach(dev);
5690         if (error != 0)
5691                 goto fail;
5692
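        /* -1 means no synchronous command response is currently awaited. */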
5693         sc->sc_wantresp = -1;
5694
5695         /* Check device type */
5696         error = iwm_dev_check(dev);
5697         if (error != 0)
5698                 goto fail;
5699
5700         /*
5701          * We now start fiddling with the hardware
5702          */
5703         /*
5704          * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
5705          * changed, and now the revision step also includes bit 0-1 (no more
5706          * "dash" value). To keep hw_rev backwards compatible - we'll store it
5707          * in the old format.
5708          */
5709         if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
5710                 sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
5711                                 (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
5712
5713         if (iwm_prepare_card_hw(sc) != 0) {
5714                 device_printf(dev, "could not initialize hardware\n");
5715                 goto fail;
5716         }
5717
5718         if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
5719                 int ret;
5720                 uint32_t hw_step;
5721
5722                 /*
5723                  * In order to recognize C step the driver should read the
5724                  * chip version id located at the AUX bus MISC address.
5725                  */
5726                 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
5727                             IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
5728                 DELAY(2);
5729
5730                 ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
5731                                    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5732                                    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5733                                    25000);
5734                 if (!ret) {
5735                         device_printf(sc->sc_dev,
5736                             "Failed to wake up the nic\n");
5737                         goto fail;
5738                 }
5739
5740                 if (iwm_nic_lock(sc)) {
5741                         hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
5742                         hw_step |= IWM_ENABLE_WFPM;
5743                         iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
5744                         hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
5745                         hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
5746                         if (hw_step == 0x3)
5747                                 sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
5748                                                 (IWM_SILICON_C_STEP << 2);
5749                         iwm_nic_unlock(sc);
5750                 } else {
5751                         device_printf(sc->sc_dev, "Failed to lock the nic\n");
5752                         goto fail;
5753                 }
5754         }
5755
5756         /* Allocate DMA memory for firmware transfers. */
5757         if ((error = iwm_alloc_fwmem(sc)) != 0) {
5758                 device_printf(dev, "could not allocate memory for firmware\n");
5759                 goto fail;
5760         }
5761
5762         /* Allocate "Keep Warm" page. */
5763         if ((error = iwm_alloc_kw(sc)) != 0) {
5764                 device_printf(dev, "could not allocate keep warm page\n");
5765                 goto fail;
5766         }
5767
5768         /* We use ICT (interrupt cause table) interrupts; allocate the table. */
5769         if ((error = iwm_alloc_ict(sc)) != 0) {
5770                 device_printf(dev, "could not allocate ICT table\n");
5771                 goto fail;
5772         }
5773
5774         /* Allocate TX scheduler "rings". */
5775         if ((error = iwm_alloc_sched(sc)) != 0) {
5776                 device_printf(dev, "could not allocate TX scheduler rings\n");
5777                 goto fail;
5778         }
5779
5780         /* Allocate TX rings */
5781         for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
5782                 if ((error = iwm_alloc_tx_ring(sc,
5783                     &sc->txq[txq_i], txq_i)) != 0) {
5784                         device_printf(dev,
5785                             "could not allocate TX ring %d\n",
5786                             txq_i);
5787                         goto fail;
5788                 }
5789         }
5790
5791         /* Allocate RX ring. */
5792         if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
5793                 device_printf(dev, "could not allocate RX ring\n");
5794                 goto fail;
5795         }
5796
5797         /* Clear pending interrupts. */
5798         IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
5799
5800         ic->ic_softc = sc;
5801         ic->ic_name = device_get_nameunit(sc->sc_dev);
5802         ic->ic_phytype = IEEE80211_T_OFDM;      /* not only, but not used */
5803         ic->ic_opmode = IEEE80211_M_STA;        /* default to BSS mode */
5804
5805         /* Set device capabilities. */
5806         ic->ic_caps =
5807             IEEE80211_C_STA |
5808             IEEE80211_C_WPA |           /* WPA/RSN */
5809             IEEE80211_C_WME |
5810             IEEE80211_C_SHSLOT |        /* short slot time supported */
5811             IEEE80211_C_SHPREAMBLE      /* short preamble supported */
5812 //          IEEE80211_C_BGSCAN          /* capable of bg scanning */
5813             ;
5814         for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
5815                 sc->sc_phyctxt[i].id = i;
5816                 sc->sc_phyctxt[i].color = 0;
5817                 sc->sc_phyctxt[i].ref = 0;
5818                 sc->sc_phyctxt[i].channel = NULL;
5819         }
5820
5821         /* Max RSSI */
5822         sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
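        /*
         * Defer the rest of the bring-up (firmware load, NVM read,
         * net80211 attach) to iwm_preinit(), run from a config intrhook
         * once interrupts are available.
         */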
5823         sc->sc_preinit_hook.ich_func = iwm_preinit;
5824         sc->sc_preinit_hook.ich_arg = sc;
5825         if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
5826                 device_printf(dev, "config_intrhook_establish failed\n");
5827                 goto fail;
5828         }
5829
5830 #ifdef IWM_DEBUG
5831         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
5832             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
5833             CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
5834 #endif
5835
5836         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5837             "<-%s\n", __func__);
5838
5839         return 0;
5840
5841         /* Free allocated memory if something failed during attachment. */
5842 fail:
5843         iwm_detach_local(sc, 0);
5844
5845         return ENXIO;
5846 }
5847
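/*
 * An address is usable if it is neither multicast/broadcast (low bit of
 * the first octet set) nor all zeroes.
 */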
5848 static int
5849 iwm_is_valid_ether_addr(uint8_t *addr)
5850 {
5851         char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
5852
5853         if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
5854                 return (FALSE);
5855
5856         return (TRUE);
5857 }
5858
5859 static int
5860 iwm_update_edca(struct ieee80211com *ic)
5861 {
5862         struct iwm_softc *sc = ic->ic_softc;
5863
5864         device_printf(sc->sc_dev, "%s: called\n", __func__);
5865         return (0);
5866 }
5867
5868 static void
5869 iwm_preinit(void *arg)
5870 {
5871         struct iwm_softc *sc = arg;
5872         device_t dev = sc->sc_dev;
5873         struct ieee80211com *ic = &sc->sc_ic;
5874         int error;
5875
5876         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5877             "->%s\n", __func__);
5878
5879         IWM_LOCK(sc);
5880         if ((error = iwm_start_hw(sc)) != 0) {
5881                 device_printf(dev, "could not initialize hardware\n");
5882                 IWM_UNLOCK(sc);
5883                 goto fail;
5884         }
5885
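        /*
         * Run the init firmware once to read the NVM (MAC address, band
         * and channel capabilities), then power the device back down
         * until the interface is actually brought up.
         */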
5886         error = iwm_run_init_mvm_ucode(sc, 1);
5887         iwm_stop_device(sc);
5888         if (error) {
5889                 IWM_UNLOCK(sc);
5890                 goto fail;
5891         }
5892         device_printf(dev,
5893             "hw rev 0x%x, fw ver %s, address %s\n",
5894             sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
5895             sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
5896
5897         /* not all hardware can do 5GHz band */
5898         if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
5899                 memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
5900                     sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
5901         IWM_UNLOCK(sc);
5902
5903         iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
5904             ic->ic_channels);
5905
5906         /*
5907          * At this point we've committed - if we fail to do setup,
5908          * we now also have to tear down the net80211 state.
5909          */
5910         ieee80211_ifattach(ic);
5911         ic->ic_vap_create = iwm_vap_create;
5912         ic->ic_vap_delete = iwm_vap_delete;
5913         ic->ic_raw_xmit = iwm_raw_xmit;
5914         ic->ic_node_alloc = iwm_node_alloc;
5915         ic->ic_scan_start = iwm_scan_start;
5916         ic->ic_scan_end = iwm_scan_end;
5917         ic->ic_update_mcast = iwm_update_mcast;
5918         ic->ic_getradiocaps = iwm_init_channel_map;
5919         ic->ic_set_channel = iwm_set_channel;
5920         ic->ic_scan_curchan = iwm_scan_curchan;
5921         ic->ic_scan_mindwell = iwm_scan_mindwell;
5922         ic->ic_wme.wme_update = iwm_update_edca;
5923         ic->ic_parent = iwm_parent;
5924         ic->ic_transmit = iwm_transmit;
5925         iwm_radiotap_attach(sc);
5926         if (bootverbose)
5927                 ieee80211_announce(ic);
5928
5929         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5930             "<-%s\n", __func__);
5931         config_intrhook_disestablish(&sc->sc_preinit_hook);
5932
5933         return;
5934 fail:
5935         config_intrhook_disestablish(&sc->sc_preinit_hook);
5936         iwm_detach_local(sc, 0);
5937 }
5938
5939 /*
5940  * Attach the interface to 802.11 radiotap.
5941  */
5942 static void
5943 iwm_radiotap_attach(struct iwm_softc *sc)
5944 {
5945         struct ieee80211com *ic = &sc->sc_ic;
5946
5947         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5948             "->%s begin\n", __func__);
5949         ieee80211_radiotap_attach(ic,
5950             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
5951                 IWM_TX_RADIOTAP_PRESENT,
5952             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
5953                 IWM_RX_RADIOTAP_PRESENT);
5954         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5955             "->%s end\n", __func__);
5956 }
5957
5958 static struct ieee80211vap *
5959 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
5960     enum ieee80211_opmode opmode, int flags,
5961     const uint8_t bssid[IEEE80211_ADDR_LEN],
5962     const uint8_t mac[IEEE80211_ADDR_LEN])
5963 {
5964         struct iwm_vap *ivp;
5965         struct ieee80211vap *vap;
5966
5967         if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
5968                 return NULL;
5969         ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
5970         vap = &ivp->iv_vap;
5971         ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
5972         vap->iv_bmissthreshold = 10;            /* override default */
5973         /* Override with driver methods. */
5974         ivp->iv_newstate = vap->iv_newstate;
5975         vap->iv_newstate = iwm_newstate;
5976
5977         ieee80211_ratectl_init(vap);
5978         /* Complete setup. */
5979         ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
5980             mac);
5981         ic->ic_opmode = opmode;
5982
5983         return vap;
5984 }
5985
5986 static void
5987 iwm_vap_delete(struct ieee80211vap *vap)
5988 {
5989         struct iwm_vap *ivp = IWM_VAP(vap);
5990
5991         ieee80211_ratectl_deinit(vap);
5992         ieee80211_vap_detach(vap);
5993         free(ivp, M_80211_VAP);
5994 }
5995
5996 static void
5997 iwm_scan_start(struct ieee80211com *ic)
5998 {
5999         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6000         struct iwm_softc *sc = ic->ic_softc;
6001         int error;
6002
6003         IWM_LOCK(sc);
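        /*
         * Firmware that advertises the UMAC scan TLV capability uses the
         * newer UMAC scan command; otherwise fall back to a LMAC scan.
         */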
6004         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6005                 error = iwm_mvm_umac_scan(sc);
6006         else
6007                 error = iwm_mvm_lmac_scan(sc);
6008         if (error != 0) {
6009                 device_printf(sc->sc_dev, "could not initiate scan\n");
6010                 IWM_UNLOCK(sc);
6011                 ieee80211_cancel_scan(vap);
6012         } else {
6013                 iwm_led_blink_start(sc);
6014                 IWM_UNLOCK(sc);
6015         }
6016 }
6017
6018 static void
6019 iwm_scan_end(struct ieee80211com *ic)
6020 {
6021         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6022         struct iwm_softc *sc = ic->ic_softc;
6023
6024         IWM_LOCK(sc);
6025         iwm_led_blink_stop(sc);
6026         if (vap->iv_state == IEEE80211_S_RUN)
6027                 iwm_mvm_led_enable(sc);
6028         IWM_UNLOCK(sc);
6029 }
6030
6031 static void
6032 iwm_update_mcast(struct ieee80211com *ic)
6033 {
6034 }
6035
6036 static void
6037 iwm_set_channel(struct ieee80211com *ic)
6038 {
6039 }
6040
6041 static void
6042 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6043 {
6044 }
6045
6046 static void
6047 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6048 {
6049         return;
6050 }
6051
6052 void
6053 iwm_init_task(void *arg1)
6054 {
6055         struct iwm_softc *sc = arg1;
6056
6057         IWM_LOCK(sc);
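        /* Serialize with other users of the device via the BUSY flag. */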
6058         while (sc->sc_flags & IWM_FLAG_BUSY)
6059                 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6060         sc->sc_flags |= IWM_FLAG_BUSY;
6061         iwm_stop(sc);
6062         if (sc->sc_ic.ic_nrunning > 0)
6063                 iwm_init(sc);
6064         sc->sc_flags &= ~IWM_FLAG_BUSY;
6065         wakeup(&sc->sc_flags);
6066         IWM_UNLOCK(sc);
6067 }
6068
6069 static int
6070 iwm_resume(device_t dev)
6071 {
6072         struct iwm_softc *sc = device_get_softc(dev);
6073         int do_reinit = 0;
6074         uint16_t reg;
6075
6076         /* Clear device-specific "PCI retry timeout" register (41h). */
6077         reg = pci_read_config(dev, 0x40, sizeof(reg));
6078         pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
6079         iwm_init_task(sc);
6080
6081         IWM_LOCK(sc);
6082         if (sc->sc_flags & IWM_FLAG_SCANNING) {
6083                 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6084                 do_reinit = 1;
6085         }
6086         IWM_UNLOCK(sc);
6087
6088         if (do_reinit)
6089                 ieee80211_resume_all(&sc->sc_ic);
6090
6091         return 0;
6092 }
6093
6094 static int
6095 iwm_suspend(device_t dev)
6096 {
6097         int do_stop = 0;
6098         struct iwm_softc *sc = device_get_softc(dev);
6099
6100         do_stop = (sc->sc_ic.ic_nrunning > 0);
6101
6102         ieee80211_suspend_all(&sc->sc_ic);
6103
6104         if (do_stop) {
6105                 IWM_LOCK(sc);
6106                 iwm_stop(sc);
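                /*
                 * Set IWM_FLAG_SCANNING so that iwm_resume() restarts
                 * the interface via ieee80211_resume_all().
                 */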
6107                 sc->sc_flags |= IWM_FLAG_SCANNING;
6108                 IWM_UNLOCK(sc);
6109         }
6110
6111         return (0);
6112 }
6113
6114 static int
6115 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6116 {
6117         struct iwm_fw_info *fw = &sc->sc_fw;
6118         device_t dev = sc->sc_dev;
6119         int i;
6120
6121         ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6122
6123         callout_drain(&sc->sc_led_blink_to);
6124         callout_drain(&sc->sc_watchdog_to);
6125         iwm_stop_device(sc);
6126         if (do_net80211) {
6127                 ieee80211_ifdetach(&sc->sc_ic);
6128         }
6129
6130         iwm_phy_db_free(sc);
6131
6132         /* Free descriptor rings */
6133         iwm_free_rx_ring(sc, &sc->rxq);
6134         for (i = 0; i < nitems(sc->txq); i++)
6135                 iwm_free_tx_ring(sc, &sc->txq[i]);
6136
6137         /* Free firmware */
6138         if (fw->fw_fp != NULL)
6139                 iwm_fw_info_free(fw);
6140
6141         /* Free scheduler */
6142         iwm_free_sched(sc);
6143         if (sc->ict_dma.vaddr != NULL)
6144                 iwm_free_ict(sc);
6145         if (sc->kw_dma.vaddr != NULL)
6146                 iwm_free_kw(sc);
6147         if (sc->fw_dma.vaddr != NULL)
6148                 iwm_free_fwmem(sc);
6149
6150         /* Finished with the hardware - detach things */
6151         iwm_pci_detach(dev);
6152
6153         mbufq_drain(&sc->sc_snd);
6154         IWM_LOCK_DESTROY(sc);
6155
6156         return (0);
6157 }
6158
6159 static int
6160 iwm_detach(device_t dev)
6161 {
6162         struct iwm_softc *sc = device_get_softc(dev);
6163
6164         return (iwm_detach_local(sc, 1));
6165 }
6166
6167 static device_method_t iwm_pci_methods[] = {
6168         /* Device interface */
6169         DEVMETHOD(device_probe,         iwm_probe),
6170         DEVMETHOD(device_attach,        iwm_attach),
6171         DEVMETHOD(device_detach,        iwm_detach),
6172         DEVMETHOD(device_suspend,       iwm_suspend),
6173         DEVMETHOD(device_resume,        iwm_resume),
6174
6175         DEVMETHOD_END
6176 };
6177
6178 static driver_t iwm_pci_driver = {
6179         "iwm",
6180         iwm_pci_methods,
6181         sizeof (struct iwm_softc)
6182 };
6183
6184 static devclass_t iwm_devclass;
6185
6186 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6187 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6188 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6189 MODULE_DEPEND(iwm, wlan, 1, 1, 1);