1 /*      $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $     */
2
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 #include <sys/cdefs.h>
106 __FBSDID("$FreeBSD$");
107
108 #include "opt_wlan.h"
109
110 #include <sys/param.h>
111 #include <sys/bus.h>
112 #include <sys/conf.h>
113 #include <sys/endian.h>
114 #include <sys/firmware.h>
115 #include <sys/kernel.h>
116 #include <sys/malloc.h>
117 #include <sys/mbuf.h>
118 #include <sys/mutex.h>
119 #include <sys/module.h>
120 #include <sys/proc.h>
121 #include <sys/rman.h>
122 #include <sys/socket.h>
123 #include <sys/sockio.h>
124 #include <sys/sysctl.h>
125 #include <sys/linker.h>
126
127 #include <machine/bus.h>
128 #include <machine/endian.h>
129 #include <machine/resource.h>
130
131 #include <dev/pci/pcivar.h>
132 #include <dev/pci/pcireg.h>
133
134 #include <net/bpf.h>
135
136 #include <net/if.h>
137 #include <net/if_var.h>
138 #include <net/if_arp.h>
139 #include <net/if_dl.h>
140 #include <net/if_media.h>
141 #include <net/if_types.h>
142
143 #include <netinet/in.h>
144 #include <netinet/in_systm.h>
145 #include <netinet/if_ether.h>
146 #include <netinet/ip.h>
147
148 #include <net80211/ieee80211_var.h>
149 #include <net80211/ieee80211_regdomain.h>
150 #include <net80211/ieee80211_ratectl.h>
151 #include <net80211/ieee80211_radiotap.h>
152
153 #include <dev/iwm/if_iwmreg.h>
154 #include <dev/iwm/if_iwmvar.h>
155 #include <dev/iwm/if_iwm_debug.h>
156 #include <dev/iwm/if_iwm_util.h>
157 #include <dev/iwm/if_iwm_binding.h>
158 #include <dev/iwm/if_iwm_phy_db.h>
159 #include <dev/iwm/if_iwm_mac_ctxt.h>
160 #include <dev/iwm/if_iwm_phy_ctxt.h>
161 #include <dev/iwm/if_iwm_time_event.h>
162 #include <dev/iwm/if_iwm_power.h>
163 #include <dev/iwm/if_iwm_scan.h>
164
165 #include <dev/iwm/if_iwm_pcie_trans.h>
166 #include <dev/iwm/if_iwm_led.h>
167
168 const uint8_t iwm_nvm_channels[] = {
169         /* 2.4 GHz */
170         1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
171         /* 5 GHz */
172         36, 40, 44, 48, 52, 56, 60, 64,
173         100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
174         149, 153, 157, 161, 165
175 };
176 _Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
177     "IWM_NUM_CHANNELS is too small");
178
179 const uint8_t iwm_nvm_channels_8000[] = {
180         /* 2.4 GHz */
181         1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
182         /* 5 GHz */
183         36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
184         96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
185         149, 153, 157, 161, 165, 169, 173, 177, 181
186 };
187 _Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
188     "IWM_NUM_CHANNELS_8000 is too small");
189
190 #define IWM_NUM_2GHZ_CHANNELS   14
191 #define IWM_N_HW_ADDR_MASK      0xF
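/*
 * Note: the first IWM_NUM_2GHZ_CHANNELS entries of each channel list above
 * are the 2.4 GHz channels; the rest are 5 GHz.  IWM_N_HW_ADDR_MASK is
 * applied to the NVM's count of reserved MAC addresses (see
 * iwm_get_n_hw_addrs()).
 */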
192
193 /*
194  * XXX For now, there's simply a fixed set of rate table entries
195  * that are populated.
196  */
197 const struct iwm_rate {
198         uint8_t rate;
199         uint8_t plcp;
200 } iwm_rates[] = {
201         {   2,  IWM_RATE_1M_PLCP  },
202         {   4,  IWM_RATE_2M_PLCP  },
203         {  11,  IWM_RATE_5M_PLCP  },
204         {  22,  IWM_RATE_11M_PLCP },
205         {  12,  IWM_RATE_6M_PLCP  },
206         {  18,  IWM_RATE_9M_PLCP  },
207         {  24,  IWM_RATE_12M_PLCP },
208         {  36,  IWM_RATE_18M_PLCP },
209         {  48,  IWM_RATE_24M_PLCP },
210         {  72,  IWM_RATE_36M_PLCP },
211         {  96,  IWM_RATE_48M_PLCP },
212         { 108,  IWM_RATE_54M_PLCP },
213 };
214 #define IWM_RIDX_CCK    0
215 #define IWM_RIDX_OFDM   4
216 #define IWM_RIDX_MAX    (nitems(iwm_rates)-1)
217 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
218 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
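/*
 * Rates above are in 500 kb/s units (2 == 1 Mb/s, 108 == 54 Mb/s), matching
 * the net80211 rate convention; "plcp" is the PLCP signal value the firmware
 * expects for that rate.  The first IWM_RIDX_OFDM entries are CCK rates and
 * the remainder are OFDM, which is what the IWM_RIDX_IS_CCK()/_IS_OFDM()
 * macros rely on.
 */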
219
220 struct iwm_nvm_section {
221         uint16_t length;
222         uint8_t *data;
223 };
224
225 static int      iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
226 static int      iwm_firmware_store_section(struct iwm_softc *,
227                                            enum iwm_ucode_type,
228                                            const uint8_t *, size_t);
229 static int      iwm_set_default_calib(struct iwm_softc *, const void *);
230 static void     iwm_fw_info_free(struct iwm_fw_info *);
231 static int      iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
232 static void     iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
233 static int      iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
234                                      bus_size_t, bus_size_t);
235 static void     iwm_dma_contig_free(struct iwm_dma_info *);
236 static int      iwm_alloc_fwmem(struct iwm_softc *);
237 static int      iwm_alloc_sched(struct iwm_softc *);
238 static int      iwm_alloc_kw(struct iwm_softc *);
239 static int      iwm_alloc_ict(struct iwm_softc *);
240 static int      iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
241 static void     iwm_disable_rx_dma(struct iwm_softc *);
242 static void     iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
243 static void     iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
244 static int      iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
245                                   int);
246 static void     iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
247 static void     iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
248 static void     iwm_enable_interrupts(struct iwm_softc *);
249 static void     iwm_restore_interrupts(struct iwm_softc *);
250 static void     iwm_disable_interrupts(struct iwm_softc *);
251 static void     iwm_ict_reset(struct iwm_softc *);
252 static int      iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
253 static void     iwm_stop_device(struct iwm_softc *);
254 static void     iwm_mvm_nic_config(struct iwm_softc *);
255 static int      iwm_nic_rx_init(struct iwm_softc *);
256 static int      iwm_nic_tx_init(struct iwm_softc *);
257 static int      iwm_nic_init(struct iwm_softc *);
258 static int      iwm_enable_txq(struct iwm_softc *, int, int, int);
259 static int      iwm_post_alive(struct iwm_softc *);
260 static int      iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
261                                    uint16_t, uint8_t *, uint16_t *);
262 static int      iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
263                                      uint16_t *, size_t);
264 static uint32_t iwm_eeprom_channel_flags(uint16_t);
265 static void     iwm_add_channel_band(struct iwm_softc *,
266                     struct ieee80211_channel[], int, int *, int, size_t,
267                     const uint8_t[]);
268 static void     iwm_init_channel_map(struct ieee80211com *, int, int *,
269                     struct ieee80211_channel[]);
270 static int      iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
271                                    const uint16_t *, const uint16_t *,
272                                    const uint16_t *, const uint16_t *,
273                                    const uint16_t *);
274 static void     iwm_set_hw_address_8000(struct iwm_softc *,
275                                         struct iwm_nvm_data *,
276                                         const uint16_t *, const uint16_t *);
277 static int      iwm_get_sku(const struct iwm_softc *, const uint16_t *,
278                             const uint16_t *);
279 static int      iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
280 static int      iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
281                                   const uint16_t *);
282 static int      iwm_get_n_hw_addrs(const struct iwm_softc *,
283                                    const uint16_t *);
284 static void     iwm_set_radio_cfg(const struct iwm_softc *,
285                                   struct iwm_nvm_data *, uint32_t);
286 static int      iwm_parse_nvm_sections(struct iwm_softc *,
287                                        struct iwm_nvm_section *);
288 static int      iwm_nvm_init(struct iwm_softc *);
289 static int      iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
290                                        const uint8_t *, uint32_t);
291 static int      iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
292                                         const uint8_t *, uint32_t);
293 static int      iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
294 static int      iwm_load_cpu_sections_8000(struct iwm_softc *,
295                                            struct iwm_fw_sects *, int , int *);
296 static int      iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
297 static int      iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
298 static int      iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
299 static int      iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
300 static int      iwm_send_phy_cfg_cmd(struct iwm_softc *);
301 static int      iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
302                                               enum iwm_ucode_type);
303 static int      iwm_run_init_mvm_ucode(struct iwm_softc *, int);
304 static int      iwm_rx_addbuf(struct iwm_softc *, int, int);
305 static int      iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
306 static int      iwm_mvm_get_signal_strength(struct iwm_softc *,
307                                             struct iwm_rx_phy_info *);
308 static void     iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
309                                       struct iwm_rx_packet *,
310                                       struct iwm_rx_data *);
311 static int      iwm_get_noise(struct iwm_softc *sc,
312                     const struct iwm_mvm_statistics_rx_non_phy *);
313 static void     iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
314                                    struct iwm_rx_data *);
315 static int      iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
316                                          struct iwm_rx_packet *,
317                                          struct iwm_node *);
318 static void     iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
319                                   struct iwm_rx_data *);
320 static void     iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
321 #if 0
322 static void     iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
323                                  uint16_t);
324 #endif
325 static const struct iwm_rate *
326         iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
327                         struct ieee80211_frame *, struct iwm_tx_cmd *);
328 static int      iwm_tx(struct iwm_softc *, struct mbuf *,
329                        struct ieee80211_node *, int);
330 static int      iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
331                              const struct ieee80211_bpf_params *);
332 static int      iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
333                                                 struct iwm_mvm_add_sta_cmd_v7 *,
334                                                 int *);
335 static int      iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
336                                        int);
337 static int      iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
338 static int      iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
339 static int      iwm_mvm_add_int_sta_common(struct iwm_softc *,
340                                            struct iwm_int_sta *,
341                                            const uint8_t *, uint16_t, uint16_t);
342 static int      iwm_mvm_add_aux_sta(struct iwm_softc *);
343 static int      iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
344 static int      iwm_auth(struct ieee80211vap *, struct iwm_softc *);
345 static int      iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
346 static int      iwm_release(struct iwm_softc *, struct iwm_node *);
347 static struct ieee80211_node *
348                 iwm_node_alloc(struct ieee80211vap *,
349                                const uint8_t[IEEE80211_ADDR_LEN]);
350 static void     iwm_setrates(struct iwm_softc *, struct iwm_node *);
351 static int      iwm_media_change(struct ifnet *);
352 static int      iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
353 static void     iwm_endscan_cb(void *, int);
354 static void     iwm_mvm_fill_sf_command(struct iwm_softc *,
355                                         struct iwm_sf_cfg_cmd *,
356                                         struct ieee80211_node *);
357 static int      iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
358 static int      iwm_send_bt_init_conf(struct iwm_softc *);
359 static int      iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
360 static void     iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
361 static int      iwm_init_hw(struct iwm_softc *);
362 static void     iwm_init(struct iwm_softc *);
363 static void     iwm_start(struct iwm_softc *);
364 static void     iwm_stop(struct iwm_softc *);
365 static void     iwm_watchdog(void *);
366 static void     iwm_parent(struct ieee80211com *);
367 #ifdef IWM_DEBUG
368 static const char *
369                 iwm_desc_lookup(uint32_t);
370 static void     iwm_nic_error(struct iwm_softc *);
371 static void     iwm_nic_umac_error(struct iwm_softc *);
372 #endif
373 static void     iwm_notif_intr(struct iwm_softc *);
374 static void     iwm_intr(void *);
375 static int      iwm_attach(device_t);
376 static int      iwm_is_valid_ether_addr(uint8_t *);
377 static void     iwm_preinit(void *);
378 static int      iwm_detach_local(struct iwm_softc *sc, int);
379 static void     iwm_init_task(void *);
380 static void     iwm_radiotap_attach(struct iwm_softc *);
381 static struct ieee80211vap *
382                 iwm_vap_create(struct ieee80211com *,
383                                const char [IFNAMSIZ], int,
384                                enum ieee80211_opmode, int,
385                                const uint8_t [IEEE80211_ADDR_LEN],
386                                const uint8_t [IEEE80211_ADDR_LEN]);
387 static void     iwm_vap_delete(struct ieee80211vap *);
388 static void     iwm_scan_start(struct ieee80211com *);
389 static void     iwm_scan_end(struct ieee80211com *);
390 static void     iwm_update_mcast(struct ieee80211com *);
391 static void     iwm_set_channel(struct ieee80211com *);
392 static void     iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
393 static void     iwm_scan_mindwell(struct ieee80211_scan_state *);
394 static int      iwm_detach(device_t);
395
396 /*
397  * Firmware parser.
398  */
399
400 static int
401 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
402 {
403         const struct iwm_fw_cscheme_list *l = (const void *)data;
404
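        /*
         * The TLV payload is a count followed by that many cipher scheme
         * entries; make sure the payload is at least as large as it claims.
         */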
405         if (dlen < sizeof(*l) ||
406             dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
407                 return EINVAL;
408
409         /* we don't actually store anything for now, always use s/w crypto */
410
411         return 0;
412 }
413
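/*
 * Store one firmware section (the payload of a SEC_RT/SEC_INIT/SEC_WOWLAN
 * TLV) into sc_fw.fw_sects[type]; the accumulated sections are later
 * written to the device by iwm_load_firmware().
 */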
414 static int
415 iwm_firmware_store_section(struct iwm_softc *sc,
416     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
417 {
418         struct iwm_fw_sects *fws;
419         struct iwm_fw_onesect *fwone;
420
421         if (type >= IWM_UCODE_TYPE_MAX)
422                 return EINVAL;
423         if (dlen < sizeof(uint32_t))
424                 return EINVAL;
425
426         fws = &sc->sc_fw.fw_sects[type];
427         if (fws->fw_count >= IWM_UCODE_SECT_MAX)
428                 return EINVAL;
429
430         fwone = &fws->fw_sect[fws->fw_count];
431
432         /* the first 32 bits are the device load offset */
433         memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
434
435         /* rest is data */
436         fwone->fws_data = data + sizeof(uint32_t);
437         fwone->fws_len = dlen - sizeof(uint32_t);
438
439         fws->fw_count++;
440         fws->fw_totlen += fwone->fws_len;
441
442         return 0;
443 }
444
445 /* iwlwifi: iwl-drv.c */
446 struct iwm_tlv_calib_data {
447         uint32_t ucode_type;
448         struct iwm_tlv_calib_ctrl calib;
449 } __packed;
450
451 static int
452 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
453 {
454         const struct iwm_tlv_calib_data *def_calib = data;
455         uint32_t ucode_type = le32toh(def_calib->ucode_type);
456
457         if (ucode_type >= IWM_UCODE_TYPE_MAX) {
458                 device_printf(sc->sc_dev,
459                     "Wrong ucode_type %u for default "
460                     "calibration.\n", ucode_type);
461                 return EINVAL;
462         }
463
464         sc->sc_default_calib[ucode_type].flow_trigger =
465             def_calib->calib.flow_trigger;
466         sc->sc_default_calib[ucode_type].event_trigger =
467             def_calib->calib.event_trigger;
468
469         return 0;
470 }
471
472 static void
473 iwm_fw_info_free(struct iwm_fw_info *fw)
474 {
475         firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
476         fw->fw_fp = NULL;
477         /* don't touch fw->fw_status */
478         memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
479 }
480
481 static int
482 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
483 {
484         struct iwm_fw_info *fw = &sc->sc_fw;
485         const struct iwm_tlv_ucode_header *uhdr;
486         struct iwm_ucode_tlv tlv;
487         enum iwm_ucode_tlv_type tlv_type;
488         const struct firmware *fwp;
489         const uint8_t *data;
490         int error = 0;
491         size_t len;
492
493         if (fw->fw_status == IWM_FW_STATUS_DONE &&
494             ucode_type != IWM_UCODE_TYPE_INIT)
495                 return 0;
496
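        /*
         * Serialize firmware loads: wait while another thread has a load in
         * progress; it will wakeup(&sc->sc_fw) at the end of this function.
         */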
497         while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
498                 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
499         fw->fw_status = IWM_FW_STATUS_INPROGRESS;
500
501         if (fw->fw_fp != NULL)
502                 iwm_fw_info_free(fw);
503
504         /*
505          * Load firmware into driver memory.
506          * fw_fp will be set.
507          */
508         IWM_UNLOCK(sc);
509         fwp = firmware_get(sc->sc_fwname);
510         IWM_LOCK(sc);
511         if (fwp == NULL) {
512                 device_printf(sc->sc_dev,
513                     "could not read firmware %s\n", sc->sc_fwname);
514                 error = ENOENT;
515                 goto out;
516         }
517         fw->fw_fp = fwp;
518
519         /* (Re-)Initialize default values. */
520         sc->sc_capaflags = 0;
521         sc->sc_capa_n_scan_channels = IWM_MAX_NUM_SCAN_CHANNELS;
522         memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
523         memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
524
525         /*
526          * Parse firmware contents
527          */
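        /*
         * The image begins with a struct iwm_tlv_ucode_header (a leading zero
         * word, the TLV magic and the ucode version, among other fields),
         * followed by a sequence of (type, length, data) TLV records whose
         * data is padded to a 4-byte boundary; hence the roundup(tlv_len, 4)
         * below.
         */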
528
529         uhdr = (const void *)fw->fw_fp->data;
530         if (*(const uint32_t *)fw->fw_fp->data != 0
531             || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
532                 device_printf(sc->sc_dev, "invalid firmware %s\n",
533                     sc->sc_fwname);
534                 error = EINVAL;
535                 goto out;
536         }
537
538         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
539             IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
540             IWM_UCODE_MINOR(le32toh(uhdr->ver)),
541             IWM_UCODE_API(le32toh(uhdr->ver)));
542         data = uhdr->data;
543         len = fw->fw_fp->datasize - sizeof(*uhdr);
544
545         while (len >= sizeof(tlv)) {
546                 size_t tlv_len;
547                 const void *tlv_data;
548
549                 memcpy(&tlv, data, sizeof(tlv));
550                 tlv_len = le32toh(tlv.length);
551                 tlv_type = le32toh(tlv.type);
552
553                 len -= sizeof(tlv);
554                 data += sizeof(tlv);
555                 tlv_data = data;
556
557                 if (len < tlv_len) {
558                         device_printf(sc->sc_dev,
559                             "firmware too short: %zu bytes\n",
560                             len);
561                         error = EINVAL;
562                         goto parse_out;
563                 }
564
565                 switch ((int)tlv_type) {
566                 case IWM_UCODE_TLV_PROBE_MAX_LEN:
567                         if (tlv_len < sizeof(uint32_t)) {
568                                 device_printf(sc->sc_dev,
569                                     "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
570                                     __func__,
571                                     (int) tlv_len);
572                                 error = EINVAL;
573                                 goto parse_out;
574                         }
575                         sc->sc_capa_max_probe_len
576                             = le32toh(*(const uint32_t *)tlv_data);
577                         /* limit it to something sensible */
578                         if (sc->sc_capa_max_probe_len >
579                             IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
580                                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
581                                     "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
582                                     "ridiculous\n", __func__);
583                                 error = EINVAL;
584                                 goto parse_out;
585                         }
586                         break;
587                 case IWM_UCODE_TLV_PAN:
588                         if (tlv_len) {
589                                 device_printf(sc->sc_dev,
590                                     "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
591                                     __func__,
592                                     (int) tlv_len);
593                                 error = EINVAL;
594                                 goto parse_out;
595                         }
596                         sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
597                         break;
598                 case IWM_UCODE_TLV_FLAGS:
599                         if (tlv_len < sizeof(uint32_t)) {
600                                 device_printf(sc->sc_dev,
601                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
602                                     __func__,
603                                     (int) tlv_len);
604                                 error = EINVAL;
605                                 goto parse_out;
606                         }
607                         /*
608                          * Apparently there can be many flags, but Linux driver
609                          * parses only the first one, and so do we.
610                          *
611                          * XXX: why does this override IWM_UCODE_TLV_PAN?
612                          * Intentional or a bug?  Observations from
613                          * current firmware file:
614                          *  1) TLV_PAN is parsed first
615                          *  2) TLV_FLAGS contains TLV_FLAGS_PAN
616                          * ==> this resets TLV_PAN to itself... hnnnk
617                          */
618                         sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
619                         break;
620                 case IWM_UCODE_TLV_CSCHEME:
621                         if ((error = iwm_store_cscheme(sc,
622                             tlv_data, tlv_len)) != 0) {
623                                 device_printf(sc->sc_dev,
624                                     "%s: iwm_store_cscheme(): returned %d\n",
625                                     __func__,
626                                     error);
627                                 goto parse_out;
628                         }
629                         break;
630                 case IWM_UCODE_TLV_NUM_OF_CPU: {
631                         uint32_t num_cpu;
632                         if (tlv_len != sizeof(uint32_t)) {
633                                 device_printf(sc->sc_dev,
634                                     "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
635                                     __func__,
636                                     (int) tlv_len);
637                                 error = EINVAL;
638                                 goto parse_out;
639                         }
640                         num_cpu = le32toh(*(const uint32_t *)tlv_data);
641                         if (num_cpu < 1 || num_cpu > 2) {
642                                 device_printf(sc->sc_dev,
643                                     "%s: Driver supports only 1 or 2 CPUs\n",
644                                     __func__);
645                                 error = EINVAL;
646                                 goto parse_out;
647                         }
648                         break;
649                 }
650                 case IWM_UCODE_TLV_SEC_RT:
651                         if ((error = iwm_firmware_store_section(sc,
652                             IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0) {
653                                 device_printf(sc->sc_dev,
654                                     "%s: IWM_UCODE_TYPE_REGULAR: iwm_firmware_store_section() failed; %d\n",
655                                     __func__,
656                                     error);
657                                 goto parse_out;
658                         }
659                         break;
660                 case IWM_UCODE_TLV_SEC_INIT:
661                         if ((error = iwm_firmware_store_section(sc,
662                             IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0) {
663                                 device_printf(sc->sc_dev,
664                                     "%s: IWM_UCODE_TYPE_INIT: iwm_firmware_store_section() failed; %d\n",
665                                     __func__,
666                                     error);
667                                 goto parse_out;
668                         }
669                         break;
670                 case IWM_UCODE_TLV_SEC_WOWLAN:
671                         if ((error = iwm_firmware_store_section(sc,
672                             IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0) {
673                                 device_printf(sc->sc_dev,
674                                     "%s: IWM_UCODE_TYPE_WOW: iwm_firmware_store_section() failed; %d\n",
675                                     __func__,
676                                     error);
677                                 goto parse_out;
678                         }
679                         break;
680                 case IWM_UCODE_TLV_DEF_CALIB:
681                         if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
682                                 device_printf(sc->sc_dev,
683                                     "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%d) != sizeof(iwm_tlv_calib_data) (%d)\n",
684                                     __func__,
685                                     (int) tlv_len,
686                                     (int) sizeof(struct iwm_tlv_calib_data));
687                                 error = EINVAL;
688                                 goto parse_out;
689                         }
690                         if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
691                                 device_printf(sc->sc_dev,
692                                     "%s: iwm_set_default_calib() failed: %d\n",
693                                     __func__,
694                                     error);
695                                 goto parse_out;
696                         }
697                         break;
698                 case IWM_UCODE_TLV_PHY_SKU:
699                         if (tlv_len != sizeof(uint32_t)) {
700                                 error = EINVAL;
701                                 device_printf(sc->sc_dev,
702                                     "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) != sizeof(uint32_t)\n",
703                                     __func__,
704                                     (int) tlv_len);
705                                 goto parse_out;
706                         }
707                         sc->sc_fw_phy_config =
708                             le32toh(*(const uint32_t *)tlv_data);
709                         break;
710
711                 case IWM_UCODE_TLV_API_CHANGES_SET: {
712                         const struct iwm_ucode_api *api;
713                         if (tlv_len != sizeof(*api)) {
714                                 error = EINVAL;
715                                 goto parse_out;
716                         }
717                         api = (const struct iwm_ucode_api *)tlv_data;
718                         /* Flags may exceed 32 bits in future firmware. */
719                         if (le32toh(api->api_index) > 0) {
720                                 device_printf(sc->sc_dev,
721                                     "unsupported API index %d\n",
722                                     le32toh(api->api_index));
723                                 goto parse_out;
724                         }
725                         sc->sc_ucode_api = le32toh(api->api_flags);
726                         break;
727                 }
728
729                 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
730                         const struct iwm_ucode_capa *capa;
731                         int idx, i;
732                         if (tlv_len != sizeof(*capa)) {
733                                 error = EINVAL;
734                                 goto parse_out;
735                         }
736                         capa = (const struct iwm_ucode_capa *)tlv_data;
737                         idx = le32toh(capa->api_index);
738                         if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
739                                 device_printf(sc->sc_dev,
740                                     "unsupported API index %d\n", idx);
741                                 goto parse_out;
742                         }
743                         for (i = 0; i < 32; i++) {
744                                 if ((le32toh(capa->api_capa) & (1U << i)) == 0)
745                                         continue;
746                                 setbit(sc->sc_enabled_capa, i + (32 * idx));
747                         }
748                         break;
749                 }
750
751                 case 48: /* undocumented TLV */
752                 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
753                 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
754                         /* ignore, not used by current driver */
755                         break;
756
757                 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
758                         if ((error = iwm_firmware_store_section(sc,
759                             IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
760                             tlv_len)) != 0)
761                                 goto parse_out;
762                         break;
763
764                 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
765                         if (tlv_len != sizeof(uint32_t)) {
766                                 error = EINVAL;
767                                 goto parse_out;
768                         }
769                         sc->sc_capa_n_scan_channels =
770                           le32toh(*(const uint32_t *)tlv_data);
771                         break;
772
773                 case IWM_UCODE_TLV_FW_VERSION:
774                         if (tlv_len != sizeof(uint32_t) * 3) {
775                                 error = EINVAL;
776                                 goto parse_out;
777                         }
778                         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
779                             "%d.%d.%d",
780                             le32toh(((const uint32_t *)tlv_data)[0]),
781                             le32toh(((const uint32_t *)tlv_data)[1]),
782                             le32toh(((const uint32_t *)tlv_data)[2]));
783                         break;
784
785                 default:
786                         device_printf(sc->sc_dev,
787                             "%s: unknown firmware section %d, abort\n",
788                             __func__, tlv_type);
789                         error = EINVAL;
790                         goto parse_out;
791                 }
792
793                 len -= roundup(tlv_len, 4);
794                 data += roundup(tlv_len, 4);
795         }
796
797         KASSERT(error == 0, ("unhandled error"));
798
799  parse_out:
800         if (error) {
801                 device_printf(sc->sc_dev, "firmware parse error %d, "
802                     "section type %d\n", error, tlv_type);
803         }
804
805         if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
806                 device_printf(sc->sc_dev,
807                     "device uses unsupported power ops\n");
808                 error = ENOTSUP;
809         }
810
811  out:
812         if (error) {
813                 fw->fw_status = IWM_FW_STATUS_NONE;
814                 if (fw->fw_fp != NULL)
815                         iwm_fw_info_free(fw);
816         } else
817                 fw->fw_status = IWM_FW_STATUS_DONE;
818         wakeup(&sc->sc_fw);
819
820         return error;
821 }
822
823 /*
824  * DMA resource routines
825  */
826
827 static void
828 iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
829 {
830         if (error != 0)
831                 return;
832         KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
833         *(bus_addr_t *)arg = segs[0].ds_addr;
834 }
835
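/*
 * Allocate a physically contiguous DMA buffer: create a single-segment,
 * 32-bit addressable tag with the requested alignment, allocate zeroed
 * coherent memory, and load the map so that dma->paddr holds the bus
 * address of the buffer.
 */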
836 static int
837 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
838     bus_size_t size, bus_size_t alignment)
839 {
840         int error;
841
842         dma->tag = NULL;
843         dma->map = NULL;
844         dma->size = size;
845         dma->vaddr = NULL;
846
847         error = bus_dma_tag_create(tag, alignment,
848             0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
849             1, size, 0, NULL, NULL, &dma->tag);
850         if (error != 0)
851                 goto fail;
852
853         error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
854             BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
855         if (error != 0)
856                 goto fail;
857
858         error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
859             iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
860         if (error != 0) {
861                 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
862                 dma->vaddr = NULL;
863                 goto fail;
864         }
865
866         bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
867
868         return 0;
869
870 fail:
871         iwm_dma_contig_free(dma);
872
873         return error;
874 }
875
876 static void
877 iwm_dma_contig_free(struct iwm_dma_info *dma)
878 {
879         if (dma->vaddr != NULL) {
880                 bus_dmamap_sync(dma->tag, dma->map,
881                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
882                 bus_dmamap_unload(dma->tag, dma->map);
883                 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
884                 dma->vaddr = NULL;
885         }
886         if (dma->tag != NULL) {
887                 bus_dma_tag_destroy(dma->tag);
888                 dma->tag = NULL;
889         }
890 }
891
892 /* fwmem is used to load firmware onto the card */
893 static int
894 iwm_alloc_fwmem(struct iwm_softc *sc)
895 {
896         /* Must be aligned on a 16-byte boundary. */
897         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
898             sc->sc_fwdmasegsz, 16);
899 }
900
901 /* TX scheduler byte count tables; allocated, but not updated yet (iwm_update_sched() is disabled). */
902 static int
903 iwm_alloc_sched(struct iwm_softc *sc)
904 {
905         /* TX scheduler rings must be aligned on a 1KB boundary. */
906         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
907             nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
908 }
909
910 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
911 static int
912 iwm_alloc_kw(struct iwm_softc *sc)
913 {
914         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
915 }
916
917 /* interrupt cause table */
918 static int
919 iwm_alloc_ict(struct iwm_softc *sc)
920 {
921         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
922             IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
923 }
924
925 static int
926 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
927 {
928         bus_size_t size;
929         int i, error;
930
931         ring->cur = 0;
932
933         /* Allocate RX descriptors (256-byte aligned). */
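        /*
         * Each descriptor is a single 32-bit word which iwm_rx_addbuf()
         * fills with the RX buffer's bus address (shifted right by 8 bits).
         */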
934         size = IWM_RX_RING_COUNT * sizeof(uint32_t);
935         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
936         if (error != 0) {
937                 device_printf(sc->sc_dev,
938                     "could not allocate RX ring DMA memory\n");
939                 goto fail;
940         }
941         ring->desc = ring->desc_dma.vaddr;
942
943         /* Allocate RX status area (16-byte aligned). */
944         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
945             sizeof(*ring->stat), 16);
946         if (error != 0) {
947                 device_printf(sc->sc_dev,
948                     "could not allocate RX status DMA memory\n");
949                 goto fail;
950         }
951         ring->stat = ring->stat_dma.vaddr;
952
953         /* Create RX buffer DMA tag. */
954         error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
955             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
956             IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
957         if (error != 0) {
958                 device_printf(sc->sc_dev,
959                     "%s: could not create RX buf DMA tag, error %d\n",
960                     __func__, error);
961                 goto fail;
962         }
963
964         /* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
965         error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
966         if (error != 0) {
967                 device_printf(sc->sc_dev,
968                     "%s: could not create RX buf DMA map, error %d\n",
969                     __func__, error);
970                 goto fail;
971         }
972         /*
973          * Allocate and map RX buffers.
974          */
975         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
976                 struct iwm_rx_data *data = &ring->data[i];
977                 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
978                 if (error != 0) {
979                         device_printf(sc->sc_dev,
980                             "%s: could not create RX buf DMA map, error %d\n",
981                             __func__, error);
982                         goto fail;
983                 }
984                 data->m = NULL;
985
986                 if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
987                         goto fail;
988                 }
989         }
990         return 0;
991
992 fail:   iwm_free_rx_ring(sc, ring);
993         return error;
994 }
995
996 static void
997 iwm_disable_rx_dma(struct iwm_softc *sc)
998 {
999         /* XXX conditional nic locks are stupid */
1000         /* XXX print out if we can't lock the NIC? */
1001         if (iwm_nic_lock(sc)) {
1002                 /* XXX handle if RX stop doesn't finish? */
1003                 (void) iwm_pcie_rx_stop(sc);
1004                 iwm_nic_unlock(sc);
1005         }
1006 }
1007
1008 static void
1009 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1010 {
1011         /* Reset the ring state */
1012         ring->cur = 0;
1013
1014         /*
1015          * The hw rx ring index in shared memory must also be cleared,
1016          * otherwise the discrepancy can cause reprocessing chaos.
1017          */
1018         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1019 }
1020
1021 static void
1022 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1023 {
1024         int i;
1025
1026         iwm_dma_contig_free(&ring->desc_dma);
1027         iwm_dma_contig_free(&ring->stat_dma);
1028
1029         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1030                 struct iwm_rx_data *data = &ring->data[i];
1031
1032                 if (data->m != NULL) {
1033                         bus_dmamap_sync(ring->data_dmat, data->map,
1034                             BUS_DMASYNC_POSTREAD);
1035                         bus_dmamap_unload(ring->data_dmat, data->map);
1036                         m_freem(data->m);
1037                         data->m = NULL;
1038                 }
1039                 if (data->map != NULL) {
1040                         bus_dmamap_destroy(ring->data_dmat, data->map);
1041                         data->map = NULL;
1042                 }
1043         }
1044         if (ring->spare_map != NULL) {
1045                 bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1046                 ring->spare_map = NULL;
1047         }
1048         if (ring->data_dmat != NULL) {
1049                 bus_dma_tag_destroy(ring->data_dmat);
1050                 ring->data_dmat = NULL;
1051         }
1052 }
1053
1054 static int
1055 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1056 {
1057         bus_addr_t paddr;
1058         bus_size_t size;
1059         size_t maxsize;
1060         int nsegments;
1061         int i, error;
1062
1063         ring->qid = qid;
1064         ring->queued = 0;
1065         ring->cur = 0;
1066
1067         /* Allocate TX descriptors (256-byte aligned). */
1068         size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1069         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1070         if (error != 0) {
1071                 device_printf(sc->sc_dev,
1072                     "could not allocate TX ring DMA memory\n");
1073                 goto fail;
1074         }
1075         ring->desc = ring->desc_dma.vaddr;
1076
1077         /*
1078          * We only use rings 0 through 9 (4 EDCA queues plus the command queue),
1079          * so there is no need to allocate command space for the other rings.
1080          */
1081         if (qid > IWM_MVM_CMD_QUEUE)
1082                 return 0;
1083
1084         size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1085         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1086         if (error != 0) {
1087                 device_printf(sc->sc_dev,
1088                     "could not allocate TX cmd DMA memory\n");
1089                 goto fail;
1090         }
1091         ring->cmd = ring->cmd_dma.vaddr;
1092
1093         /* FW commands may require more mapped space than packets. */
1094         if (qid == IWM_MVM_CMD_QUEUE) {
1095                 maxsize = IWM_RBUF_SIZE;
1096                 nsegments = 1;
1097         } else {
1098                 maxsize = MCLBYTES;
1099                 nsegments = IWM_MAX_SCATTER - 2;
1100         }
1101
1102         error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1103             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
1104             nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
1105         if (error != 0) {
1106                 device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
1107                 goto fail;
1108         }
1109
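        /*
         * Carve the command DMA area into one struct iwm_device_cmd per TX
         * slot, recording each slot's command bus address and the bus address
         * of its embedded tx_cmd scratch field.
         */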
1110         paddr = ring->cmd_dma.paddr;
1111         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1112                 struct iwm_tx_data *data = &ring->data[i];
1113
1114                 data->cmd_paddr = paddr;
1115                 data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1116                     + offsetof(struct iwm_tx_cmd, scratch);
1117                 paddr += sizeof(struct iwm_device_cmd);
1118
1119                 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1120                 if (error != 0) {
1121                         device_printf(sc->sc_dev,
1122                             "could not create TX buf DMA map\n");
1123                         goto fail;
1124                 }
1125         }
1126         KASSERT(paddr == ring->cmd_dma.paddr + size,
1127             ("invalid physical address"));
1128         return 0;
1129
1130 fail:   iwm_free_tx_ring(sc, ring);
1131         return error;
1132 }
1133
1134 static void
1135 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1136 {
1137         int i;
1138
1139         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1140                 struct iwm_tx_data *data = &ring->data[i];
1141
1142                 if (data->m != NULL) {
1143                         bus_dmamap_sync(ring->data_dmat, data->map,
1144                             BUS_DMASYNC_POSTWRITE);
1145                         bus_dmamap_unload(ring->data_dmat, data->map);
1146                         m_freem(data->m);
1147                         data->m = NULL;
1148                 }
1149         }
1150         /* Clear TX descriptors. */
1151         memset(ring->desc, 0, ring->desc_dma.size);
1152         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1153             BUS_DMASYNC_PREWRITE);
1154         sc->qfullmsk &= ~(1 << ring->qid);
1155         ring->queued = 0;
1156         ring->cur = 0;
1157 }
1158
1159 static void
1160 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1161 {
1162         int i;
1163
1164         iwm_dma_contig_free(&ring->desc_dma);
1165         iwm_dma_contig_free(&ring->cmd_dma);
1166
1167         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1168                 struct iwm_tx_data *data = &ring->data[i];
1169
1170                 if (data->m != NULL) {
1171                         bus_dmamap_sync(ring->data_dmat, data->map,
1172                             BUS_DMASYNC_POSTWRITE);
1173                         bus_dmamap_unload(ring->data_dmat, data->map);
1174                         m_freem(data->m);
1175                         data->m = NULL;
1176                 }
1177                 if (data->map != NULL) {
1178                         bus_dmamap_destroy(ring->data_dmat, data->map);
1179                         data->map = NULL;
1180                 }
1181         }
1182         if (ring->data_dmat != NULL) {
1183                 bus_dma_tag_destroy(ring->data_dmat);
1184                 ring->data_dmat = NULL;
1185         }
1186 }
1187
1188 /*
1189  * High-level hardware frobbing routines
1190  */
1191
1192 static void
1193 iwm_enable_interrupts(struct iwm_softc *sc)
1194 {
1195         sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1196         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1197 }
1198
1199 static void
1200 iwm_restore_interrupts(struct iwm_softc *sc)
1201 {
1202         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1203 }
1204
1205 static void
1206 iwm_disable_interrupts(struct iwm_softc *sc)
1207 {
1208         /* disable interrupts */
1209         IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1210
1211         /* acknowledge all interrupts */
1212         IWM_WRITE(sc, IWM_CSR_INT, ~0);
1213         IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1214 }
1215
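/*
 * Reset the interrupt cause table (ICT): a DRAM table the device writes
 * interrupt causes into so the interrupt handler can read them from memory
 * instead of the INT CSR.  Clear the table, program its (4 KB aligned)
 * physical address, and switch the driver into ICT mode before re-enabling
 * interrupts.
 */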
1216 static void
1217 iwm_ict_reset(struct iwm_softc *sc)
1218 {
1219         iwm_disable_interrupts(sc);
1220
1221         /* Reset ICT table. */
1222         memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1223         sc->ict_cur = 0;
1224
1225         /* Set physical address of ICT table (4KB aligned). */
1226         IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1227             IWM_CSR_DRAM_INT_TBL_ENABLE
1228             | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1229             | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1230             | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1231
1232         /* Switch to ICT interrupt mode in driver. */
1233         sc->sc_flags |= IWM_FLAG_USE_ICT;
1234
1235         /* Re-enable interrupts. */
1236         IWM_WRITE(sc, IWM_CSR_INT, ~0);
1237         iwm_enable_interrupts(sc);
1238 }
1239
1240 /* iwlwifi pcie/trans.c */
1241
1242 /*
1243  * Since this .. hard-resets things, it's time to actually
1244  * mark the first vap (if any) as having no mac context.
1245  * It's annoying, but since the driver is potentially being
1246  * stop/start'ed whilst active (thanks openbsd port!) we
1247  * have to correctly track this.
1248  */
1249 static void
1250 iwm_stop_device(struct iwm_softc *sc)
1251 {
1252         struct ieee80211com *ic = &sc->sc_ic;
1253         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1254         int chnl, qid;
1255         uint32_t mask = 0;
1256
1257         /* tell the device to stop sending interrupts */
1258         iwm_disable_interrupts(sc);
1259
1260         /*
1261          * FreeBSD-local: mark the first vap as not-uploaded,
1262          * so the next transition through auth/assoc
1263          * will correctly populate the MAC context.
1264          */
1265         if (vap) {
1266                 struct iwm_vap *iv = IWM_VAP(vap);
1267                 iv->is_uploaded = 0;
1268         }
1269
1270         /* device going down, Stop using ICT table */
1271         sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1272
1273         /* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
1274
1275         iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1276
1277         if (iwm_nic_lock(sc)) {
1278                 /* Stop each Tx DMA channel */
1279                 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1280                         IWM_WRITE(sc,
1281                             IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1282                         mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1283                 }
1284
1285                 /* Wait for DMA channels to be idle */
1286                 if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1287                     5000)) {
1288                         device_printf(sc->sc_dev,
1289                             "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1290                             IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1291                 }
1292                 iwm_nic_unlock(sc);
1293         }
1294         iwm_disable_rx_dma(sc);
1295
1296         /* Stop RX ring. */
1297         iwm_reset_rx_ring(sc, &sc->rxq);
1298
1299         /* Reset all TX rings. */
1300         for (qid = 0; qid < nitems(sc->txq); qid++)
1301                 iwm_reset_tx_ring(sc, &sc->txq[qid]);
1302
1303         /*
1304          * Power-down device's busmaster DMA clocks
1305          */
1306         iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1307         DELAY(5);
1308
1309         /* Make sure (redundant) we've released our request to stay awake */
1310         IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1311             IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1312
1313         /* Stop the device, and put it in low power state */
1314         iwm_apm_stop(sc);
1315
1316         /* Upon stop, the APM issues an interrupt if HW RF kill is set.
1317          * so clear the interrupt again here.
1318          */
1319         iwm_disable_interrupts(sc);
1320         /* stop and reset the on-board processor */
1321         IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1322
1323         /*
1324          * Even if we stop the HW, we still want the RF kill
1325          * interrupt
1326          */
1327         iwm_enable_rfkill_int(sc);
1328         iwm_check_rfkill(sc);
1329 }
1330
1331 /* iwlwifi: mvm/ops.c */
1332 static void
1333 iwm_mvm_nic_config(struct iwm_softc *sc)
1334 {
1335         uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1336         uint32_t reg_val = 0;
1337
1338         radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1339             IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1340         radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1341             IWM_FW_PHY_CFG_RADIO_STEP_POS;
1342         radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1343             IWM_FW_PHY_CFG_RADIO_DASH_POS;
1344
1345         /* SKU control */
1346         reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1347             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1348         reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1349             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1350
1351         /* radio configuration */
1352         reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1353         reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1354         reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1355
1356         IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1357
1358         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1359             "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1360             radio_cfg_step, radio_cfg_dash);
1361
1362         /*
1363          * W/A : NIC is stuck in a reset state after Early PCIe power off
1364          * (PCIe power is lost before PERST# is asserted), causing ME FW
1365          * to lose ownership and be unable to obtain it back.
1366          */
1367         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1368                 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1369                     IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1370                     ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1371         }
1372 }
1373
1374 static int
1375 iwm_nic_rx_init(struct iwm_softc *sc)
1376 {
1377         if (!iwm_nic_lock(sc))
1378                 return EBUSY;
1379
1380         /*
1381          * Initialize RX ring.  This is from the iwn driver.
1382          */
1383         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1384
1385         /* stop DMA */
1386         iwm_disable_rx_dma(sc);
1387         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1388         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1389         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1390         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1391
1392         /* Set physical address of RX ring (256-byte aligned). */
1393         IWM_WRITE(sc,
1394             IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1395
1396         /* Set physical address of RX status (16-byte aligned). */
1397         IWM_WRITE(sc,
1398             IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1399
1400         /* Enable RX. */
1401         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1402             IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
1403             IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |  /* HW bug */
1404             IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
1405             IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK        |
1406             (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1407             IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
1408             IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1409
1410         IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1411
1412         /* W/A for interrupt coalescing bug in 7260 and 3160 */
1413         if (sc->host_interrupt_operation_mode)
1414                 IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1415
1416         /*
1417          * Thus sayeth el jefe (iwlwifi) via a comment:
1418          *
1419          * This value should initially be 0 (before preparing any
1420          * RBs), should be 8 after preparing the first 8 RBs (for example)
1421          */
1422         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1423
1424         iwm_nic_unlock(sc);
1425
1426         return 0;
1427 }
1428
1429 static int
1430 iwm_nic_tx_init(struct iwm_softc *sc)
1431 {
1432         int qid;
1433
1434         if (!iwm_nic_lock(sc))
1435                 return EBUSY;
1436
1437         /* Deactivate TX scheduler. */
1438         iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1439
1440         /* Set physical address of "keep warm" page (16-byte aligned). */
1441         IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1442
1443         /* Initialize TX rings. */
1444         for (qid = 0; qid < nitems(sc->txq); qid++) {
1445                 struct iwm_tx_ring *txq = &sc->txq[qid];
1446
1447                 /* Set physical address of TX ring (256-byte aligned). */
1448                 IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1449                     txq->desc_dma.paddr >> 8);
1450                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1451                     "%s: loading ring %d descriptors (%p) at %lx\n",
1452                     __func__,
1453                     qid, txq->desc,
1454                     (unsigned long) (txq->desc_dma.paddr >> 8));
1455         }
1456
1457         iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1458
1459         iwm_nic_unlock(sc);
1460
1461         return 0;
1462 }
1463
1464 static int
1465 iwm_nic_init(struct iwm_softc *sc)
1466 {
1467         int error;
1468
1469         iwm_apm_init(sc);
1470         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1471                 iwm_set_pwr(sc);
1472
1473         iwm_mvm_nic_config(sc);
1474
1475         if ((error = iwm_nic_rx_init(sc)) != 0)
1476                 return error;
1477
1478         /*
1479          * Ditto for TX, from iwn
1480          */
1481         if ((error = iwm_nic_tx_init(sc)) != 0)
1482                 return error;
1483
1484         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1485             "%s: shadow registers enabled\n", __func__);
1486         IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1487
1488         return 0;
1489 }
1490
1491 const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
1492         IWM_MVM_TX_FIFO_VO,
1493         IWM_MVM_TX_FIFO_VI,
1494         IWM_MVM_TX_FIFO_BE,
1495         IWM_MVM_TX_FIFO_BK,
1496 };
1497
1498 static int
1499 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1500 {
1501         if (!iwm_nic_lock(sc)) {
1502                 device_printf(sc->sc_dev,
1503                     "%s: cannot enable txq %d\n",
1504                     __func__,
1505                     qid);
1506                 return EBUSY;
1507         }
1508
1509         IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1510
1511         if (qid == IWM_MVM_CMD_QUEUE) {
1512                 /* deactivate before configuration */
1513                 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1514                     (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1515                     | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1516
1517                 iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1518
1519                 iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1520
1521                 iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1522                 /* Set scheduler window size and frame limit. */
1523                 iwm_write_mem32(sc,
1524                     sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1525                     sizeof(uint32_t),
1526                     ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1527                     IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1528                     ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1529                     IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1530
1531                 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1532                     (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1533                     (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1534                     (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1535                     IWM_SCD_QUEUE_STTS_REG_MSK);
1536         } else {
1537                 struct iwm_scd_txq_cfg_cmd cmd;
1538                 int error;
1539
1540                 iwm_nic_unlock(sc);
1541
1542                 memset(&cmd, 0, sizeof(cmd));
1543                 cmd.scd_queue = qid;
1544                 cmd.enable = 1;
1545                 cmd.sta_id = sta_id;
1546                 cmd.tx_fifo = fifo;
1547                 cmd.aggregate = 0;
1548                 cmd.window = IWM_FRAME_LIMIT;
1549
1550                 error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1551                     sizeof(cmd), &cmd);
1552                 if (error) {
1553                         device_printf(sc->sc_dev,
1554                             "cannot enable txq %d\n", qid);
1555                         return error;
1556                 }
1557
1558                 if (!iwm_nic_lock(sc))
1559                         return EBUSY;
1560         }
1561
1562         iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1563             iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1564
1565         iwm_nic_unlock(sc);
1566
1567         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1568             __func__, qid, fifo);
1569
1570         return 0;
1571 }
1572
1573 static int
1574 iwm_post_alive(struct iwm_softc *sc)
1575 {
1576         int nwords;
1577         int error, chnl;
1578         uint32_t base;
1579
1580         if (!iwm_nic_lock(sc))
1581                 return EBUSY;
1582
1583         base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1584         if (sc->sched_base != base) {
1585                 device_printf(sc->sc_dev,
1586                     "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1587                     __func__, sc->sched_base, base);
1588         }
1589
1590         iwm_ict_reset(sc);
1591
1592         /* Clear TX scheduler state in SRAM. */
1593         nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1594             IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
1595             / sizeof(uint32_t);
1596         error = iwm_write_mem(sc,
1597             sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1598             NULL, nwords);
1599         if (error)
1600                 goto out;
1601
1602         /* Set physical address of TX scheduler rings (1KB aligned). */
1603         iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1604
1605         iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1606
1607         iwm_nic_unlock(sc);
1608
1609         /* enable command channel */
1610         error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
1611         if (error)
1612                 return error;
1613
1614         if (!iwm_nic_lock(sc))
1615                 return EBUSY;
1616
1617         iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1618
1619         /* Enable DMA channels. */
1620         for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1621                 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1622                     IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1623                     IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1624         }
1625
1626         IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1627             IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1628
1629         /* Enable L1-Active */
1630         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
1631                 iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1632                     IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1633         }
1634
1635  out:
1636         iwm_nic_unlock(sc);
1637         return error;
1638 }
1639
1640 /*
1641  * NVM read access and content parsing.  We do not support
1642  * external NVM or writing NVM.
1643  * iwlwifi/mvm/nvm.c
1644  */
1645
1646 /* list of NVM sections we are allowed/need to read */
1647 const int nvm_to_read[] = {
1648         IWM_NVM_SECTION_TYPE_HW,
1649         IWM_NVM_SECTION_TYPE_SW,
1650         IWM_NVM_SECTION_TYPE_REGULATORY,
1651         IWM_NVM_SECTION_TYPE_CALIBRATION,
1652         IWM_NVM_SECTION_TYPE_PRODUCTION,
1653         IWM_NVM_SECTION_TYPE_HW_8000,
1654         IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
1655         IWM_NVM_SECTION_TYPE_PHY_SKU,
1656 };
1657
1658 /* Default NVM size to read */
1659 #define IWM_NVM_DEFAULT_CHUNK_SIZE      (2*1024)
1660 #define IWM_MAX_NVM_SECTION_SIZE        8192
1661
1662 #define IWM_NVM_WRITE_OPCODE 1
1663 #define IWM_NVM_READ_OPCODE 0
1664
1665 /* load nvm chunk response */
1666 #define IWM_READ_NVM_CHUNK_SUCCEED              0
1667 #define IWM_READ_NVM_CHUNK_INVALID_ADDRESS      1
1668
1669 static int
1670 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1671         uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1672 {
1674         struct iwm_nvm_access_cmd nvm_access_cmd = {
1675                 .offset = htole16(offset),
1676                 .length = htole16(length),
1677                 .type = htole16(section),
1678                 .op_code = IWM_NVM_READ_OPCODE,
1679         };
1680         struct iwm_nvm_access_resp *nvm_resp;
1681         struct iwm_rx_packet *pkt;
1682         struct iwm_host_cmd cmd = {
1683                 .id = IWM_NVM_ACCESS_CMD,
1684                 .flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
1685                     IWM_CMD_SEND_IN_RFKILL,
1686                 .data = { &nvm_access_cmd, },
1687         };
1688         int ret, offset_read;
1689         size_t bytes_read;
1690         uint8_t *resp_data;
1691
1692         cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1693
1694         ret = iwm_send_cmd(sc, &cmd);
1695         if (ret) {
1696                 device_printf(sc->sc_dev,
1697                     "Could not send NVM_ACCESS command (error=%d)\n", ret);
1698                 return ret;
1699         }
1700
1701         pkt = cmd.resp_pkt;
1702         if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
1703                 device_printf(sc->sc_dev,
1704                     "Bad return from IWM_NVM_ACCESS_CMD (0x%08X)\n",
1705                     pkt->hdr.flags);
1706                 ret = EIO;
1707                 goto exit;
1708         }
1709
1710         /* Extract NVM response */
1711         nvm_resp = (void *)pkt->data;
1712
1713         ret = le16toh(nvm_resp->status);
1714         bytes_read = le16toh(nvm_resp->length);
1715         offset_read = le16toh(nvm_resp->offset);
1716         resp_data = nvm_resp->data;
1717         if (ret) {
1718                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1719                     "NVM access command failed with status %d\n", ret);
1720                 ret = EINVAL;
1721                 goto exit;
1722         }
1723
1724         if (offset_read != offset) {
1725                 device_printf(sc->sc_dev,
1726                     "NVM ACCESS response with invalid offset %d\n",
1727                     offset_read);
1728                 ret = EINVAL;
1729                 goto exit;
1730         }
1731
1732         if (bytes_read > length) {
1733                 device_printf(sc->sc_dev,
1734                     "NVM ACCESS response with too much data "
1735                     "(%d bytes requested, %zd bytes received)\n",
1736                     length, bytes_read);
1737                 ret = EINVAL;
1738                 goto exit;
1739         }
1740
1741         memcpy(data + offset, resp_data, bytes_read);
1742         *len = bytes_read;
1743
1744  exit:
1745         iwm_free_resp(sc, &cmd);
1746         return ret;
1747 }
1748
1749 /*
1750  * Reads an NVM section completely.
1751  * NICs prior to 7000 family don't have a real NVM, but just read
1752  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1753  * by uCode, we need to manually check in this case that we don't
1754  * overflow and try to read more than the EEPROM size.
1755  * For 7000 family NICs, we supply the maximal size we can read, and
1756  * the uCode fills the response with as much data as it can,
1757  * without overflowing, so no check is needed.
1758  */
1759 static int
1760 iwm_nvm_read_section(struct iwm_softc *sc,
1761         uint16_t section, uint8_t *data, uint16_t *len, size_t max_len)
1762 {
1763         uint16_t chunklen, seglen;
1764         int error = 0;
1765
1766         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1767             "reading NVM section %d\n", section);
1768
1769         chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
1770         *len = 0;
1771
1772         /* Read NVM chunks until exhausted (reading less than requested) */
1773         while (seglen == chunklen && *len < max_len) {
1774                 error = iwm_nvm_read_chunk(sc,
1775                     section, *len, chunklen, data, &seglen);
1776                 if (error) {
1777                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1778                             "Cannot read from NVM section "
1779                             "%d at offset %d\n", section, *len);
1780                         return error;
1781                 }
1782                 *len += seglen;
1783         }
1784
1785         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1786             "NVM section %d read completed (%d bytes, error=%d)\n",
1787             section, *len, error);
1788         return error;
1789 }
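/*
 * Illustrative trace of the loop above (sizes invented for the example,
 * not taken from any particular NVM image): with IWM_NVM_DEFAULT_CHUNK_SIZE
 * of 2048 bytes, a 5000-byte section causes iwm_nvm_read_chunk() to be
 * called at offsets 0, 2048 and 4096, returning 2048, 2048 and 904 bytes;
 * the final short read (seglen < chunklen) terminates the loop with
 * *len == 5000.
 */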
1790
1791 /*
1792  * BEGIN IWM_NVM_PARSE
1793  */
1794
1795 /* iwlwifi/iwl-nvm-parse.c */
1796
1797 /* NVM offsets (in words) definitions */
1798 enum iwm_nvm_offsets {
1799         /* NVM HW-Section offset (in words) definitions */
1800         IWM_HW_ADDR = 0x15,
1801
1802 /* NVM SW-Section offset (in words) definitions */
1803         IWM_NVM_SW_SECTION = 0x1C0,
1804         IWM_NVM_VERSION = 0,
1805         IWM_RADIO_CFG = 1,
1806         IWM_SKU = 2,
1807         IWM_N_HW_ADDRS = 3,
1808         IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1809
1810 /* NVM calibration section offset (in words) definitions */
1811         IWM_NVM_CALIB_SECTION = 0x2B8,
1812         IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1813 };
1814
1815 enum iwm_8000_nvm_offsets {
1816         /* NVM HW-Section offset (in words) definitions */
1817         IWM_HW_ADDR0_WFPM_8000 = 0x12,
1818         IWM_HW_ADDR1_WFPM_8000 = 0x16,
1819         IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1820         IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1821         IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1822
1823         /* NVM SW-Section offset (in words) definitions */
1824         IWM_NVM_SW_SECTION_8000 = 0x1C0,
1825         IWM_NVM_VERSION_8000 = 0,
1826         IWM_RADIO_CFG_8000 = 0,
1827         IWM_SKU_8000 = 2,
1828         IWM_N_HW_ADDRS_8000 = 3,
1829
1830         /* NVM REGULATORY -Section offset (in words) definitions */
1831         IWM_NVM_CHANNELS_8000 = 0,
1832         IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1833         IWM_NVM_LAR_OFFSET_8000 = 0x507,
1834         IWM_NVM_LAR_ENABLED_8000 = 0x7,
1835
1836         /* NVM calibration section offset (in words) definitions */
1837         IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1838         IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1839 };
1840
1841 /* SKU Capabilities (actual values from NVM definition) */
1842 enum nvm_sku_bits {
1843         IWM_NVM_SKU_CAP_BAND_24GHZ      = (1 << 0),
1844         IWM_NVM_SKU_CAP_BAND_52GHZ      = (1 << 1),
1845         IWM_NVM_SKU_CAP_11N_ENABLE      = (1 << 2),
1846         IWM_NVM_SKU_CAP_11AC_ENABLE     = (1 << 3),
1847 };
1848
1849 /* radio config bits (actual values from NVM definition) */
1850 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1851 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1852 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1853 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1854 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1855 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1856
1857 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)       (x & 0xF)
1858 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x)         ((x >> 4) & 0xF)
1859 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x)         ((x >> 8) & 0xF)
1860 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)         ((x >> 12) & 0xFFF)
1861 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)       ((x >> 24) & 0xF)
1862 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)       ((x >> 28) & 0xF)
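/*
 * Worked example (the value is made up, for illustration only): decoding a
 * 7000-family radio_cfg word of 0x2462 with the masks above gives
 *   IWM_NVM_RF_CFG_DASH_MSK(0x2462)   = 0x2   (bits 0-1)
 *   IWM_NVM_RF_CFG_STEP_MSK(0x2462)   = 0x0   (bits 2-3)
 *   IWM_NVM_RF_CFG_TYPE_MSK(0x2462)   = 0x2   (bits 4-5)
 *   IWM_NVM_RF_CFG_PNUM_MSK(0x2462)   = 0x1   (bits 6-7)
 *   IWM_NVM_RF_CFG_TX_ANT_MSK(0x2462) = 0x4   (bits 8-11)
 *   IWM_NVM_RF_CFG_RX_ANT_MSK(0x2462) = 0x2   (bits 12-15)
 * iwm_set_radio_cfg() below applies these masks when filling
 * struct iwm_nvm_data.
 */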
1863
1864 #define DEFAULT_MAX_TX_POWER 16
1865
1866 /**
1867  * enum iwm_nvm_channel_flags - channel flags in NVM
1868  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1869  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1870  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1871  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1872  * XXX cannot find this (DFS) flag in iwl-nvm-parse.c
1873  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1874  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1875  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1876  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1877  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1878  */
1879 enum iwm_nvm_channel_flags {
1880         IWM_NVM_CHANNEL_VALID = (1 << 0),
1881         IWM_NVM_CHANNEL_IBSS = (1 << 1),
1882         IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1883         IWM_NVM_CHANNEL_RADAR = (1 << 4),
1884         IWM_NVM_CHANNEL_DFS = (1 << 7),
1885         IWM_NVM_CHANNEL_WIDE = (1 << 8),
1886         IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1887         IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1888         IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1889 };
1890
1891 /*
1892  * Translate EEPROM flags to net80211.
1893  */
1894 static uint32_t
1895 iwm_eeprom_channel_flags(uint16_t ch_flags)
1896 {
1897         uint32_t nflags;
1898
1899         nflags = 0;
1900         if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1901                 nflags |= IEEE80211_CHAN_PASSIVE;
1902         if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1903                 nflags |= IEEE80211_CHAN_NOADHOC;
1904         if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1905                 nflags |= IEEE80211_CHAN_DFS;
1906                 /* Just in case. */
1907                 nflags |= IEEE80211_CHAN_NOADHOC;
1908         }
1909
1910         return (nflags);
1911 }
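/*
 * For instance (hypothetical flag words): a channel whose NVM flags are just
 * IWM_NVM_CHANNEL_VALID translates to IEEE80211_CHAN_PASSIVE |
 * IEEE80211_CHAN_NOADHOC, whereas VALID | ACTIVE | IBSS | RADAR translates
 * to IEEE80211_CHAN_DFS | IEEE80211_CHAN_NOADHOC.
 */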
1912
1913 static void
1914 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1915     int maxchans, int *nchans, int ch_idx, size_t ch_num,
1916     const uint8_t bands[])
1917 {
1918         const uint16_t * const nvm_ch_flags = sc->sc_nvm.nvm_ch_flags;
1919         uint32_t nflags;
1920         uint16_t ch_flags;
1921         uint8_t ieee;
1922         int error;
1923
1924         for (; ch_idx < ch_num; ch_idx++) {
1925                 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1926                 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1927                         ieee = iwm_nvm_channels[ch_idx];
1928                 else
1929                         ieee = iwm_nvm_channels_8000[ch_idx];
1930
1931                 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1932                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1933                             "Ch. %d Flags %x [%sGHz] - No traffic\n",
1934                             ieee, ch_flags,
1935                             (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1936                             "5.2" : "2.4");
1937                         continue;
1938                 }
1939
1940                 nflags = iwm_eeprom_channel_flags(ch_flags);
1941                 error = ieee80211_add_channel(chans, maxchans, nchans,
1942                     ieee, 0, 0, nflags, bands);
1943                 if (error != 0)
1944                         break;
1945
1946                 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1947                     "Ch. %d Flags %x [%sGHz] - Added\n",
1948                     ieee, ch_flags,
1949                     (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1950                     "5.2" : "2.4");
1951         }
1952 }
1953
1954 static void
1955 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
1956     struct ieee80211_channel chans[])
1957 {
1958         struct iwm_softc *sc = ic->ic_softc;
1959         struct iwm_nvm_data *data = &sc->sc_nvm;
1960         uint8_t bands[IEEE80211_MODE_BYTES];
1961         size_t ch_num;
1962
1963         memset(bands, 0, sizeof(bands));
1964         /* 1-13: 11b/g channels. */
1965         setbit(bands, IEEE80211_MODE_11B);
1966         setbit(bands, IEEE80211_MODE_11G);
1967         iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
1968             IWM_NUM_2GHZ_CHANNELS - 1, bands);
1969
1970         /* 14: 11b channel only. */
1971         clrbit(bands, IEEE80211_MODE_11G);
1972         iwm_add_channel_band(sc, chans, maxchans, nchans,
1973             IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
1974
1975         if (data->sku_cap_band_52GHz_enable) {
1976                 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1977                         ch_num = nitems(iwm_nvm_channels);
1978                 else
1979                         ch_num = nitems(iwm_nvm_channels_8000);
1980                 memset(bands, 0, sizeof(bands));
1981                 setbit(bands, IEEE80211_MODE_11A);
1982                 iwm_add_channel_band(sc, chans, maxchans, nchans,
1983                     IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
1984         }
1985 }
1986
1987 static void
1988 iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
1989         const uint16_t *mac_override, const uint16_t *nvm_hw)
1990 {
1991         const uint8_t *hw_addr;
1992
1993         if (mac_override) {
1994                 static const uint8_t reserved_mac[] = {
1995                         0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
1996                 };
1997
1998                 hw_addr = (const uint8_t *)(mac_override +
1999                                  IWM_MAC_ADDRESS_OVERRIDE_8000);
2000
2001                 /*
2002                  * Store the MAC address from MAO section.
2003                  * No byte swapping is required in MAO section
2004                  */
2005                 IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2006
2007                 /*
2008                  * Force the use of the OTP MAC address in case of reserved MAC
2009                  * address in the NVM, or if address is given but invalid.
2010                  */
2011                 if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2012                     !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2013                     iwm_is_valid_ether_addr(data->hw_addr) &&
2014                     !IEEE80211_IS_MULTICAST(data->hw_addr))
2015                         return;
2016
2017                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2018                     "%s: mac address from nvm override section invalid\n",
2019                     __func__);
2020         }
2021
2022         if (nvm_hw) {
2023                 /* read the mac address from WFMP registers */
2024                 uint32_t mac_addr0 =
2025                     htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2026                 uint32_t mac_addr1 =
2027                     htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2028
2029                 hw_addr = (const uint8_t *)&mac_addr0;
2030                 data->hw_addr[0] = hw_addr[3];
2031                 data->hw_addr[1] = hw_addr[2];
2032                 data->hw_addr[2] = hw_addr[1];
2033                 data->hw_addr[3] = hw_addr[0];
2034
2035                 hw_addr = (const uint8_t *)&mac_addr1;
2036                 data->hw_addr[4] = hw_addr[1];
2037                 data->hw_addr[5] = hw_addr[0];
2038
2039                 return;
2040         }
2041
2042         device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2043         memset(data->hw_addr, 0, sizeof(data->hw_addr));
2044 }
2045
2046 static int
2047 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2048             const uint16_t *phy_sku)
2049 {
2050         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2051                 return le16_to_cpup(nvm_sw + IWM_SKU);
2052
2053         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2054 }
2055
2056 static int
2057 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2058 {
2059         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2060                 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2061         else
2062                 return le32_to_cpup((const uint32_t *)(nvm_sw +
2063                                                 IWM_NVM_VERSION_8000));
2064 }
2065
2066 static int
2067 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2068                   const uint16_t *phy_sku)
2069 {
2070         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2071                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2072
2073         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2074 }
2075
2076 static int
2077 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2078 {
2079         int n_hw_addr;
2080
2081         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2082                 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2083
2084         n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2085
2086         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2087 }
2088
2089 static void
2090 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2091                   uint32_t radio_cfg)
2092 {
2093         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
2094                 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2095                 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2096                 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2097                 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2098                 return;
2099         }
2100
2101         /* set the radio configuration for family 8000 */
2102         data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2103         data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2104         data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2105         data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2106         data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2107         data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2108 }
2109
2110 static int
2111 iwm_parse_nvm_data(struct iwm_softc *sc,
2112                    const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2113                    const uint16_t *nvm_calib, const uint16_t *mac_override,
2114                    const uint16_t *phy_sku, const uint16_t *regulatory)
2115 {
2116         struct iwm_nvm_data *data = &sc->sc_nvm;
2117         uint8_t hw_addr[IEEE80211_ADDR_LEN];
2118         uint32_t sku, radio_cfg;
2119
2120         data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2121
2122         radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2123         iwm_set_radio_cfg(sc, data, radio_cfg);
2124
2125         sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2126         data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2127         data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2128         data->sku_cap_11n_enable = 0;
2129
2130         data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2131
2132         /* The byte order is little endian 16 bit, meaning 214365 */
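        /*
         * For example (hypothetical NVM contents): raw HW-section bytes
         * 11 22 33 44 55 66 yield the MAC address 22:11:44:33:66:55 after
         * the per-16-bit-word swap below.
         */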
2133         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2134                 IEEE80211_ADDR_COPY(hw_addr, nvm_hw + IWM_HW_ADDR);
2135                 data->hw_addr[0] = hw_addr[1];
2136                 data->hw_addr[1] = hw_addr[0];
2137                 data->hw_addr[2] = hw_addr[3];
2138                 data->hw_addr[3] = hw_addr[2];
2139                 data->hw_addr[4] = hw_addr[5];
2140                 data->hw_addr[5] = hw_addr[4];
2141         } else {
2142                 iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
2143         }
2144
2145         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2146                 memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2147                     IWM_NUM_CHANNELS * sizeof(uint16_t));
2148         } else {
2149                 memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2150                     IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2151         }
2152         data->calib_version = 255;   /* TODO:
2153                                         this value will prevent some checks from
2154                                         failing; we need to check whether this
2155                                         field is still needed, and if so,
2156                                         where it lives in the NVM */
2157
2158         return 0;
2159 }
2160
2161 /*
2162  * END NVM PARSE
2163  */
2164
2165 static int
2166 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2167 {
2168         const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2169
2170         /* Checking for required sections */
2171         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2172                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2173                     !sections[IWM_NVM_SECTION_TYPE_HW].data) {
2174                         device_printf(sc->sc_dev,
2175                             "Can't parse empty OTP/NVM sections\n");
2176                         return ENOENT;
2177                 }
2178
2179                 hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
2180         } else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
2181                 /* SW and REGULATORY sections are mandatory */
2182                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2183                     !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2184                         device_printf(sc->sc_dev,
2185                             "Can't parse empty OTP/NVM sections\n");
2186                         return ENOENT;
2187                 }
2188                 /* MAC_OVERRIDE or at least HW section must exist */
2189                 if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
2190                     !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2191                         device_printf(sc->sc_dev,
2192                             "Can't parse mac_address, empty sections\n");
2193                         return ENOENT;
2194                 }
2195
2196                 /* PHY_SKU section is mandatory in B0 */
2197                 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2198                         device_printf(sc->sc_dev,
2199                             "Can't parse phy_sku in B0, empty sections\n");
2200                         return ENOENT;
2201                 }
2202
2203                 hw = (const uint16_t *)
2204                     sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
2205         } else {
2206                 panic("unknown device family %d\n", sc->sc_device_family);
2207         }
2208
2209         sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2210         calib = (const uint16_t *)
2211             sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2212         regulatory = (const uint16_t *)
2213             sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2214         mac_override = (const uint16_t *)
2215             sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2216         phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2217
2218         return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2219             phy_sku, regulatory);
2220 }
2221
2222 static int
2223 iwm_nvm_init(struct iwm_softc *sc)
2224 {
2225         struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
2226         int i, section, error;
2227         uint16_t len;
2228         uint8_t *buf;
2229         const size_t bufsz = IWM_MAX_NVM_SECTION_SIZE;
2230
2231         memset(nvm_sections, 0 , sizeof(nvm_sections));
2232
2233         buf = malloc(bufsz, M_DEVBUF, M_NOWAIT);
2234         if (buf == NULL)
2235                 return ENOMEM;
2236
2237         for (i = 0; i < nitems(nvm_to_read); i++) {
2238                 section = nvm_to_read[i];
2239                 KASSERT(section < nitems(nvm_sections),
2240                     ("too many sections"));
2241
2242                 error = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
2243                 if (error) {
2244                         error = 0;
2245                         continue;
2246                 }
2247                 nvm_sections[section].data = malloc(len, M_DEVBUF, M_NOWAIT);
2248                 if (nvm_sections[section].data == NULL) {
2249                         error = ENOMEM;
2250                         break;
2251                 }
2252                 memcpy(nvm_sections[section].data, buf, len);
2253                 nvm_sections[section].length = len;
2254         }
2255         free(buf, M_DEVBUF);
2256         if (error == 0)
2257                 error = iwm_parse_nvm_sections(sc, nvm_sections);
2258
2259         for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
2260                 if (nvm_sections[i].data != NULL)
2261                         free(nvm_sections[i].data, M_DEVBUF);
2262         }
2263
2264         return error;
2265 }
2266
2267 /*
2268  * Firmware loading gunk.  This is kind of a weird hybrid between the
2269  * iwn driver and the Linux iwlwifi driver.
2270  */
2271
2272 static int
2273 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
2274         const uint8_t *section, uint32_t byte_cnt)
2275 {
2276         int error = EINVAL;
2277         uint32_t chunk_sz, offset;
2278
2279         chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
2280
2281         for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
2282                 uint32_t addr, len;
2283                 const uint8_t *data;
2284
2285                 addr = dst_addr + offset;
2286                 len = MIN(chunk_sz, byte_cnt - offset);
2287                 data = section + offset;
2288
2289                 error = iwm_firmware_load_chunk(sc, addr, data, len);
2290                 if (error)
2291                         break;
2292         }
2293
2294         return error;
2295 }
2296
2297 static int
2298 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2299         const uint8_t *chunk, uint32_t byte_cnt)
2300 {
2301         struct iwm_dma_info *dma = &sc->fw_dma;
2302         int error;
2303
2304         /* Copy firmware chunk into pre-allocated DMA-safe memory. */
2305         memcpy(dma->vaddr, chunk, byte_cnt);
2306         bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2307
2308         if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2309             dst_addr <= IWM_FW_MEM_EXTENDED_END) {
2310                 iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2311                     IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2312         }
2313
2314         sc->sc_fw_chunk_done = 0;
2315
2316         if (!iwm_nic_lock(sc))
2317                 return EBUSY;
2318
2319         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2320             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2321         IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2322             dst_addr);
2323         IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2324             dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2325         IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2326             (iwm_get_dma_hi_addr(dma->paddr)
2327               << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2328         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2329             1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2330             1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2331             IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2332         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2333             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2334             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2335             IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2336
2337         iwm_nic_unlock(sc);
2338
2339         /* wait 1s for this segment to load */
2340         while (!sc->sc_fw_chunk_done)
2341                 if ((error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz)) != 0)
2342                         break;
2343
2344         if (!sc->sc_fw_chunk_done) {
2345                 device_printf(sc->sc_dev,
2346                     "fw chunk addr 0x%x len %d failed to load\n",
2347                     dst_addr, byte_cnt);
2348         }
2349
2350         if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2351             dst_addr <= IWM_FW_MEM_EXTENDED_END && iwm_nic_lock(sc)) {
2352                 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2353                     IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2354                 iwm_nic_unlock(sc);
2355         }
2356
2357         return error;
2358 }
2359
2360 int
2361 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
2362     int cpu, int *first_ucode_section)
2363 {
2364         int shift_param;
2365         int i, error = 0, sec_num = 0x1;
2366         uint32_t val, last_read_idx = 0;
2367         const void *data;
2368         uint32_t dlen;
2369         uint32_t offset;
2370
2371         if (cpu == 1) {
2372                 shift_param = 0;
2373                 *first_ucode_section = 0;
2374         } else {
2375                 shift_param = 16;
2376                 (*first_ucode_section)++;
2377         }
2378
2379         for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
2380                 last_read_idx = i;
2381                 data = fws->fw_sect[i].fws_data;
2382                 dlen = fws->fw_sect[i].fws_len;
2383                 offset = fws->fw_sect[i].fws_devoff;
2384
2385                 /*
2386                  * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2387                  * CPU1 to CPU2.
2388                  * PAGING_SEPARATOR_SECTION delimiter - separate between
2389                  * CPU2 non paged to CPU2 paging sec.
2390                  */
2391                 if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2392                     offset == IWM_PAGING_SEPARATOR_SECTION)
2393                         break;
2394
2395                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2396                     "LOAD FIRMWARE chunk %d offset 0x%x len %d for cpu %d\n",
2397                     i, offset, dlen, cpu);
2398
2399                 if (dlen > sc->sc_fwdmasegsz) {
2400                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2401                             "chunk %d too large (%d bytes)\n", i, dlen);
2402                         error = EFBIG;
2403                 } else {
2404                         error = iwm_firmware_load_sect(sc, offset, data, dlen);
2405                 }
2406                 if (error) {
2407                         device_printf(sc->sc_dev,
2408                             "could not load firmware chunk %d (error %d)\n",
2409                             i, error);
2410                         return error;
2411                 }
2412
2413                 /* Notify the ucode of the loaded section number and status */
2414                 if (iwm_nic_lock(sc)) {
2415                         val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2416                         val = val | (sec_num << shift_param);
2417                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2418                         sec_num = (sec_num << 1) | 0x1;
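                        /*
                         * Note: sec_num walks 0x1, 0x3, 0x7, ... so the
                         * load-status register accumulates a contiguous
                         * bitmask of the sections loaded so far (shifted
                         * into the upper halfword for CPU2).
                         */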
2419                         iwm_nic_unlock(sc);
2420
2421                         /*
2422                          * The firmware won't load correctly without this delay.
2423                          */
2424                         DELAY(8000);
2425                 }
2426         }
2427
2428         *first_ucode_section = last_read_idx;
2429
2430         if (iwm_nic_lock(sc)) {
2431                 if (cpu == 1)
2432                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2433                 else
2434                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2435                 iwm_nic_unlock(sc);
2436         }
2437
2438         return 0;
2439 }
2440
2441 int
2442 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2443 {
2444         struct iwm_fw_sects *fws;
2445         int error = 0;
2446         int first_ucode_section;
2447
2448         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "loading ucode type %d\n",
2449             ucode_type);
2450
2451         fws = &sc->sc_fw.fw_sects[ucode_type];
2452
2453         /* configure the ucode to be ready to get the secured image */
2454         /* release CPU reset */
2455         iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
2456
2457         /* load to FW the binary Secured sections of CPU1 */
2458         error = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
2459         if (error)
2460                 return error;
2461
2462         /* load to FW the binary sections of CPU2 */
2463         return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
2464 }
2465
2466 static int
2467 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2468 {
2469         struct iwm_fw_sects *fws;
2470         int error, i;
2471         const void *data;
2472         uint32_t dlen;
2473         uint32_t offset;
2474
2475         sc->sc_uc.uc_intr = 0;
2476
2477         fws = &sc->sc_fw.fw_sects[ucode_type];
2478         for (i = 0; i < fws->fw_count; i++) {
2479                 data = fws->fw_sect[i].fws_data;
2480                 dlen = fws->fw_sect[i].fws_len;
2481                 offset = fws->fw_sect[i].fws_devoff;
2482                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2483                     "LOAD FIRMWARE type %d offset %u len %d\n",
2484                     ucode_type, offset, dlen);
2485                 if (dlen > sc->sc_fwdmasegsz) {
2486                         IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2487                             "chunk %d too large (%d bytes)\n", i, dlen);
2488                         error = EFBIG;
2489                 } else {
2490                         error = iwm_firmware_load_sect(sc, offset, data, dlen);
2491                 }
2492                 if (error) {
2493                         device_printf(sc->sc_dev,
2494                             "could not load firmware chunk %u of %u "
2495                             "(error=%d)\n", i, fws->fw_count, error);
2496                         return error;
2497                 }
2498         }
2499
2500         IWM_WRITE(sc, IWM_CSR_RESET, 0);
2501
2502         return 0;
2503 }
2504
2505 static int
2506 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2507 {
2508         int error, w;
2509
2510         if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
2511                 error = iwm_load_firmware_8000(sc, ucode_type);
2512         else
2513                 error = iwm_load_firmware_7000(sc, ucode_type);
2514         if (error)
2515                 return error;
2516
2517         /* wait for the firmware to load */
2518         for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
2519                 error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
2520         }
2521         if (error || !sc->sc_uc.uc_ok) {
2522                 device_printf(sc->sc_dev, "could not load firmware\n");
2523                 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
2524                         device_printf(sc->sc_dev, "cpu1 status: 0x%x\n",
2525                             iwm_read_prph(sc, IWM_SB_CPU_1_STATUS));
2526                         device_printf(sc->sc_dev, "cpu2 status: 0x%x\n",
2527                             iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
2528                 }
2529         }
2530
2531         /*
2532          * Give the firmware some time to initialize.
2533          * Accessing it too early causes errors.
2534          */
2535         msleep(&w, &sc->sc_mtx, 0, "iwmfwinit", hz);
2536
2537         return error;
2538 }
2539
2540 /* iwlwifi: pcie/trans.c */
2541 static int
2542 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2543 {
2544         int error;
2545
2546         IWM_WRITE(sc, IWM_CSR_INT, ~0);
2547
2548         if ((error = iwm_nic_init(sc)) != 0) {
2549                 device_printf(sc->sc_dev, "unable to init nic\n");
2550                 return error;
2551         }
2552
2553         /* make sure rfkill handshake bits are cleared */
2554         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2555         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2556             IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2557
2558         /* clear (again), then enable host interrupts */
2559         IWM_WRITE(sc, IWM_CSR_INT, ~0);
2560         iwm_enable_interrupts(sc);
2561
2562         /* really make sure rfkill handshake bits are cleared */
2563         /* maybe we should write a few times more?  just to make sure */
2564         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2565         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2566
2567         /* Load the given image to the HW */
2568         return iwm_load_firmware(sc, ucode_type);
2569 }
2570
2571 static int
2572 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2573 {
2574         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2575                 .valid = htole32(valid_tx_ant),
2576         };
2577
2578         return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2579             IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2580 }
2581
2582 /* iwlwifi: mvm/fw.c */
2583 static int
2584 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2585 {
2586         struct iwm_phy_cfg_cmd phy_cfg_cmd;
2587         enum iwm_ucode_type ucode_type = sc->sc_uc_current;
2588
2589         /* Set parameters */
2590         phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
2591         phy_cfg_cmd.calib_control.event_trigger =
2592             sc->sc_default_calib[ucode_type].event_trigger;
2593         phy_cfg_cmd.calib_control.flow_trigger =
2594             sc->sc_default_calib[ucode_type].flow_trigger;
2595
2596         IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2597             "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2598         return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2599             sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2600 }
2601
2602 static int
2603 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2604         enum iwm_ucode_type ucode_type)
2605 {
2606         enum iwm_ucode_type old_type = sc->sc_uc_current;
2607         int error;
2608
2609         if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2610                 device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
2611                         error);
2612                 return error;
2613         }
2614
2615         sc->sc_uc_current = ucode_type;
2616         error = iwm_start_fw(sc, ucode_type);
2617         if (error) {
2618                 device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2619                 sc->sc_uc_current = old_type;
2620                 return error;
2621         }
2622
2623         error = iwm_post_alive(sc);
2624         if (error) {
2625                 device_printf(sc->sc_dev, "iwm_post_alive: failed %d\n", error);
2626         }
2627         return error;
2628 }
2629
2630 /*
2631  * mvm misc bits
2632  */
2633
2634 /*
2635  * follows iwlwifi/fw.c
2636  */
2637 static int
2638 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2639 {
2640         int error;
2641
2642         /* do not operate with rfkill switch turned on */
2643         if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2644                 device_printf(sc->sc_dev,
2645                     "radio is disabled by hardware switch\n");
2646                 return EPERM;
2647         }
2648
2649         sc->sc_init_complete = 0;
2650         if ((error = iwm_mvm_load_ucode_wait_alive(sc,
2651             IWM_UCODE_TYPE_INIT)) != 0) {
2652                 device_printf(sc->sc_dev, "failed to load init firmware\n");
2653                 return error;
2654         }
2655
2656         if (justnvm) {
2657                 if ((error = iwm_nvm_init(sc)) != 0) {
2658                         device_printf(sc->sc_dev, "failed to read nvm\n");
2659                         return error;
2660                 }
2661                 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->sc_nvm.hw_addr);
2662
2663                 return 0;
2664         }
2665
2666         if ((error = iwm_send_bt_init_conf(sc)) != 0) {
2667                 device_printf(sc->sc_dev,
2668                     "failed to send bt coex configuration: %d\n", error);
2669                 return error;
2670         }
2671
2672         /* Init Smart FIFO. */
2673         error = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
2674         if (error != 0)
2675                 return error;
2676
2677         /* Send TX valid antennas before triggering calibrations */
2678         if ((error = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc))) != 0) {
2679                 device_printf(sc->sc_dev,
2680                     "failed to send antennas before calibration: %d\n", error);
2681                 return error;
2682         }
2683
2684         /*
2685          * Send phy configurations command to init uCode
2686          * to start the 16.0 uCode init image internal calibrations.
2687          */
2688         if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
2689                 device_printf(sc->sc_dev,
2690                     "%s: failed to run internal calibration: %d\n",
2691                     __func__, error);
2692                 return error;
2693         }
2694
2695         /*
2696          * Nothing to do but wait for the init complete notification
2697          * from the firmware
2698          */
2699         while (!sc->sc_init_complete) {
2700                 error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
2701                                  0, "iwminit", 2*hz);
2702                 if (error) {
2703                         device_printf(sc->sc_dev, "init complete failed: %d\n",
2704                                 error);
2705                         break;
2706                 }
2707         }
2708
2709         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "init %scomplete\n",
2710             sc->sc_init_complete ? "" : "not ");
2711
2712         return error;
2713 }
2714
2715 /*
2716  * receive side
2717  */
2718
2719 /* (re)stock rx ring, called at init-time and at runtime */
2720 static int
2721 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
2722 {
2723         struct iwm_rx_ring *ring = &sc->rxq;
2724         struct iwm_rx_data *data = &ring->data[idx];
2725         struct mbuf *m;
2726         bus_dmamap_t dmamap = NULL;
2727         bus_dma_segment_t seg;
2728         int nsegs, error;
2729
2730         m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
2731         if (m == NULL)
2732                 return ENOBUFS;
2733
2734         m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2735         error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
2736             &seg, &nsegs, BUS_DMA_NOWAIT);
2737         if (error != 0) {
2738                 device_printf(sc->sc_dev,
2739                     "%s: can't map mbuf, error %d\n", __func__, error);
2740                 goto fail;
2741         }
2742
2743         if (data->m != NULL)
2744                 bus_dmamap_unload(ring->data_dmat, data->map);
2745
2746         /* Swap ring->spare_map with data->map */
2747         dmamap = data->map;
2748         data->map = ring->spare_map;
2749         ring->spare_map = dmamap;
2750
2751         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
2752         data->m = m;
2753
2754         /* Update RX descriptor. */
2755         KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
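             /* The RX descriptor holds the 256-byte-aligned DMA address, shifted right by 8 bits. */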
2756         ring->desc[idx] = htole32(seg.ds_addr >> 8);
2757         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2758             BUS_DMASYNC_PREWRITE);
2759
2760         return 0;
2761 fail:
2762         m_freem(m);
2763         return error;
2764 }
2765
2766 /* iwlwifi: mvm/rx.c */
2767 #define IWM_RSSI_OFFSET 50
2768 static int
2769 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2770 {
2771         int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
2772         uint32_t agc_a, agc_b;
2773         uint32_t val;
2774
2775         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
2776         agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
2777         agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
2778
2779         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
2780         rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
2781         rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
2782
2783         /*
2784          * dBm = rssi dB - agc dB - constant.
2785          * Higher AGC (higher radio gain) means lower signal.
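          * For example, with illustrative values: rssi 35 dB and AGC 55 dB give
          * 35 - 50 - 55 = -70 dBm.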
2786          */
2787         rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
2788         rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
2789         max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
2790
2791         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2792             "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
2793             rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
2794
2795         return max_rssi_dbm;
2796 }
2797
2798 /* iwlwifi: mvm/rx.c */
2799 /*
2800  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
2801  * values are reported by the fw as positive values - need to negate
2802  * to obtain their dBm.  Account for missing antennas by replacing 0
2803  * values by -256 dBm: practically zero power and a non-feasible 8-bit value.
2804  */
2805 static int
2806 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2807 {
2808         int energy_a, energy_b, energy_c, max_energy;
2809         uint32_t val;
2810
2811         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
2812         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
2813             IWM_RX_INFO_ENERGY_ANT_A_POS;
2814         energy_a = energy_a ? -energy_a : -256;
2815         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
2816             IWM_RX_INFO_ENERGY_ANT_B_POS;
2817         energy_b = energy_b ? -energy_b : -256;
2818         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
2819             IWM_RX_INFO_ENERGY_ANT_C_POS;
2820         energy_c = energy_c ? -energy_c : -256;
2821         max_energy = MAX(energy_a, energy_b);
2822         max_energy = MAX(max_energy, energy_c);
2823
2824         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2825             "energy In A %d B %d C %d , and max %d\n",
2826             energy_a, energy_b, energy_c, max_energy);
2827
2828         return max_energy;
2829 }
2830
2831 static void
2832 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
2833         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2834 {
2835         struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
2836
2837         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
2838         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2839
2840         memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
2841 }
2842
2843 /*
2844  * Retrieve the average noise (in dBm) among receivers.
2845  */
2846 static int
2847 iwm_get_noise(struct iwm_softc *sc,
2848     const struct iwm_mvm_statistics_rx_non_phy *stats)
2849 {
2850         int i, total, nbant, noise;
2851
2852         total = nbant = noise = 0;
2853         for (i = 0; i < 3; i++) {
2854                 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
2855                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
2856                     __func__,
2857                     i,
2858                     noise);
2859
2860                 if (noise) {
2861                         total += noise;
2862                         nbant++;
2863                 }
2864         }
2865
2866         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
2867             __func__, nbant, total);
2868 #if 0
2869         /* There should be at least one antenna but check anyway. */
2870         return (nbant == 0) ? -127 : (total / nbant) - 107;
2871 #else
2872         /* For now, just hard-code it to -96 to be safe */
2873         return (-96);
2874 #endif
2875 }
2876
2877 /*
2878  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
2879  *
2880  * Handles the actual data of the Rx packet from the fw
2881  */
2882 static void
2883 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
2884         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2885 {
2886         struct ieee80211com *ic = &sc->sc_ic;
2887         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2888         struct ieee80211_frame *wh;
2889         struct ieee80211_node *ni;
2890         struct ieee80211_rx_stats rxs;
2891         struct mbuf *m;
2892         struct iwm_rx_phy_info *phy_info;
2893         struct iwm_rx_mpdu_res_start *rx_res;
2894         uint32_t len;
2895         uint32_t rx_pkt_status;
2896         int rssi;
2897
2898         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2899
2900         phy_info = &sc->sc_last_phy_info;
2901         rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
2902         wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
2903         len = le16toh(rx_res->byte_count);
2904         rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
2905
2906         m = data->m;
2907         m->m_data = pkt->data + sizeof(*rx_res);
2908         m->m_pkthdr.len = m->m_len = len;
2909
2910         if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
2911                 device_printf(sc->sc_dev,
2912                     "dsp size out of range [0,20]: %d\n",
2913                     phy_info->cfg_phy_cnt);
2914                 return;
2915         }
2916
2917         if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
2918             !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
2919                 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2920                     "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
2921                 return; /* drop */
2922         }
2923
2924         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
2925                 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
2926         } else {
2927                 rssi = iwm_mvm_calc_rssi(sc, phy_info);
2928         }
2929
2930         /* Note: RSSI is absolute (i.e. a negative dBm value). */
2931         if (rssi < IWM_MIN_DBM)
2932                 rssi = IWM_MIN_DBM;
2933         else if (rssi > IWM_MAX_DBM)
2934                 rssi = IWM_MAX_DBM;
2935
2936         /* Map it to relative value */
2937         rssi = rssi - sc->sc_noise;
2938
2939         /* replenish ring for the buffer we're going to feed to the sharks */
2940         if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
2941                 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
2942                     __func__);
2943                 return;
2944         }
2945
2946         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2947             "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
2948
2949         ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
2950
2951         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2952             "%s: phy_info: channel=%d, flags=0x%08x\n",
2953             __func__,
2954             le16toh(phy_info->channel),
2955             le16toh(phy_info->phy_flags));
2956
2957         /*
2958          * Populate an RX state struct with the provided information.
2959          */
2960         bzero(&rxs, sizeof(rxs));
2961         rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
2962         rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
2963         rxs.c_ieee = le16toh(phy_info->channel);
2964         if (phy_info->phy_flags & htole16(IWM_RX_RES_PHY_FLAGS_BAND_24)) {
2965                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
2966         } else {
2967                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
2968         }
2969
2970         /* rssi is in 1/2 dB units */
2971         rxs.rssi = rssi * 2;
2972         rxs.nf = sc->sc_noise;
2973
2974         if (ieee80211_radiotap_active_vap(vap)) {
2975                 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
2976
2977                 tap->wr_flags = 0;
2978                 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
2979                         tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2980                 tap->wr_chan_freq = htole16(rxs.c_freq);
2981                 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
2982                 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
2983                 tap->wr_dbm_antsignal = (int8_t)rssi;
2984                 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
2985                 tap->wr_tsft = phy_info->system_timestamp;
2986                 switch (phy_info->rate) {
2987                 /* CCK rates. */
2988                 case  10: tap->wr_rate =   2; break;
2989                 case  20: tap->wr_rate =   4; break;
2990                 case  55: tap->wr_rate =  11; break;
2991                 case 110: tap->wr_rate =  22; break;
2992                 /* OFDM rates. */
2993                 case 0xd: tap->wr_rate =  12; break;
2994                 case 0xf: tap->wr_rate =  18; break;
2995                 case 0x5: tap->wr_rate =  24; break;
2996                 case 0x7: tap->wr_rate =  36; break;
2997                 case 0x9: tap->wr_rate =  48; break;
2998                 case 0xb: tap->wr_rate =  72; break;
2999                 case 0x1: tap->wr_rate =  96; break;
3000                 case 0x3: tap->wr_rate = 108; break;
3001                 /* Unknown rate: should not happen. */
3002                 default:  tap->wr_rate =   0;
3003                 }
3004         }
3005
3006         IWM_UNLOCK(sc);
3007         if (ni != NULL) {
3008                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3009                 ieee80211_input_mimo(ni, m, &rxs);
3010                 ieee80211_free_node(ni);
3011         } else {
3012                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3013                 ieee80211_input_mimo_all(ic, m, &rxs);
3014         }
3015         IWM_LOCK(sc);
3016 }
3017
3018 static int
3019 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3020         struct iwm_node *in)
3021 {
3022         struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3023         struct ieee80211_node *ni = &in->in_ni;
3024         struct ieee80211vap *vap = ni->ni_vap;
3025         int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3026         int failack = tx_resp->failure_frame;
3027
3028         KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3029
3030         /* Update rate control statistics. */
3031         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3032             __func__,
3033             (int) le16toh(tx_resp->status.status),
3034             (int) le16toh(tx_resp->status.sequence),
3035             tx_resp->frame_count,
3036             tx_resp->bt_kill_count,
3037             tx_resp->failure_rts,
3038             tx_resp->failure_frame,
3039             le32toh(tx_resp->initial_rate),
3040             (int) le16toh(tx_resp->wireless_media_time));
3041
3042         if (status != IWM_TX_STATUS_SUCCESS &&
3043             status != IWM_TX_STATUS_DIRECT_DONE) {
3044                 ieee80211_ratectl_tx_complete(vap, ni,
3045                     IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3046                 return (1);
3047         } else {
3048                 ieee80211_ratectl_tx_complete(vap, ni,
3049                     IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3050                 return (0);
3051         }
3052 }
3053
3054 static void
3055 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
3056         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3057 {
3058         struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3059         int idx = cmd_hdr->idx;
3060         int qid = cmd_hdr->qid;
3061         struct iwm_tx_ring *ring = &sc->txq[qid];
3062         struct iwm_tx_data *txd = &ring->data[idx];
3063         struct iwm_node *in = txd->in;
3064         struct mbuf *m = txd->m;
3065         int status;
3066
3067         KASSERT(txd->done == 0, ("txd not done"));
3068         KASSERT(txd->in != NULL, ("txd without node"));
3069         KASSERT(txd->m != NULL, ("txd without mbuf"));
3070
3071         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3072
3073         sc->sc_tx_timer = 0;
3074
3075         status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3076
3077         /* Unmap and free mbuf. */
3078         bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3079         bus_dmamap_unload(ring->data_dmat, txd->map);
3080
3081         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3082             "free txd %p, in %p\n", txd, txd->in);
3083         txd->done = 1;
3084         txd->m = NULL;
3085         txd->in = NULL;
3086
3087         ieee80211_tx_complete(&in->in_ni, m, status);
3088
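         /* Ring drained below the low watermark: clear its full bit and restart TX once no ring is full. */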
3089         if (--ring->queued < IWM_TX_RING_LOMARK) {
3090                 sc->qfullmsk &= ~(1 << ring->qid);
3091                 if (sc->qfullmsk == 0) {
3092                         iwm_start(sc);
3093                 }
3094         }
3095 }
3096
3097 /*
3098  * transmit side
3099  */
3100
3101 /*
3102  * Process a "command done" firmware notification.  This is where we wakeup
3103  * processes waiting for a synchronous command completion.
3104  * from if_iwn
3105  */
3106 static void
3107 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3108 {
3109         struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3110         struct iwm_tx_data *data;
3111
3112         if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3113                 return; /* Not a command ack. */
3114         }
3115
3116         /* XXX wide commands? */
3117         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3118             "cmd notification type 0x%x qid %d idx %d\n",
3119             pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3120
3121         data = &ring->data[pkt->hdr.idx];
3122
3123         /* If the command was mapped in an mbuf, free it. */
3124         if (data->m != NULL) {
3125                 bus_dmamap_sync(ring->data_dmat, data->map,
3126                     BUS_DMASYNC_POSTWRITE);
3127                 bus_dmamap_unload(ring->data_dmat, data->map);
3128                 m_freem(data->m);
3129                 data->m = NULL;
3130         }
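         /* Wake up any thread sleeping on this descriptor for a synchronous command. */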
3131         wakeup(&ring->desc[pkt->hdr.idx]);
3132 }
3133
3134 #if 0
3135 /*
3136  * necessary only for block ack mode
3137  */
3138 void
3139 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3140         uint16_t len)
3141 {
3142         struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3143         uint16_t w_val;
3144
3145         scd_bc_tbl = sc->sched_dma.vaddr;
3146
3147         len += 8; /* magic numbers came naturally from paris */
3148         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
3149                 len = roundup(len, 4) / 4;
3150
3151         w_val = htole16(sta_id << 12 | len);
3152
3153         /* Update TX scheduler. */
3154         scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3155         bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3156             BUS_DMASYNC_PREWRITE);
3157
3158         /* I really wonder what this is ?!? */
3159         if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3160                 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3161                 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3162                     BUS_DMASYNC_PREWRITE);
3163         }
3164 }
3165 #endif
3166
3167 /*
3168  * Take an 802.11 (non-n) rate and find the relevant rate
3169  * table entry.  Return the index into in_ridx[].
3170  *
3171  * The caller then uses that index back into in_ridx
3172  * to figure out the rate index programmed /into/
3173  * the firmware for this given node.
3174  */
3175 static int
3176 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3177     uint8_t rate)
3178 {
3179         int i;
3180         uint8_t r;
3181
3182         for (i = 0; i < nitems(in->in_ridx); i++) {
3183                 r = iwm_rates[in->in_ridx[i]].rate;
3184                 if (rate == r)
3185                         return (i);
3186         }
3187         /* XXX Return the first */
3188         /* XXX TODO: have it return the /lowest/ */
3189         return (0);
3190 }
3191
3192 /*
3193  * Fill in the rate related information for a transmit command.
3194  */
3195 static const struct iwm_rate *
3196 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3197         struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
3198 {
3199         struct ieee80211com *ic = &sc->sc_ic;
3200         struct ieee80211_node *ni = &in->in_ni;
3201         const struct iwm_rate *rinfo;
3202         int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3203         int ridx, rate_flags;
3204
3205         tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3206         tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3207
3208         /*
3209          * XXX TODO: everything about the rate selection here is terrible!
3210          */
3211
3212         if (type == IEEE80211_FC0_TYPE_DATA) {
3213                 int i;
3214                 /* for data frames, use RS table */
3215                 (void) ieee80211_ratectl_rate(ni, NULL, 0);
3216                 i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3217                 ridx = in->in_ridx[i];
3218
3219                 /* This is the index into the programmed table */
3220                 tx->initial_rate_index = i;
3221                 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3222                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3223                     "%s: start with i=%d, txrate %d\n",
3224                     __func__, i, iwm_rates[ridx].rate);
3225         } else {
3226                 /*
3227                  * For non-data, use the lowest supported rate for the given
3228                  * operational mode.
3229                  *
3230                  * Note: there may not be any rate control information available.
3231                  * This driver currently assumes that if we're transmitting data
3232                  * frames, we should use the rate control table.  Grr.
3233                  *
3234                  * XXX TODO: use the configured rate for the traffic type!
3235                  * XXX TODO: this should be per-vap, not curmode, since later
3236                  * on we'll want to handle off-channel stuff (eg TDLS).
3237                  */
3238                 if (ic->ic_curmode == IEEE80211_MODE_11A) {
3239                         /*
3240                          * XXX this assumes the mode is either 11a or not 11a;
3241                          * definitely won't work for 11n.
3242                          */
3243                         ridx = IWM_RIDX_OFDM;
3244                 } else {
3245                         ridx = IWM_RIDX_CCK;
3246                 }
3247         }
3248
3249         rinfo = &iwm_rates[ridx];
3250
3251         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3252             __func__, ridx,
3253             rinfo->rate,
3254             !! (IWM_RIDX_IS_CCK(ridx))
3255             );
3256
3257         /* XXX TODO: hard-coded TX antenna? */
3258         rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3259         if (IWM_RIDX_IS_CCK(ridx))
3260                 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3261         tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3262
3263         return rinfo;
3264 }
3265
3266 #define TB0_SIZE 16
3267 static int
3268 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3269 {
3270         struct ieee80211com *ic = &sc->sc_ic;
3271         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3272         struct iwm_node *in = IWM_NODE(ni);
3273         struct iwm_tx_ring *ring;
3274         struct iwm_tx_data *data;
3275         struct iwm_tfd *desc;
3276         struct iwm_device_cmd *cmd;
3277         struct iwm_tx_cmd *tx;
3278         struct ieee80211_frame *wh;
3279         struct ieee80211_key *k = NULL;
3280         struct mbuf *m1;
3281         const struct iwm_rate *rinfo;
3282         uint32_t flags;
3283         u_int hdrlen;
3284         bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3285         int nsegs;
3286         uint8_t tid, type;
3287         int i, totlen, error, pad;
3288
3289         wh = mtod(m, struct ieee80211_frame *);
3290         hdrlen = ieee80211_anyhdrsize(wh);
3291         type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3292         tid = 0;
3293         ring = &sc->txq[ac];
3294         desc = &ring->desc[ring->cur];
3295         memset(desc, 0, sizeof(*desc));
3296         data = &ring->data[ring->cur];
3297
3298         /* Fill out iwm_tx_cmd to send to the firmware */
3299         cmd = &ring->cmd[ring->cur];
3300         cmd->hdr.code = IWM_TX_CMD;
3301         cmd->hdr.flags = 0;
3302         cmd->hdr.qid = ring->qid;
3303         cmd->hdr.idx = ring->cur;
3304
3305         tx = (void *)cmd->data;
3306         memset(tx, 0, sizeof(*tx));
3307
3308         rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
3309
3310         /* Encrypt the frame if need be. */
3311         if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3312                 /* Retrieve key for TX && do software encryption. */
3313                 k = ieee80211_crypto_encap(ni, m);
3314                 if (k == NULL) {
3315                         m_freem(m);
3316                         return (ENOBUFS);
3317                 }
3318                 /* 802.11 header may have moved. */
3319                 wh = mtod(m, struct ieee80211_frame *);
3320         }
3321
3322         if (ieee80211_radiotap_active_vap(vap)) {
3323                 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3324
3325                 tap->wt_flags = 0;
3326                 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3327                 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3328                 tap->wt_rate = rinfo->rate;
3329                 if (k != NULL)
3330                         tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3331                 ieee80211_radiotap_tx(vap, m);
3332         }
3333
3334
3335         totlen = m->m_pkthdr.len;
3336
3337         flags = 0;
3338         if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3339                 flags |= IWM_TX_CMD_FLG_ACK;
3340         }
3341
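         /* Request protection (RTS/CTS) for long unicast data frames. */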
3342         if (type == IEEE80211_FC0_TYPE_DATA
3343             && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3344             && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3345                 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3346         }
3347
3348         if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3349             type != IEEE80211_FC0_TYPE_DATA)
3350                 tx->sta_id = sc->sc_aux_sta.sta_id;
3351         else
3352                 tx->sta_id = IWM_STATION_ID;
3353
3354         if (type == IEEE80211_FC0_TYPE_MGT) {
3355                 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3356
3357                 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3358                     subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3359                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3360                 } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3361                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3362                 } else {
3363                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3364                 }
3365         } else {
3366                 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3367         }
3368
3369         if (hdrlen & 3) {
3370                 /* First segment length must be a multiple of 4. */
3371                 flags |= IWM_TX_CMD_FLG_MH_PAD;
3372                 pad = 4 - (hdrlen & 3);
3373         } else
3374                 pad = 0;
3375
3376         tx->driver_txop = 0;
3377         tx->next_frame_len = 0;
3378
3379         tx->len = htole16(totlen);
3380         tx->tid_tspec = tid;
3381         tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3382
3383         /* Set physical address of "scratch area". */
3384         tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3385         tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3386
3387         /* Copy 802.11 header in TX command. */
3388         memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3389
3390         flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3391
3392         tx->sec_ctl = 0;
3393         tx->tx_flags |= htole32(flags);
3394
3395         /* Trim 802.11 header. */
3396         m_adj(m, hdrlen);
3397         error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3398             segs, &nsegs, BUS_DMA_NOWAIT);
3399         if (error != 0) {
3400                 if (error != EFBIG) {
3401                         device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3402                             error);
3403                         m_freem(m);
3404                         return error;
3405                 }
3406                 /* Too many DMA segments, linearize mbuf. */
3407                 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3408                 if (m1 == NULL) {
3409                         device_printf(sc->sc_dev,
3410                             "%s: could not defrag mbuf\n", __func__);
3411                         m_freem(m);
3412                         return (ENOBUFS);
3413                 }
3414                 m = m1;
3415
3416                 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3417                     segs, &nsegs, BUS_DMA_NOWAIT);
3418                 if (error != 0) {
3419                         device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3420                             error);
3421                         m_freem(m);
3422                         return error;
3423                 }
3424         }
3425         data->m = m;
3426         data->in = in;
3427         data->done = 0;
3428
3429         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3430             "sending txd %p, in %p\n", data, data->in);
3431         KASSERT(data->in != NULL, ("node is NULL"));
3432
3433         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3434             "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3435             ring->qid, ring->cur, totlen, nsegs,
3436             le32toh(tx->tx_flags),
3437             le32toh(tx->rate_n_flags),
3438             tx->initial_rate_index
3439             );
3440
3441         /* Fill TX descriptor. */
3442         desc->num_tbs = 2 + nsegs;
3443
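         /*
          * TB0 maps the first TB0_SIZE bytes of the command; TB1 maps the rest of
          * the command header, the TX command and the (padded) 802.11 header.
          */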
3444         desc->tbs[0].lo = htole32(data->cmd_paddr);
3445         desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3446             (TB0_SIZE << 4);
3447         desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3448         desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3449             ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3450               + hdrlen + pad - TB0_SIZE) << 4);
3451
3452         /* Other DMA segments are for data payload. */
3453         for (i = 0; i < nsegs; i++) {
3454                 seg = &segs[i];
3455                 desc->tbs[i+2].lo = htole32(seg->ds_addr);
3456                 desc->tbs[i+2].hi_n_len = \
3457                     htole16(iwm_get_dma_hi_addr(seg->ds_addr))
3458                     | ((seg->ds_len) << 4);
3459         }
3460
3461         bus_dmamap_sync(ring->data_dmat, data->map,
3462             BUS_DMASYNC_PREWRITE);
3463         bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3464             BUS_DMASYNC_PREWRITE);
3465         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3466             BUS_DMASYNC_PREWRITE);
3467
3468 #if 0
3469         iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3470 #endif
3471
3472         /* Kick TX ring. */
3473         ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3474         IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3475
3476         /* Mark TX ring as full if we reach a certain threshold. */
3477         if (++ring->queued > IWM_TX_RING_HIMARK) {
3478                 sc->qfullmsk |= 1 << ring->qid;
3479         }
3480
3481         return 0;
3482 }
3483
3484 static int
3485 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3486     const struct ieee80211_bpf_params *params)
3487 {
3488         struct ieee80211com *ic = ni->ni_ic;
3489         struct iwm_softc *sc = ic->ic_softc;
3490         int error = 0;
3491
3492         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3493             "->%s begin\n", __func__);
3494
3495         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3496                 m_freem(m);
3497                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3498                     "<-%s not RUNNING\n", __func__);
3499                 return (ENETDOWN);
3500         }
3501
3502         IWM_LOCK(sc);
3503         /* XXX fix this */
3504         if (params == NULL) {
3505                 error = iwm_tx(sc, m, ni, 0);
3506         } else {
3507                 error = iwm_tx(sc, m, ni, 0);
3508         }
3509         sc->sc_tx_timer = 5;
3510         IWM_UNLOCK(sc);
3511
3512         return (error);
3513 }
3514
3515 /*
3516  * mvm/tx.c
3517  */
3518
3519 #if 0
3520 /*
3521  * Note that there are transports that buffer frames before they reach
3522  * the firmware. This means that after flush_tx_path is called, the
3523  * queue might not be empty. The race-free way to handle this is to:
3524  * 1) set the station as draining
3525  * 2) flush the Tx path
3526  * 3) wait for the transport queues to be empty
3527  */
3528 int
3529 iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
3530 {
3531         struct iwm_tx_path_flush_cmd flush_cmd = {
3532                 .queues_ctl = htole32(tfd_msk),
3533                 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3534         };
3535         int ret;
3536
3537         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
3538             sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
3539             sizeof(flush_cmd), &flush_cmd);
3540         if (ret)
3541                 device_printf(sc->sc_dev,
3542                     "Flushing tx queue failed: %d\n", ret);
3543         return ret;
3544 }
3545 #endif
3546
3547 /*
3548  * BEGIN mvm/sta.c
3549  */
3550
3551 static int
3552 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3553         struct iwm_mvm_add_sta_cmd_v7 *cmd, int *status)
3554 {
3555         return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
3556             cmd, status);
3557 }
3558
3559 /* send station add/update command to firmware */
3560 static int
3561 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3562 {
3563         struct iwm_mvm_add_sta_cmd_v7 add_sta_cmd;
3564         int ret;
3565         uint32_t status;
3566
3567         memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3568
3569         add_sta_cmd.sta_id = IWM_STATION_ID;
3570         add_sta_cmd.mac_id_n_color
3571             = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3572                 IWM_DEFAULT_COLOR));
3573         if (!update) {
3574                 int ac;
3575                 for (ac = 0; ac < WME_NUM_AC; ac++) {
3576                         add_sta_cmd.tfd_queue_msk |=
3577                             htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
3578                 }
3579                 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3580         }
3581         add_sta_cmd.add_modify = update ? 1 : 0;
3582         add_sta_cmd.station_flags_msk
3583             |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3584         add_sta_cmd.tid_disable_tx = htole16(0xffff);
3585         if (update)
3586                 add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
3587
3588         status = IWM_ADD_STA_SUCCESS;
3589         ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3590         if (ret)
3591                 return ret;
3592
3593         switch (status) {
3594         case IWM_ADD_STA_SUCCESS:
3595                 break;
3596         default:
3597                 ret = EIO;
3598                 device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3599                 break;
3600         }
3601
3602         return ret;
3603 }
3604
3605 static int
3606 iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
3607 {
3608         return iwm_mvm_sta_send_to_fw(sc, in, 0);
3609 }
3610
3611 static int
3612 iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
3613 {
3614         return iwm_mvm_sta_send_to_fw(sc, in, 1);
3615 }
3616
3617 static int
3618 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3619         const uint8_t *addr, uint16_t mac_id, uint16_t color)
3620 {
3621         struct iwm_mvm_add_sta_cmd_v7 cmd;
3622         int ret;
3623         uint32_t status;
3624
3625         memset(&cmd, 0, sizeof(cmd));
3626         cmd.sta_id = sta->sta_id;
3627         cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3628
3629         cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3630         cmd.tid_disable_tx = htole16(0xffff);
3631
3632         if (addr)
3633                 IEEE80211_ADDR_COPY(cmd.addr, addr);
3634
3635         ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3636         if (ret)
3637                 return ret;
3638
3639         switch (status) {
3640         case IWM_ADD_STA_SUCCESS:
3641                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3642                     "%s: Internal station added.\n", __func__);
3643                 return 0;
3644         default:
3645                 device_printf(sc->sc_dev,
3646                     "%s: Add internal station failed, status=0x%x\n",
3647                     __func__, status);
3648                 ret = EIO;
3649                 break;
3650         }
3651         return ret;
3652 }
3653
3654 static int
3655 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3656 {
3657         int ret;
3658
3659         sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
3660         sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
3661
3662         ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
3663         if (ret)
3664                 return ret;
3665
3666         ret = iwm_mvm_add_int_sta_common(sc,
3667             &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
3668
3669         if (ret)
3670                 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
3671         return ret;
3672 }
3673
3674 /*
3675  * END mvm/sta.c
3676  */
3677
3678 /*
3679  * BEGIN mvm/quota.c
3680  */
3681
3682 static int
3683 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
3684 {
3685         struct iwm_time_quota_cmd cmd;
3686         int i, idx, ret, num_active_macs, quota, quota_rem;
3687         int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3688         int n_ifs[IWM_MAX_BINDINGS] = {0, };
3689         uint16_t id;
3690
3691         memset(&cmd, 0, sizeof(cmd));
3692
3693         /* currently, PHY ID == binding ID */
3694         if (in) {
3695                 id = in->in_phyctxt->id;
3696                 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3697                 colors[id] = in->in_phyctxt->color;
3698
3699                 if (1)
3700                         n_ifs[id] = 1;
3701         }
3702
3703         /*
3704          * The FW's scheduling session consists of
3705          * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3706          * equally among all the bindings that require quota.
3707          */
3708         num_active_macs = 0;
3709         for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3710                 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3711                 num_active_macs += n_ifs[i];
3712         }
3713
3714         quota = 0;
3715         quota_rem = 0;
3716         if (num_active_macs) {
3717                 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3718                 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3719         }
3720
3721         for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3722                 if (colors[i] < 0)
3723                         continue;
3724
3725                 cmd.quotas[idx].id_and_color =
3726                         htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3727
3728                 if (n_ifs[i] <= 0) {
3729                         cmd.quotas[idx].quota = htole32(0);
3730                         cmd.quotas[idx].max_duration = htole32(0);
3731                 } else {
3732                         cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3733                         cmd.quotas[idx].max_duration = htole32(0);
3734                 }
3735                 idx++;
3736         }
3737
3738         /* Give the remainder of the session to the first binding */
3739         cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3740
3741         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3742             sizeof(cmd), &cmd);
3743         if (ret)
3744                 device_printf(sc->sc_dev,
3745                     "%s: Failed to send quota: %d\n", __func__, ret);
3746         return ret;
3747 }
3748
3749 /*
3750  * END mvm/quota.c
3751  */
3752
3753 /*
3754  * ieee80211 routines
3755  */
3756
3757 /*
3758  * Change to AUTH state in 80211 state machine.  Roughly matches what
3759  * Linux does in bss_info_changed().
3760  */
3761 static int
3762 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3763 {
3764         struct ieee80211_node *ni;
3765         struct iwm_node *in;
3766         struct iwm_vap *iv = IWM_VAP(vap);
3767         uint32_t duration;
3768         int error;
3769
3770         /*
3771          * XXX I have a feeling that the vap node is being
3772          * freed from underneath us. Grr.
3773          */
3774         ni = ieee80211_ref_node(vap->iv_bss);
3775         in = IWM_NODE(ni);
3776         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3777             "%s: called; vap=%p, bss ni=%p\n",
3778             __func__,
3779             vap,
3780             ni);
3781
3782         in->in_assoc = 0;
3783
3784         error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
3785         if (error != 0)
3786                 return error;
3787
3788         error = iwm_allow_mcast(vap, sc);
3789         if (error) {
3790                 device_printf(sc->sc_dev,
3791                     "%s: failed to set multicast\n", __func__);
3792                 goto out;
3793         }
3794
3795         /*
3796          * This is where it deviates from what Linux does.
3797          *
3798          * Linux iwlwifi doesn't reset the nic each time, nor does it
3799          * call ctxt_add() here.  Instead, it adds it during vap creation,
3800          * and always does a mac_ctx_changed().
3801          *
3802          * The OpenBSD port doesn't attempt to do that - it resets things
3803          * at odd states and does the add here.
3804          *
3805          * So, until the state handling is fixed (ie, we never reset
3806          * the NIC except for a firmware failure, which should drag
3807          * the NIC back to IDLE, re-setup and re-add all the mac/phy
3808          * contexts that are required), let's do a dirty hack here.
3809          */
3810         if (iv->is_uploaded) {
3811                 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3812                         device_printf(sc->sc_dev,
3813                             "%s: failed to update MAC\n", __func__);
3814                         goto out;
3815                 }
3816                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3817                     in->in_ni.ni_chan, 1, 1)) != 0) {
3818                         device_printf(sc->sc_dev,
3819                             "%s: failed update phy ctxt\n", __func__);
3820                         goto out;
3821                 }
3822                 in->in_phyctxt = &sc->sc_phyctxt[0];
3823
3824                 if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
3825                         device_printf(sc->sc_dev,
3826                             "%s: binding update cmd\n", __func__);
3827                         goto out;
3828                 }
3829                 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3830                         device_printf(sc->sc_dev,
3831                             "%s: failed to update sta\n", __func__);
3832                         goto out;
3833                 }
3834         } else {
3835                 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
3836                         device_printf(sc->sc_dev,
3837                             "%s: failed to add MAC\n", __func__);
3838                         goto out;
3839                 }
3840                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3841                     in->in_ni.ni_chan, 1, 1)) != 0) {
3842                         device_printf(sc->sc_dev,
3843                             "%s: failed add phy ctxt!\n", __func__);
3844                         error = ETIMEDOUT;
3845                         goto out;
3846                 }
3847                 in->in_phyctxt = &sc->sc_phyctxt[0];
3848
3849                 if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
3850                         device_printf(sc->sc_dev,
3851                             "%s: binding add cmd\n", __func__);
3852                         goto out;
3853                 }
3854                 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
3855                         device_printf(sc->sc_dev,
3856                             "%s: failed to add sta\n", __func__);
3857                         goto out;
3858                 }
3859         }
3860
3861         /*
3862          * Prevent the FW from wandering off channel during association
3863          * by "protecting" the session with a time event.
3864          */
3865         /* XXX duration is in units of TU, not MS */
3866         duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
3867         iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
3868         DELAY(100);
3869
3870         error = 0;
3871 out:
3872         ieee80211_free_node(ni);
3873         return (error);
3874 }
3875
3876 static int
3877 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
3878 {
3879         struct iwm_node *in = IWM_NODE(vap->iv_bss);
3880         int error;
3881
3882         if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3883                 device_printf(sc->sc_dev,
3884                     "%s: failed to update STA\n", __func__);
3885                 return error;
3886         }
3887
3888         in->in_assoc = 1;
3889         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3890                 device_printf(sc->sc_dev,
3891                     "%s: failed to update MAC\n", __func__);
3892                 return error;
3893         }
3894
3895         return 0;
3896 }
3897
3898 static int
3899 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
3900 {
3901         /*
3902          * Ok, so *technically* the proper set of calls for going
3903          * from RUN back to SCAN is:
3904          *
3905          * iwm_mvm_power_mac_disable(sc, in);
3906          * iwm_mvm_mac_ctxt_changed(sc, in);
3907          * iwm_mvm_rm_sta(sc, in);
3908          * iwm_mvm_update_quotas(sc, NULL);
3909          * iwm_mvm_mac_ctxt_changed(sc, in);
3910          * iwm_mvm_binding_remove_vif(sc, in);
3911          * iwm_mvm_mac_ctxt_remove(sc, in);
3912          *
3913          * However, that freezes the device no matter which permutations
3914          * and modifications are attempted.  Obviously, this driver is missing
3915          * something since it works in the Linux driver, but figuring out what
3916          * is missing is a little more complicated.  Now, since we're going
3917          * back to nothing anyway, we'll just do a complete device reset.
3918          * Up yours, device!
3919          */
3920         /* iwm_mvm_flush_tx_path(sc, 0xf, 1); */
3921         iwm_stop_device(sc);
3922         iwm_init_hw(sc);
3923         if (in)
3924                 in->in_assoc = 0;
3925         return 0;
3926
3927 #if 0
3928         int error;
3929
3930         iwm_mvm_power_mac_disable(sc, in);
3931
3932         if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
3933                 device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
3934                 return error;
3935         }
3936
3937         if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
3938                 device_printf(sc->sc_dev, "sta remove fail %d\n", error);
3939                 return error;
3940         }
3941         error = iwm_mvm_rm_sta(sc, in);
3942         in->in_assoc = 0;
3943         iwm_mvm_update_quotas(sc, NULL);
3944         if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
3945                 device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
3946                 return error;
3947         }
3948         iwm_mvm_binding_remove_vif(sc, in);
3949
3950         iwm_mvm_mac_ctxt_remove(sc, in);
3951
3952         return error;
3953 #endif
3954 }
3955
3956 static struct ieee80211_node *
3957 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3958 {
3959         return malloc(sizeof (struct iwm_node), M_80211_NODE,
3960             M_NOWAIT | M_ZERO);
3961 }
3962
3963 static void
3964 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
3965 {
3966         struct ieee80211_node *ni = &in->in_ni;
3967         struct iwm_lq_cmd *lq = &in->in_lq;
3968         int nrates = ni->ni_rates.rs_nrates;
3969         int i, ridx, tab = 0;
3970         int txant = 0;
3971
3972         if (nrates > nitems(lq->rs_table)) {
3973                 device_printf(sc->sc_dev,
3974                     "%s: node supports %d rates, driver handles "
3975                     "only %zu\n", __func__, nrates, nitems(lq->rs_table));
3976                 return;
3977         }
3978         if (nrates == 0) {
3979                 device_printf(sc->sc_dev,
3980                     "%s: node supports 0 rates, odd!\n", __func__);
3981                 return;
3982         }
3983
3984         /*
3985          * XXX .. and most of iwm_node is not initialised explicitly;
3986          * it's all just 0x0 passed to the firmware.
3987          */
3988
3989         /* first figure out which rates we should support */
3990         /* XXX TODO: this isn't 11n aware /at all/ */
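         /* -1 (0xff) marks entries that are not populated below. */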
3991         memset(&in->in_ridx, -1, sizeof(in->in_ridx));
3992         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3993             "%s: nrates=%d\n", __func__, nrates);
3994
3995         /*
3996          * Loop over nrates and populate in_ridx from the highest
3997          * rate to the lowest rate.  Remember, in_ridx[] has
3998          * IEEE80211_RATE_MAXSIZE entries!
3999          */
4000         for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
4001                 int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;
4002
4003                 /* Map 802.11 rate to HW rate index. */
4004                 for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
4005                         if (iwm_rates[ridx].rate == rate)
4006                                 break;
4007                 if (ridx > IWM_RIDX_MAX) {
4008                         device_printf(sc->sc_dev,
4009                             "%s: WARNING: device rate for %d not found!\n",
4010                             __func__, rate);
4011                 } else {
4012                         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4013                             "%s: rate: i: %d, rate=%d, ridx=%d\n",
4014                             __func__,
4015                             i,
4016                             rate,
4017                             ridx);
4018                         in->in_ridx[i] = ridx;
4019                 }
4020         }
4021
4022         /* then construct a lq_cmd based on those */
4023         memset(lq, 0, sizeof(*lq));
4024         lq->sta_id = IWM_STATION_ID;
4025
4026         /* For HT, always enable RTS/CTS to avoid excessive retries. */
4027         if (ni->ni_flags & IEEE80211_NODE_HT)
4028                 lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4029
4030         /*
4031          * Are these used? (We don't do SISO or MIMO.)
4032          * They need to be set to non-zero, though, or we get an error.
4033          */
4034         lq->single_stream_ant_msk = 1;
4035         lq->dual_stream_ant_msk = 1;
4036
4037         /*
4038          * Build the actual rate selection table.
4039          * The lowest bits are the rates.  Additionally,
4040          * CCK needs bit 9 to be set.  The rest of the bits
4041          * we add to the table select the TX antenna.
4042          * Note that we add the rates with the highest rate first
4043          * (opposite of ni_rates).
4044          */
4045         /*
4046          * XXX TODO: this should be looping over the min of nrates
4047          * and LQ_MAX_RETRY_NUM.  Sigh.
4048          */
4049         for (i = 0; i < nrates; i++) {
4050                 int nextant;
4051
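                 /* Rotate through the valid TX antennas: refill the mask when
                  * empty, then take and clear the lowest set bit. */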
4052                 if (txant == 0)
4053                         txant = iwm_fw_valid_tx_ant(sc);
4054                 nextant = 1<<(ffs(txant)-1);
4055                 txant &= ~nextant;
4056
4057                 /*
4058                  * Map the rate id into a rate index into
4059                  * our hardware table containing the
4060                  * configuration to use for this rate.
4061                  */
4062                 ridx = in->in_ridx[i];
4063                 tab = iwm_rates[ridx].plcp;
4064                 tab |= nextant << IWM_RATE_MCS_ANT_POS;
4065                 if (IWM_RIDX_IS_CCK(ridx))
4066                         tab |= IWM_RATE_MCS_CCK_MSK;
4067                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4068                     "station rate i=%d, rate=%d, hw=%x\n",
4069                     i, iwm_rates[ridx].rate, tab);
4070                 lq->rs_table[i] = htole32(tab);
4071         }
4072         /* then fill the rest with the lowest possible rate */
4073         for (i = nrates; i < nitems(lq->rs_table); i++) {
4074                 KASSERT(tab != 0, ("invalid tab"));
4075                 lq->rs_table[i] = htole32(tab);
4076         }
4077 }
4078
4079 static int
4080 iwm_media_change(struct ifnet *ifp)
4081 {
4082         struct ieee80211vap *vap = ifp->if_softc;
4083         struct ieee80211com *ic = vap->iv_ic;
4084         struct iwm_softc *sc = ic->ic_softc;
4085         int error;
4086
4087         error = ieee80211_media_change(ifp);
4088         if (error != ENETRESET)
4089                 return error;
4090
4091         IWM_LOCK(sc);
4092         if (ic->ic_nrunning > 0) {
4093                 iwm_stop(sc);
4094                 iwm_init(sc);
4095         }
4096         IWM_UNLOCK(sc);
4097         return error;
4098 }
4099
4100
4101 static int
4102 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4103 {
4104         struct iwm_vap *ivp = IWM_VAP(vap);
4105         struct ieee80211com *ic = vap->iv_ic;
4106         struct iwm_softc *sc = ic->ic_softc;
4107         struct iwm_node *in;
4108         int error;
4109
4110         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4111             "switching state %s -> %s\n",
4112             ieee80211_state_name[vap->iv_state],
4113             ieee80211_state_name[nstate]);
4114         IEEE80211_UNLOCK(ic);
4115         IWM_LOCK(sc);
4116
4117         if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
4118                 iwm_led_blink_stop(sc);
4119
4120         /* disable beacon filtering if we're hopping out of RUN */
4121         if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
4122                 iwm_mvm_disable_beacon_filter(sc);
4123
4124                 if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4125                         in->in_assoc = 0;
4126
4127                 iwm_release(sc, NULL);
4128
4129                 /*
4130                  * It's impossible to directly go RUN->SCAN. If we iwm_release()
4131                  * above then the card will be completely reinitialized,
4132                  * so the driver must do everything necessary to bring the card
4133                  * from INIT to SCAN.
4134                  *
4135                  * Additionally, upon receiving a deauth frame from the AP,
4136                  * the OpenBSD 802.11 stack puts the driver in the IEEE80211_S_AUTH
4137                  * state. This will also fail with this driver, so bring the FSM
4138                  * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
4139                  *
4140                  * XXX TODO: fix this for FreeBSD!
4141                  */
4142                 if (nstate == IEEE80211_S_SCAN ||
4143                     nstate == IEEE80211_S_AUTH ||
4144                     nstate == IEEE80211_S_ASSOC) {
4145                         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4146                             "Force transition to INIT; MGT=%d\n", arg);
4147                         IWM_UNLOCK(sc);
4148                         IEEE80211_LOCK(ic);
4149                         /* Always pass arg as -1 since we can't Tx right now. */
4150                         /*
4151                          * XXX arg is just ignored anyway when transitioning
4152                          *     to IEEE80211_S_INIT.
4153                          */
4154                         vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
4155                         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4156                             "Going INIT->SCAN\n");
4157                         nstate = IEEE80211_S_SCAN;
4158                         IEEE80211_UNLOCK(ic);
4159                         IWM_LOCK(sc);
4160                 }
4161         }
4162
4163         switch (nstate) {
4164         case IEEE80211_S_INIT:
4165                 break;
4166
4167         case IEEE80211_S_AUTH:
4168                 if ((error = iwm_auth(vap, sc)) != 0) {
4169                         device_printf(sc->sc_dev,
4170                             "%s: could not move to auth state: %d\n",
4171                             __func__, error);
4172                         break;
4173                 }
4174                 break;
4175
4176         case IEEE80211_S_ASSOC:
4177                 if ((error = iwm_assoc(vap, sc)) != 0) {
4178                         device_printf(sc->sc_dev,
4179                             "%s: failed to associate: %d\n", __func__,
4180                             error);
4181                         break;
4182                 }
4183                 break;
4184
4185         case IEEE80211_S_RUN:
4186         {
4187                 struct iwm_host_cmd cmd = {
4188                         .id = IWM_LQ_CMD,
4189                         .len = { sizeof(in->in_lq), },
4190                         .flags = IWM_CMD_SYNC,
4191                 };
4192
4193                 /* Update the association state, now that we have it all */
4194                 /* (e.g. the associd comes in at this point). */
4195                 error = iwm_assoc(vap, sc);
4196                 if (error != 0) {
4197                         device_printf(sc->sc_dev,
4198                             "%s: failed to update association state: %d\n",
4199                             __func__,
4200                             error);
4201                         break;
4202                 }
4203
4204                 in = IWM_NODE(vap->iv_bss);
4205                 iwm_mvm_power_mac_update_mode(sc, in);
4206                 iwm_mvm_enable_beacon_filter(sc, in);
4207                 iwm_mvm_update_quotas(sc, in);
4208                 iwm_setrates(sc, in);
4209
4210                 cmd.data[0] = &in->in_lq;
4211                 if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
4212                         device_printf(sc->sc_dev,
4213                             "%s: IWM_LQ_CMD failed\n", __func__);
4214                 }
4215
4216                 iwm_mvm_led_enable(sc);
4217                 break;
4218         }
4219
4220         default:
4221                 break;
4222         }
4223         IWM_UNLOCK(sc);
4224         IEEE80211_LOCK(ic);
4225
4226         return (ivp->iv_newstate(vap, nstate, arg));
4227 }
4228
4229 void
4230 iwm_endscan_cb(void *arg, int pending)
4231 {
4232         struct iwm_softc *sc = arg;
4233         struct ieee80211com *ic = &sc->sc_ic;
4234
4235         IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4236             "%s: scan ended\n",
4237             __func__);
4238
4239         ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4240 }
4241
4242 /*
4243  * Aging and idle timeouts for the different possible scenarios
4244  * in default configuration
4245  */
4246 static const uint32_t
4247 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4248         {
4249                 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
4250                 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
4251         },
4252         {
4253                 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
4254                 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
4255         },
4256         {
4257                 htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
4258                 htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
4259         },
4260         {
4261                 htole32(IWM_SF_BA_AGING_TIMER_DEF),
4262                 htole32(IWM_SF_BA_IDLE_TIMER_DEF)
4263         },
4264         {
4265                 htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
4266                 htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
4267         },
4268 };
4269
4270 /*
4271  * Aging and idle timeouts for the different possible scenarios
4272  * in single BSS MAC configuration.
4273  */
4274 static const uint32_t
4275 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4276         {
4277                 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
4278                 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
4279         },
4280         {
4281                 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
4282                 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
4283         },
4284         {
4285                 htole32(IWM_SF_MCAST_AGING_TIMER),
4286                 htole32(IWM_SF_MCAST_IDLE_TIMER)
4287         },
4288         {
4289                 htole32(IWM_SF_BA_AGING_TIMER),
4290                 htole32(IWM_SF_BA_IDLE_TIMER)
4291         },
4292         {
4293                 htole32(IWM_SF_TX_RE_AGING_TIMER),
4294                 htole32(IWM_SF_TX_RE_IDLE_TIMER)
4295         },
4296 };
4297
4298 static void
4299 iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
4300     struct ieee80211_node *ni)
4301 {
4302         int i, j, watermark;
4303
4304         sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
4305
4306         /*
4307          * If we are in the association flow, check the antenna configuration
4308          * capabilities of the AP station and choose the watermark accordingly.
4309          */
4310         if (ni) {
4311                 if (ni->ni_flags & IEEE80211_NODE_HT) {
4312 #ifdef notyet
4313                         if (ni->ni_rxmcs[2] != 0)
4314                                 watermark = IWM_SF_W_MARK_MIMO3;
4315                         else if (ni->ni_rxmcs[1] != 0)
4316                                 watermark = IWM_SF_W_MARK_MIMO2;
4317                         else
4318 #endif
4319                                 watermark = IWM_SF_W_MARK_SISO;
4320                 } else {
4321                         watermark = IWM_SF_W_MARK_LEGACY;
4322                 }
4323         /* default watermark value for unassociated mode. */
4324         } else {
4325                 watermark = IWM_SF_W_MARK_MIMO2;
4326         }
4327         sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
4328
4329         for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
4330                 for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
4331                         sf_cmd->long_delay_timeouts[i][j] =
4332                                         htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
4333                 }
4334         }
4335
4336         if (ni) {
4337                 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
4338                        sizeof(iwm_sf_full_timeout));
4339         } else {
4340                 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
4341                        sizeof(iwm_sf_full_timeout_def));
4342         }
4343 }
4344
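     /*
      * Configure the firmware's Smart Fifo (SF) state.  The command carries
      * the watermarks and aging/idle timeouts filled in by
      * iwm_mvm_fill_sf_command() above; FULL_ON uses the current BSS node,
      * while the other states fall back to the defaults.
      */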
4345 static int
4346 iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4347 {
4348         struct ieee80211com *ic = &sc->sc_ic;
4349         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4350         struct iwm_sf_cfg_cmd sf_cmd = {
4351                 .state = htole32(IWM_SF_FULL_ON),
4352         };
4353         int ret = 0;
4354
4355         if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
4356                 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4357
4358         switch (new_state) {
4359         case IWM_SF_UNINIT:
4360         case IWM_SF_INIT_OFF:
4361                 iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4362                 break;
4363         case IWM_SF_FULL_ON:
4364                 iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4365                 break;
4366         default:
4367                 IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4368                     "Invalid state: %d. not sending Smart Fifo cmd\n",
4369                           new_state);
4370                 return EINVAL;
4371         }
4372
4373         ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4374                                    sizeof(sf_cmd), &sf_cmd);
4375         return ret;
4376 }
4377
4378 static int
4379 iwm_send_bt_init_conf(struct iwm_softc *sc)
4380 {
4381         struct iwm_bt_coex_cmd bt_cmd;
4382
4383         bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4384         bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4385
4386         return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4387             &bt_cmd);
4388 }
4389
4390 static int
4391 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4392 {
4393         struct iwm_mcc_update_cmd mcc_cmd;
4394         struct iwm_host_cmd hcmd = {
4395                 .id = IWM_MCC_UPDATE_CMD,
4396                 .flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4397                 .data = { &mcc_cmd },
4398         };
4399         int ret;
4400 #ifdef IWM_DEBUG
4401         struct iwm_rx_packet *pkt;
4402         struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4403         struct iwm_mcc_update_resp *mcc_resp;
4404         int n_channels;
4405         uint16_t mcc;
4406 #endif
4407         int resp_v2 = isset(sc->sc_enabled_capa,
4408             IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4409
4410         memset(&mcc_cmd, 0, sizeof(mcc_cmd));
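             /*
              * The two ASCII characters of the country code are packed into
              * a 16-bit MCC value, e.g. "ZZ" becomes 0x5a5a.
              */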
4411         mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4412         if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4413             isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
4414                 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4415         else
4416                 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4417
4418         if (resp_v2)
4419                 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4420         else
4421                 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4422
4423         IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4424             "send MCC update to FW with '%c%c' src = %d\n",
4425             alpha2[0], alpha2[1], mcc_cmd.source_id);
4426
4427         ret = iwm_send_cmd(sc, &hcmd);
4428         if (ret)
4429                 return ret;
4430
4431 #ifdef IWM_DEBUG
4432         pkt = hcmd.resp_pkt;
4433
4434         /* Extract MCC response */
4435         if (resp_v2) {
4436                 mcc_resp = (void *)pkt->data;
4437                 mcc = mcc_resp->mcc;
4438                 n_channels =  le32toh(mcc_resp->n_channels);
4439         } else {
4440                 mcc_resp_v1 = (void *)pkt->data;
4441                 mcc = mcc_resp_v1->mcc;
4442                 n_channels =  le32toh(mcc_resp_v1->n_channels);
4443         }
4444
4445         /* Workaround for a FW/NVM issue - it returns 0x00 for the world domain */
4446         if (mcc == 0)
4447                 mcc = 0x3030;  /* "00" - world */
4448
4449         IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4450             "regulatory domain '%c%c' (%d channels available)\n",
4451             mcc >> 8, mcc & 0xff, n_channels);
4452 #endif
4453         iwm_free_resp(sc, &hcmd);
4454
4455         return 0;
4456 }
4457
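     /*
      * Tell the firmware which thermal-throttling TX backoff value to use;
      * iwm_init_hw() passes 0 to select the minimum backoff.
      */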
4458 static void
4459 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4460 {
4461         struct iwm_host_cmd cmd = {
4462                 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4463                 .len = { sizeof(uint32_t), },
4464                 .data = { &backoff, },
4465         };
4466
4467         if (iwm_send_cmd(sc, &cmd) != 0) {
4468                 device_printf(sc->sc_dev,
4469                     "failed to change thermal tx backoff\n");
4470         }
4471 }
4472
4473 static int
4474 iwm_init_hw(struct iwm_softc *sc)
4475 {
4476         struct ieee80211com *ic = &sc->sc_ic;
4477         int error, i, ac;
4478
4479         if ((error = iwm_start_hw(sc)) != 0) {
4480                 printf("iwm_start_hw: failed %d\n", error);
4481                 return error;
4482         }
4483
4484         if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4485                 printf("iwm_run_init_mvm_ucode: failed %d\n", error);
4486                 return error;
4487         }
4488
4489         /*
4490          * We should stop and restart the HW since the INIT firmware
4491          * image has just been loaded.
4492          */
4493         iwm_stop_device(sc);
4494         if ((error = iwm_start_hw(sc)) != 0) {
4495                 device_printf(sc->sc_dev, "could not initialize hardware\n");
4496                 return error;
4497         }
4498
4499         /* restart, this time with the regular firmware */
4500         error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
4501         if (error) {
4502                 device_printf(sc->sc_dev, "could not load firmware\n");
4503                 goto error;
4504         }
4505
4506         if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4507                 device_printf(sc->sc_dev, "bt init conf failed\n");
4508                 goto error;
4509         }
4510
4511         if ((error = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc))) != 0) {
4512                 device_printf(sc->sc_dev, "antenna config failed\n");
4513                 goto error;
4514         }
4515
4516         /* Send phy db control command and then phy db calibration */
4517         if ((error = iwm_send_phy_db_data(sc)) != 0) {
4518                 device_printf(sc->sc_dev, "phy_db_data failed\n");
4519                 goto error;
4520         }
4521
4522         if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4523                 device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4524                 goto error;
4525         }
4526
4527         /* Add auxiliary station for scanning */
4528         if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4529                 device_printf(sc->sc_dev, "add_aux_sta failed\n");
4530                 goto error;
4531         }
4532
4533         for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4534                 /*
4535                  * The channel used here isn't relevant as it's
4536                  * going to be overwritten in the other flows.
4537                  * For now use the first channel we have.
4538                  */
4539                 if ((error = iwm_mvm_phy_ctxt_add(sc,
4540                     &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4541                         goto error;
4542         }
4543
4544         /* Initialize tx backoffs to the minimum. */
4545         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
4546                 iwm_mvm_tt_tx_backoff(sc, 0);
4547
4548         error = iwm_mvm_power_update_device(sc);
4549         if (error)
4550                 goto error;
4551
4552         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
4553                 if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4554                         goto error;
4555         }
4556
4557         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4558                 if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4559                         goto error;
4560         }
4561
4562         /* Enable Tx queues. */
4563         for (ac = 0; ac < WME_NUM_AC; ac++) {
4564                 error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4565                     iwm_mvm_ac_to_tx_fifo[ac]);
4566                 if (error)
4567                         goto error;
4568         }
4569
4570         if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4571                 device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4572                 goto error;
4573         }
4574
4575         return 0;
4576
4577  error:
4578         iwm_stop_device(sc);
4579         return error;
4580 }
4581
4582 /* Allow multicast from our BSSID. */
4583 static int
4584 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4585 {
4586         struct ieee80211_node *ni = vap->iv_bss;
4587         struct iwm_mcast_filter_cmd *cmd;
4588         size_t size;
4589         int error;
4590
4591         size = roundup(sizeof(*cmd), 4);
4592         cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4593         if (cmd == NULL)
4594                 return ENOMEM;
4595         cmd->filter_own = 1;
4596         cmd->port_id = 0;
4597         cmd->count = 0;
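             /*
              * No explicit address list is programmed (count == 0); pass_all
              * asks the firmware to pass all multicast frames up to the host.
              */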
4598         cmd->pass_all = 1;
4599         IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4600
4601         error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4602             IWM_CMD_SYNC, size, cmd);
4603         free(cmd, M_DEVBUF);
4604
4605         return (error);
4606 }
4607
4608 /*
4609  * ifnet interfaces
4610  */
4611
4612 static void
4613 iwm_init(struct iwm_softc *sc)
4614 {
4615         int error;
4616
4617         if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4618                 return;
4619         }
4620         sc->sc_generation++;
4621         sc->sc_flags &= ~IWM_FLAG_STOPPED;
4622
4623         if ((error = iwm_init_hw(sc)) != 0) {
4624                 printf("iwm_init_hw failed %d\n", error);
4625                 iwm_stop(sc);
4626                 return;
4627         }
4628
4629         /*
4630          * Ok, firmware loaded and we are jogging
4631          */
4632         sc->sc_flags |= IWM_FLAG_HW_INITED;
4633         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4634 }
4635
4636 static int
4637 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4638 {
4639         struct iwm_softc *sc;
4640         int error;
4641
4642         sc = ic->ic_softc;
4643
4644         IWM_LOCK(sc);
4645         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4646                 IWM_UNLOCK(sc);
4647                 return (ENXIO);
4648         }
4649         error = mbufq_enqueue(&sc->sc_snd, m);
4650         if (error) {
4651                 IWM_UNLOCK(sc);
4652                 return (error);
4653         }
4654         iwm_start(sc);
4655         IWM_UNLOCK(sc);
4656         return (0);
4657 }
4658
4659 /*
4660  * Dequeue packets from sendq and call send.
4661  */
4662 static void
4663 iwm_start(struct iwm_softc *sc)
4664 {
4665         struct ieee80211_node *ni;
4666         struct mbuf *m;
4667         int ac = 0;
4668
4669         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4670         while (sc->qfullmsk == 0 &&
4671                 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4672                 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4673                 if (iwm_tx(sc, m, ni, ac) != 0) {
4674                         if_inc_counter(ni->ni_vap->iv_ifp,
4675                             IFCOUNTER_OERRORS, 1);
4676                         ieee80211_free_node(ni);
4677                         continue;
4678                 }
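                     /*
                      * (Re)arm the TX watchdog; iwm_watchdog() ticks it down
                      * once per second.
                      */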
4679                 sc->sc_tx_timer = 15;
4680         }
4681         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4682 }
4683
4684 static void
4685 iwm_stop(struct iwm_softc *sc)
4686 {
4687
4688         sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4689         sc->sc_flags |= IWM_FLAG_STOPPED;
4690         sc->sc_generation++;
4691         iwm_led_blink_stop(sc);
4692         sc->sc_tx_timer = 0;
4693         iwm_stop_device(sc);
4694 }
4695
4696 static void
4697 iwm_watchdog(void *arg)
4698 {
4699         struct iwm_softc *sc = arg;
4700         struct ieee80211com *ic = &sc->sc_ic;
4701
4702         if (sc->sc_tx_timer > 0) {
4703                 if (--sc->sc_tx_timer == 0) {
4704                         device_printf(sc->sc_dev, "device timeout\n");
4705 #ifdef IWM_DEBUG
4706                         iwm_nic_error(sc);
4707 #endif
4708                         ieee80211_restart_all(ic);
4709                         counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4710                         return;
4711                 }
4712         }
4713         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4714 }
4715
4716 static void
4717 iwm_parent(struct ieee80211com *ic)
4718 {
4719         struct iwm_softc *sc = ic->ic_softc;
4720         int startall = 0;
4721
4722         IWM_LOCK(sc);
4723         if (ic->ic_nrunning > 0) {
4724                 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4725                         iwm_init(sc);
4726                         startall = 1;
4727                 }
4728         } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4729                 iwm_stop(sc);
4730         IWM_UNLOCK(sc);
4731         if (startall)
4732                 ieee80211_start_all(ic);
4733 }
4734
4735 /*
4736  * The interrupt side of things
4737  */
4738
4739 /*
4740  * error dumping routines are from iwlwifi/mvm/utils.c
4741  */
4742
4743 /*
4744  * Note: This structure is read from the device with IO accesses,
4745  * and the reading already does the endian conversion. As it is
4746  * read with uint32_t-sized accesses, any members with a different size
4747  * need to be ordered correctly though!
4748  */
4749 struct iwm_error_event_table {
4750         uint32_t valid;         /* (nonzero) valid, (0) log is empty */
4751         uint32_t error_id;              /* type of error */
4752         uint32_t trm_hw_status0;        /* TRM HW status */
4753         uint32_t trm_hw_status1;        /* TRM HW status */
4754         uint32_t blink2;                /* branch link */
4755         uint32_t ilink1;                /* interrupt link */
4756         uint32_t ilink2;                /* interrupt link */
4757         uint32_t data1;         /* error-specific data */
4758         uint32_t data2;         /* error-specific data */
4759         uint32_t data3;         /* error-specific data */
4760         uint32_t bcon_time;             /* beacon timer */
4761         uint32_t tsf_low;               /* network timestamp function timer */
4762         uint32_t tsf_hi;                /* network timestamp function timer */
4763         uint32_t gp1;           /* GP1 timer register */
4764         uint32_t gp2;           /* GP2 timer register */
4765         uint32_t fw_rev_type;   /* firmware revision type */
4766         uint32_t major;         /* uCode version major */
4767         uint32_t minor;         /* uCode version minor */
4768         uint32_t hw_ver;                /* HW Silicon version */
4769         uint32_t brd_ver;               /* HW board version */
4770         uint32_t log_pc;                /* log program counter */
4771         uint32_t frame_ptr;             /* frame pointer */
4772         uint32_t stack_ptr;             /* stack pointer */
4773         uint32_t hcmd;          /* last host command header */
4774         uint32_t isr0;          /* isr status register LMPM_NIC_ISR0:
4775                                  * rxtx_flag */
4776         uint32_t isr1;          /* isr status register LMPM_NIC_ISR1:
4777                                  * host_flag */
4778         uint32_t isr2;          /* isr status register LMPM_NIC_ISR2:
4779                                  * enc_flag */
4780         uint32_t isr3;          /* isr status register LMPM_NIC_ISR3:
4781                                  * time_flag */
4782         uint32_t isr4;          /* isr status register LMPM_NIC_ISR4:
4783                                  * wico interrupt */
4784         uint32_t last_cmd_id;   /* last HCMD id handled by the firmware */
4785         uint32_t wait_event;            /* wait event() caller address */
4786         uint32_t l2p_control;   /* L2pControlField */
4787         uint32_t l2p_duration;  /* L2pDurationField */
4788         uint32_t l2p_mhvalid;   /* L2pMhValidBits */
4789         uint32_t l2p_addr_match;        /* L2pAddrMatchStat */
4790         uint32_t lmpm_pmg_sel;  /* indicate which clocks are turned on
4791                                  * (LMPM_PMG_SEL) */
4792         uint32_t u_timestamp;   /* indicates the date and time of the
4793                                  * compilation */
4794         uint32_t flow_handler;  /* FH read/write pointers, RX credit */
4795 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
4796
4797 /*
4798  * UMAC error struct - relevant starting from family 8000 chip.
4799  * Note: This structure is read from the device with IO accesses,
4800  * and the reading already does the endian conversion. As it is
4801  * read with u32-sized accesses, any members with a different size
4802  * need to be ordered correctly though!
4803  */
4804 struct iwm_umac_error_event_table {
4805         uint32_t valid;         /* (nonzero) valid, (0) log is empty */
4806         uint32_t error_id;      /* type of error */
4807         uint32_t blink1;        /* branch link */
4808         uint32_t blink2;        /* branch link */
4809         uint32_t ilink1;        /* interrupt link */
4810         uint32_t ilink2;        /* interrupt link */
4811         uint32_t data1;         /* error-specific data */
4812         uint32_t data2;         /* error-specific data */
4813         uint32_t data3;         /* error-specific data */
4814         uint32_t umac_major;
4815         uint32_t umac_minor;
4816         uint32_t frame_pointer; /* core register 27*/
4817         uint32_t stack_pointer; /* core register 28 */
4818         uint32_t cmd_header;    /* latest host cmd sent to UMAC */
4819         uint32_t nic_isr_pref;  /* ISR status register */
4820 } __packed;
4821
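     /*
      * These mirror the iwlwifi error-log layout: the log entries start one
      * 32-bit word into the buffer and each entry is seven 32-bit words.
      */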
4822 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
4823 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
4824
4825 #ifdef IWM_DEBUG
4826 struct {
4827         const char *name;
4828         uint8_t num;
4829 } advanced_lookup[] = {
4830         { "NMI_INTERRUPT_WDG", 0x34 },
4831         { "SYSASSERT", 0x35 },
4832         { "UCODE_VERSION_MISMATCH", 0x37 },
4833         { "BAD_COMMAND", 0x38 },
4834         { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
4835         { "FATAL_ERROR", 0x3D },
4836         { "NMI_TRM_HW_ERR", 0x46 },
4837         { "NMI_INTERRUPT_TRM", 0x4C },
4838         { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
4839         { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
4840         { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
4841         { "NMI_INTERRUPT_HOST", 0x66 },
4842         { "NMI_INTERRUPT_ACTION_PT", 0x7C },
4843         { "NMI_INTERRUPT_UNKNOWN", 0x84 },
4844         { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
4845         { "ADVANCED_SYSASSERT", 0 },
4846 };
4847
4848 static const char *
4849 iwm_desc_lookup(uint32_t num)
4850 {
4851         int i;
4852
4853         for (i = 0; i < nitems(advanced_lookup) - 1; i++)
4854                 if (advanced_lookup[i].num == num)
4855                         return advanced_lookup[i].name;
4856
4857         /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
4858         return advanced_lookup[i].name;
4859 }
4860
4861 static void
4862 iwm_nic_umac_error(struct iwm_softc *sc)
4863 {
4864         struct iwm_umac_error_event_table table;
4865         uint32_t base;
4866
4867         base = sc->sc_uc.uc_umac_error_event_table;
4868
4869         if (base < 0x800000) {
4870                 device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
4871                     base);
4872                 return;
4873         }
4874
4875         if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
4876                 device_printf(sc->sc_dev, "reading errlog failed\n");
4877                 return;
4878         }
4879
4880         if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
4881                 device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
4882                 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
4883                     sc->sc_flags, table.valid);
4884         }
4885
4886         device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
4887                 iwm_desc_lookup(table.error_id));
4888         device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
4889         device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
4890         device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
4891             table.ilink1);
4892         device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
4893             table.ilink2);
4894         device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
4895         device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
4896         device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
4897         device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
4898         device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
4899         device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
4900             table.frame_pointer);
4901         device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
4902             table.stack_pointer);
4903         device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
4904         device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
4905             table.nic_isr_pref);
4906 }
4907
4908 /*
4909  * Support for dumping the error log seemed like a good idea ...
4910  * but it's mostly hex junk and the only sensible thing is the
4911  * hw/ucode revision (which we know anyway).  Since it's here,
4912  * I'll just leave it in, just in case e.g. the Intel guys want to
4913  * help us decipher some "ADVANCED_SYSASSERT" later.
4914  */
4915 static void
4916 iwm_nic_error(struct iwm_softc *sc)
4917 {
4918         struct iwm_error_event_table table;
4919         uint32_t base;
4920
4921         device_printf(sc->sc_dev, "dumping device error log\n");
4922         base = sc->sc_uc.uc_error_event_table;
4923         if (base < 0x800000) {
4924                 device_printf(sc->sc_dev,
4925                     "Invalid error log pointer 0x%08x\n", base);
4926                 return;
4927         }
4928
4929         if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
4930                 device_printf(sc->sc_dev, "reading errlog failed\n");
4931                 return;
4932         }
4933
4934         if (!table.valid) {
4935                 device_printf(sc->sc_dev, "errlog not found, skipping\n");
4936                 return;
4937         }
4938
4939         if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
4940                 device_printf(sc->sc_dev, "Start Error Log Dump:\n");
4941                 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
4942                     sc->sc_flags, table.valid);
4943         }
4944
4945         device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
4946             iwm_desc_lookup(table.error_id));
4947         device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
4948             table.trm_hw_status0);
4949         device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
4950             table.trm_hw_status1);
4951         device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
4952         device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
4953         device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
4954         device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
4955         device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
4956         device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
4957         device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
4958         device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
4959         device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
4960         device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
4961         device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
4962         device_printf(sc->sc_dev, "%08X | uCode revision type\n",
4963             table.fw_rev_type);
4964         device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
4965         device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
4966         device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
4967         device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
4968         device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
4969         device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
4970         device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
4971         device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
4972         device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
4973         device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
4974         device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
4975         device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
4976         device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
4977         device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
4978         device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
4979         device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
4980         device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
4981         device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
4982         device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
4983
4984         if (sc->sc_uc.uc_umac_error_event_table)
4985                 iwm_nic_umac_error(sc);
4986 }
4987 #endif
4988
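     /*
      * Helpers for the notification handler below: sync the RX buffer for
      * CPU access and point the given variable at the payload that
      * immediately follows the iwm_rx_packet header.
      */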
4989 #define SYNC_RESP_STRUCT(_var_, _pkt_)                                  \
4990 do {                                                                    \
4991         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
4992         _var_ = (void *)((_pkt_)+1);                                    \
4993 } while (/*CONSTCOND*/0)
4994
4995 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)                              \
4996 do {                                                                    \
4997         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
4998         _ptr_ = (void *)((_pkt_)+1);                                    \
4999 } while (/*CONSTCOND*/0)
5000
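     /* Advance the RX ring read index, wrapping at IWM_RX_RING_COUNT. */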
5001 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
5002
5003 /*
5004  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5005  * Basic structure from if_iwn
5006  */
5007 static void
5008 iwm_notif_intr(struct iwm_softc *sc)
5009 {
5010         struct ieee80211com *ic = &sc->sc_ic;
5011         uint16_t hw;
5012
5013         bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5014             BUS_DMASYNC_POSTREAD);
5015
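             /*
              * closed_rb_num is the index of the most recently closed receive
              * buffer, as written back by the device; only its low 12 bits
              * are used.
              */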
5016         hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5017
5018         /*
5019          * Process responses
5020          */
5021         while (sc->rxq.cur != hw) {
5022                 struct iwm_rx_ring *ring = &sc->rxq;
5023                 struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
5024                 struct iwm_rx_packet *pkt;
5025                 struct iwm_cmd_response *cresp;
5026                 int qid, idx, code;
5027
5028                 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
5029                     BUS_DMASYNC_POSTREAD);
5030                 pkt = mtod(data->m, struct iwm_rx_packet *);
5031
5032                 qid = pkt->hdr.qid & ~0x80;
5033                 idx = pkt->hdr.idx;
5034
5035                 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5036                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5037                     "rx packet qid=%d idx=%d type=%x %d %d\n",
5038                     pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, sc->rxq.cur, hw);
5039
5040                 /*
5041                  * randomly get these from the firmware, no idea why.
5042                  * they at least seem harmless, so just ignore them for now
5043                  */
5044                 if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
5045                     || pkt->len_n_flags == htole32(0x55550000))) {
5046                         ADVANCE_RXQ(sc);
5047                         continue;
5048                 }
5049
5050                 switch (code) {
5051                 case IWM_REPLY_RX_PHY_CMD:
5052                         iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
5053                         break;
5054
5055                 case IWM_REPLY_RX_MPDU_CMD:
5056                         iwm_mvm_rx_rx_mpdu(sc, pkt, data);
5057                         break;
5058
5059                 case IWM_TX_CMD:
5060                         iwm_mvm_rx_tx_cmd(sc, pkt, data);
5061                         break;
5062
5063                 case IWM_MISSED_BEACONS_NOTIFICATION: {
5064                         struct iwm_missed_beacons_notif *resp;
5065                         int missed;
5066
5067                         /* XXX look at mac_id to determine interface ID */
5068                         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5069
5070                         SYNC_RESP_STRUCT(resp, pkt);
5071                         missed = le32toh(resp->consec_missed_beacons);
5072
5073                         IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5074                             "%s: MISSED_BEACON: mac_id=%d, "
5075                             "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5076                             "num_rx=%d\n",
5077                             __func__,
5078                             le32toh(resp->mac_id),
5079                             le32toh(resp->consec_missed_beacons_since_last_rx),
5080                             le32toh(resp->consec_missed_beacons),
5081                             le32toh(resp->num_expected_beacons),
5082                             le32toh(resp->num_recvd_beacons));
5083
5084                         /* Be paranoid */
5085                         if (vap == NULL)
5086                                 break;
5087
5088                         /* XXX no net80211 locking? */
5089                         if (vap->iv_state == IEEE80211_S_RUN &&
5090                             (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5091                                 if (missed > vap->iv_bmissthreshold) {
5092                                         /* XXX bad locking; turn into task */
5093                                         IWM_UNLOCK(sc);
5094                                         ieee80211_beacon_miss(ic);
5095                                         IWM_LOCK(sc);
5096                                 }
5097                         }
5098
5099                         break; }
5100
5101                 case IWM_MFUART_LOAD_NOTIFICATION:
5102                         break;
5103
5104                 case IWM_MVM_ALIVE: {
5105                         struct iwm_mvm_alive_resp_v1 *resp1;
5106                         struct iwm_mvm_alive_resp_v2 *resp2;
5107                         struct iwm_mvm_alive_resp_v3 *resp3;
5108
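                             /*
                              * The ALIVE response layout differs between
                              * firmware API versions; tell them apart by
                              * payload size.
                              */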
5109                         if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
5110                                 SYNC_RESP_STRUCT(resp1, pkt);
5111                                 sc->sc_uc.uc_error_event_table
5112                                     = le32toh(resp1->error_event_table_ptr);
5113                                 sc->sc_uc.uc_log_event_table
5114                                     = le32toh(resp1->log_event_table_ptr);
5115                                 sc->sched_base = le32toh(resp1->scd_base_ptr);
5116                                 if (resp1->status == IWM_ALIVE_STATUS_OK)
5117                                         sc->sc_uc.uc_ok = 1;
5118                                 else
5119                                         sc->sc_uc.uc_ok = 0;
5120                         }
5121
5122                         if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
5123                                 SYNC_RESP_STRUCT(resp2, pkt);
5124                                 sc->sc_uc.uc_error_event_table
5125                                     = le32toh(resp2->error_event_table_ptr);
5126                                 sc->sc_uc.uc_log_event_table
5127                                     = le32toh(resp2->log_event_table_ptr);
5128                                 sc->sched_base = le32toh(resp2->scd_base_ptr);
5129                                 sc->sc_uc.uc_umac_error_event_table
5130                                     = le32toh(resp2->error_info_addr);
5131                                 if (resp2->status == IWM_ALIVE_STATUS_OK)
5132                                         sc->sc_uc.uc_ok = 1;
5133                                 else
5134                                         sc->sc_uc.uc_ok = 0;
5135                         }
5136
5137                         if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
5138                                 SYNC_RESP_STRUCT(resp3, pkt);
5139                                 sc->sc_uc.uc_error_event_table
5140                                     = le32toh(resp3->error_event_table_ptr);
5141                                 sc->sc_uc.uc_log_event_table
5142                                     = le32toh(resp3->log_event_table_ptr);
5143                                 sc->sched_base = le32toh(resp3->scd_base_ptr);
5144                                 sc->sc_uc.uc_umac_error_event_table
5145                                     = le32toh(resp3->error_info_addr);
5146                                 if (resp3->status == IWM_ALIVE_STATUS_OK)
5147                                         sc->sc_uc.uc_ok = 1;
5148                                 else
5149                                         sc->sc_uc.uc_ok = 0;
5150                         }
5151
5152                         sc->sc_uc.uc_intr = 1;
5153                         wakeup(&sc->sc_uc);
5154                         break; }
5155
5156                 case IWM_CALIB_RES_NOTIF_PHY_DB: {
5157                         struct iwm_calib_res_notif_phy_db *phy_db_notif;
5158                         SYNC_RESP_STRUCT(phy_db_notif, pkt);
5159
5160                         iwm_phy_db_set_section(sc, phy_db_notif);
5161
5162                         break; }
5163
5164                 case IWM_STATISTICS_NOTIFICATION: {
5165                         struct iwm_notif_statistics *stats;
5166                         SYNC_RESP_STRUCT(stats, pkt);
5167                         memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5168                         sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
5169                         break; }
5170
5171                 case IWM_NVM_ACCESS_CMD:
5172                 case IWM_MCC_UPDATE_CMD:
5173                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
5174                                 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
5175                                     BUS_DMASYNC_POSTREAD);
5176                                 memcpy(sc->sc_cmd_resp,
5177                                     pkt, sizeof(sc->sc_cmd_resp));
5178                         }
5179                         break;
5180
5181                 case IWM_MCC_CHUB_UPDATE_CMD: {
5182                         struct iwm_mcc_chub_notif *notif;
5183                         SYNC_RESP_STRUCT(notif, pkt);
5184
5185                         sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5186                         sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5187                         sc->sc_fw_mcc[2] = '\0';
5188                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
5189                             "fw source %d sent CC '%s'\n",
5190                             notif->source_id, sc->sc_fw_mcc);
5191                         break; }
5192
5193                 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5194                         break;
5195
5196                 case IWM_PHY_CONFIGURATION_CMD:
5197                 case IWM_TX_ANT_CONFIGURATION_CMD:
5198                 case IWM_ADD_STA:
5199                 case IWM_MAC_CONTEXT_CMD:
5200                 case IWM_REPLY_SF_CFG_CMD:
5201                 case IWM_POWER_TABLE_CMD:
5202                 case IWM_PHY_CONTEXT_CMD:
5203                 case IWM_BINDING_CONTEXT_CMD:
5204                 case IWM_TIME_EVENT_CMD:
5205                 case IWM_SCAN_REQUEST_CMD:
5206                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5207                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5208                 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5209                 case IWM_REPLY_BEACON_FILTERING_CMD:
5210                 case IWM_MAC_PM_POWER_TABLE:
5211                 case IWM_TIME_QUOTA_CMD:
5212                 case IWM_REMOVE_STA:
5213                 case IWM_TXPATH_FLUSH:
5214                 case IWM_LQ_CMD:
5215                 case IWM_BT_CONFIG:
5216                 case IWM_REPLY_THERMAL_MNG_BACKOFF:
5217                         SYNC_RESP_STRUCT(cresp, pkt);
5218                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
5219                                 memcpy(sc->sc_cmd_resp,
5220                                     pkt, sizeof(*pkt)+sizeof(*cresp));
5221                         }
5222                         break;
5223
5224                 /* ignore */
5225                 case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
5226                         break;
5227
5228                 case IWM_INIT_COMPLETE_NOTIF:
5229                         sc->sc_init_complete = 1;
5230                         wakeup(&sc->sc_init_complete);
5231                         break;
5232
5233                 case IWM_SCAN_OFFLOAD_COMPLETE: {
5234                         struct iwm_periodic_scan_complete *notif;
5235                         SYNC_RESP_STRUCT(notif, pkt);
5236                         break;
5237                 }
5238
5239                 case IWM_SCAN_ITERATION_COMPLETE: {
5240                         struct iwm_lmac_scan_complete_notif *notif;
5241                         SYNC_RESP_STRUCT(notif, pkt);
5242                         ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5243                         break;
5244                 }
5245  
5246                 case IWM_SCAN_COMPLETE_UMAC: {
5247                         struct iwm_umac_scan_complete *notif;
5248                         SYNC_RESP_STRUCT(notif, pkt);
5249
5250                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
5251                             "UMAC scan complete, status=0x%x\n",
5252                             notif->status);
5253 #if 0   /* XXX This would be a duplicate scan end call */
5254                         taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
5255 #endif
5256                         break;
5257                 }
5258
5259                 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5260                         struct iwm_umac_scan_iter_complete_notif *notif;
5261                         SYNC_RESP_STRUCT(notif, pkt);
5262
5263                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5264                             "complete, status=0x%x, %d channels scanned\n",
5265                             notif->status, notif->scanned_channels);
5266                         ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5267                         break;
5268                 }
5269
5270                 case IWM_REPLY_ERROR: {
5271                         struct iwm_error_resp *resp;
5272                         SYNC_RESP_STRUCT(resp, pkt);
5273
5274                         device_printf(sc->sc_dev,
5275                             "firmware error 0x%x, cmd 0x%x\n",
5276                             le32toh(resp->error_type),
5277                             resp->cmd_id);
5278                         break;
5279                 }
5280
5281                 case IWM_TIME_EVENT_NOTIFICATION: {
5282                         struct iwm_time_event_notif *notif;
5283                         SYNC_RESP_STRUCT(notif, pkt);
5284
5285                         IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5286                             "TE notif status = 0x%x action = 0x%x\n",
5287                             notif->status, notif->action);
5288                         break;
5289                 }
5290
5291                 case IWM_MCAST_FILTER_CMD:
5292                         break;
5293
5294                 case IWM_SCD_QUEUE_CFG: {
5295                         struct iwm_scd_txq_cfg_rsp *rsp;
5296                         SYNC_RESP_STRUCT(rsp, pkt);
5297
5298                         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5299                             "queue cfg token=0x%x sta_id=%d "
5300                             "tid=%d scd_queue=%d\n",
5301                             rsp->token, rsp->sta_id, rsp->tid,
5302                             rsp->scd_queue);
5303                         break;
5304                 }
5305
5306                 default:
5307                         device_printf(sc->sc_dev,
5308                             "frame %d/%d %x UNHANDLED (this should "
5309                             "not happen)\n", qid, idx,
5310                             pkt->len_n_flags);
5311                         break;
5312                 }
5313
5314                 /*
5315                  * Why test bit 0x80?  The Linux driver:
5316                  *
5317                  * There is one exception:  uCode sets bit 15 when it
5318                  * originates the response/notification, i.e. when the
5319                  * response/notification is not a direct response to a
5320                  * command sent by the driver.  For example, uCode issues
5321                  * IWM_REPLY_RX when it sends a received frame to the driver;
5322                  * it is not a direct response to any driver command.
5323                  *
5324                  * Ok, so since when is 7 == 15?  Well, the Linux driver
5325                  * uses a slightly different format for pkt->hdr, and "qid"
5326                  * is actually the upper byte of a two-byte field.
5327                  */
5328                 if (!(pkt->hdr.qid & (1 << 7))) {
5329                         iwm_cmd_done(sc, pkt);
5330                 }
5331
5332                 ADVANCE_RXQ(sc);
5333         }
5334
5335         IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
5336             IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
5337
5338         /*
5339          * Tell the firmware what we have processed.
5340          * Seems like the hardware gets upset unless we align
5341          * the write by 8??
5342          */
5343         hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5344         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
5345 }
5346
5347 static void
5348 iwm_intr(void *arg)
5349 {
5350         struct iwm_softc *sc = arg;
5351         int handled = 0;
5352         int r1, r2, rv = 0;
5353         int isperiodic = 0;
5354
5355         IWM_LOCK(sc);
5356         IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5357
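             /*
              * With ICT enabled, interrupt causes are read from the in-memory
              * ICT table the device writes into, instead of from IWM_CSR_INT.
              */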
5358         if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5359                 uint32_t *ict = sc->ict_dma.vaddr;
5360                 int tmp;
5361
5362                 tmp = htole32(ict[sc->ict_cur]);
5363                 if (!tmp)
5364                         goto out_ena;
5365
5366                 /*
5367                  * ok, there was something.  keep plowing until we have all.
5368                  */
5369                 r1 = r2 = 0;
5370                 while (tmp) {
5371                         r1 |= tmp;
5372                         ict[sc->ict_cur] = 0;
5373                         sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5374                         tmp = htole32(ict[sc->ict_cur]);
5375                 }
5376
5377                 /* this is where the fun begins.  don't ask */
5378                 if (r1 == 0xffffffff)
5379                         r1 = 0;
5380
5381                 /* i am not expected to understand this */
5382                 if (r1 & 0xc0000)
5383                         r1 |= 0x8000;
5384                 r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5385         } else {
5386                 r1 = IWM_READ(sc, IWM_CSR_INT);
5387                 /* "hardware gone" (where, fishing?) */
5388                 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5389                         goto out;
5390                 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5391         }
5392         if (r1 == 0 && r2 == 0) {
5393                 goto out_ena;
5394         }
5395
5396         IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5397
5398         /* ignored */
5399         handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
5400
5401         if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5402                 int i;
5403                 struct ieee80211com *ic = &sc->sc_ic;
5404                 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5405
5406 #ifdef IWM_DEBUG
5407                 iwm_nic_error(sc);
5408 #endif
5409                 /* Dump driver status (TX and RX rings) while we're here. */
5410                 device_printf(sc->sc_dev, "driver status:\n");
5411                 for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5412                         struct iwm_tx_ring *ring = &sc->txq[i];
5413                         device_printf(sc->sc_dev,
5414                             "  tx ring %2d: qid=%-2d cur=%-3d "
5415                             "queued=%-3d\n",
5416                             i, ring->qid, ring->cur, ring->queued);
5417                 }
5418                 device_printf(sc->sc_dev,
5419                     "  rx ring: cur=%d\n", sc->rxq.cur);
5420                 device_printf(sc->sc_dev,
5421                     "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5422
5423                 /* Don't stop the device; just do a VAP restart */
5424                 IWM_UNLOCK(sc);
5425
5426                 if (vap == NULL) {
5427                         printf("%s: null vap\n", __func__);
5428                         return;
5429                 }
5430
5431                 device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5432                     "restarting\n", __func__, vap->iv_state);
5433
5434                 /* XXX TODO: turn this into a callout/taskqueue */
5435                 ieee80211_restart_all(ic);
5436                 return;
5437         }
5438
5439         if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5440                 handled |= IWM_CSR_INT_BIT_HW_ERR;
5441                 device_printf(sc->sc_dev, "hardware error, stopping device\n");
5442                 iwm_stop(sc);
5443                 rv = 1;
5444                 goto out;
5445         }
5446
5447         /* firmware chunk loaded */
5448         if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5449                 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5450                 handled |= IWM_CSR_INT_BIT_FH_TX;
5451                 sc->sc_fw_chunk_done = 1;
5452                 wakeup(&sc->sc_fw);
5453         }
5454
5455         if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5456                 handled |= IWM_CSR_INT_BIT_RF_KILL;
5457                 if (iwm_check_rfkill(sc)) {
5458                         device_printf(sc->sc_dev,
5459                             "%s: rfkill switch, disabling interface\n",
5460                             __func__);
5461                         iwm_stop(sc);
5462                 }
5463         }
5464
5465         /*
5466          * The Linux driver uses periodic interrupts to avoid races.
5467          * We cargo-cult like it's going out of fashion.
5468          */
5469         if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5470                 handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5471                 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5472                 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5473                         IWM_WRITE_1(sc,
5474                             IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5475                 isperiodic = 1;
5476         }
5477
5478         if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5479                 handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5480                 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5481
5482                 iwm_notif_intr(sc);
5483
5484                 /* enable periodic interrupt, see above */
5485                 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5486                         IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5487                             IWM_CSR_INT_PERIODIC_ENA);
5488         }
5489
5490         if (__predict_false(r1 & ~handled))
5491                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5492                     "%s: unhandled interrupts: %x\n", __func__, r1);
5493         rv = 1;
5494
5495  out_ena:
5496         iwm_restore_interrupts(sc);
5497  out:
5498         IWM_UNLOCK(sc);
5499         return;
5500 }
5501
5502 /*
5503  * Autoconf glue-sniffing
5504  */
5505 #define PCI_VENDOR_INTEL                0x8086
5506 #define PCI_PRODUCT_INTEL_WL_3160_1     0x08b3
5507 #define PCI_PRODUCT_INTEL_WL_3160_2     0x08b4
5508 #define PCI_PRODUCT_INTEL_WL_3165_1     0x3165
5509 #define PCI_PRODUCT_INTEL_WL_3165_2     0x3166
5510 #define PCI_PRODUCT_INTEL_WL_7260_1     0x08b1
5511 #define PCI_PRODUCT_INTEL_WL_7260_2     0x08b2
5512 #define PCI_PRODUCT_INTEL_WL_7265_1     0x095a
5513 #define PCI_PRODUCT_INTEL_WL_7265_2     0x095b
5514 #define PCI_PRODUCT_INTEL_WL_8260_1     0x24f3
5515 #define PCI_PRODUCT_INTEL_WL_8260_2     0x24f4
5516
5517 static const struct iwm_devices {
5518         uint16_t        device;
5519         const char      *name;
5520 } iwm_devices[] = {
5521         { PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
5522         { PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
5523         { PCI_PRODUCT_INTEL_WL_3165_1, "Intel Dual Band Wireless AC 3165" },
5524         { PCI_PRODUCT_INTEL_WL_3165_2, "Intel Dual Band Wireless AC 3165" },
5525         { PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
5526         { PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
5527         { PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
5528         { PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
5529         { PCI_PRODUCT_INTEL_WL_8260_1, "Intel Dual Band Wireless AC 8260" },
5530         { PCI_PRODUCT_INTEL_WL_8260_2, "Intel Dual Band Wireless AC 8260" },
5531 };
5532
5533 static int
5534 iwm_probe(device_t dev)
5535 {
5536         int i;
5537
5538         for (i = 0; i < nitems(iwm_devices); i++) {
5539                 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5540                     pci_get_device(dev) == iwm_devices[i].device) {
5541                         device_set_desc(dev, iwm_devices[i].name);
5542                         return (BUS_PROBE_DEFAULT);
5543                 }
5544         }
5545
5546         return (ENXIO);
5547 }
5548
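     /*
      * Per-device setup: pick the firmware image name, device family and
      * firmware DMA segment size from the PCI device ID.  Note that the
      * 3165 parts use the 7265 firmware image.
      */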
5549 static int
5550 iwm_dev_check(device_t dev)
5551 {
5552         struct iwm_softc *sc;
5553
5554         sc = device_get_softc(dev);
5555
5556         sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
5557         switch (pci_get_device(dev)) {
5558         case PCI_PRODUCT_INTEL_WL_3160_1:
5559         case PCI_PRODUCT_INTEL_WL_3160_2:
5560                 sc->sc_fwname = "iwm3160fw";
5561                 sc->host_interrupt_operation_mode = 1;
5562                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5563                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5564                 return (0);
5565         case PCI_PRODUCT_INTEL_WL_3165_1:
5566         case PCI_PRODUCT_INTEL_WL_3165_2:
5567                 sc->sc_fwname = "iwm7265fw";
5568                 sc->host_interrupt_operation_mode = 0;
5569                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5570                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5571                 return (0);
5572         case PCI_PRODUCT_INTEL_WL_7260_1:
5573         case PCI_PRODUCT_INTEL_WL_7260_2:
5574                 sc->sc_fwname = "iwm7260fw";
5575                 sc->host_interrupt_operation_mode = 1;
5576                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5577                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5578                 return (0);
5579         case PCI_PRODUCT_INTEL_WL_7265_1:
5580         case PCI_PRODUCT_INTEL_WL_7265_2:
5581                 sc->sc_fwname = "iwm7265fw";
5582                 sc->host_interrupt_operation_mode = 0;
5583                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5584                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5585                 return (0);
5586         case PCI_PRODUCT_INTEL_WL_8260_1:
5587         case PCI_PRODUCT_INTEL_WL_8260_2:
5588                 sc->sc_fwname = "iwm8000Cfw";
5589                 sc->host_interrupt_operation_mode = 0;
5590                 sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
5591                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
5592                 return (0);
5593         default:
5594                 device_printf(dev, "unknown adapter type\n");
5595                 return (ENXIO);
5596         }
5597 }
5598
5599 /* PCI registers */
5600 #define PCI_CFG_RETRY_TIMEOUT   0x041
5601
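     /*
      * Map the BAR 0 register space, enable bus mastering, allocate an MSI
      * vector (falling back to a shared legacy interrupt) and install the
      * interrupt handler.
      */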
5602 static int
5603 iwm_pci_attach(device_t dev)
5604 {
5605         struct iwm_softc *sc;
5606         int count, error, rid;
5607         uint16_t reg;
5608
5609         sc = device_get_softc(dev);
5610
5611         /* We disable the RETRY_TIMEOUT register (0x41) to keep
5612          * PCI Tx retries from interfering with C3 CPU state */
5613         pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5614
5615         /* Enable bus-mastering and hardware bug workaround. */
5616         pci_enable_busmaster(dev);
5617         reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5618         /* if !MSI */
5619         if (reg & PCIM_STATUS_INTxSTATE) {
5620                 reg &= ~PCIM_STATUS_INTxSTATE;
5621         }
5622         pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5623
5624         rid = PCIR_BAR(0);
5625         sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5626             RF_ACTIVE);
5627         if (sc->sc_mem == NULL) {
5628                 device_printf(sc->sc_dev, "can't map mem space\n");
5629                 return (ENXIO);
5630         }
5631         sc->sc_st = rman_get_bustag(sc->sc_mem);
5632         sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5633
5634         /* Install interrupt handler. */
5635         count = 1;
5636         rid = 0;
5637         if (pci_alloc_msi(dev, &count) == 0)
5638                 rid = 1;
5639         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5640             (rid != 0 ? 0 : RF_SHAREABLE));
5641         if (sc->sc_irq == NULL) {
5642                 device_printf(dev, "can't map interrupt\n");
5643                 return (ENXIO);
5644         }
5645         error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5646             NULL, iwm_intr, sc, &sc->sc_ih);
5647         if (sc->sc_ih == NULL) {
5648                 device_printf(dev, "can't establish interrupt\n");
5649                 return (ENXIO);
5650         }
5651         sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5652
5653         return (0);
5654 }
5655
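     /*
      * Release the interrupt, MSI and memory resources acquired in
      * iwm_pci_attach().
      */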
5656 static void
5657 iwm_pci_detach(device_t dev)
5658 {
5659         struct iwm_softc *sc = device_get_softc(dev);
5660
5661         if (sc->sc_irq != NULL) {
5662                 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5663                 bus_release_resource(dev, SYS_RES_IRQ,
5664                     rman_get_rid(sc->sc_irq), sc->sc_irq);
5665                 pci_release_msi(dev);
5666         }
5667         if (sc->sc_mem != NULL)
5668                 bus_release_resource(dev, SYS_RES_MEMORY,
5669                     rman_get_rid(sc->sc_mem), sc->sc_mem);
5670 }
5671
5672
5673
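     /*
      * Main attach path: set up the lock, callouts and tasks, attach PCI
      * resources, identify the chip, and allocate firmware/ICT/scheduler/
      * ring DMA memory.  The firmware-dependent setup (ucode load, NVM
      * read, net80211 attach) is deferred to iwm_preinit(), which runs
      * from a config intrhook once interrupts are available.
      */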
5674 static int
5675 iwm_attach(device_t dev)
5676 {
5677         struct iwm_softc *sc = device_get_softc(dev);
5678         struct ieee80211com *ic = &sc->sc_ic;
5679         int error;
5680         int txq_i, i;
5681
5682         sc->sc_dev = dev;
5683         IWM_LOCK_INIT(sc);
5684         mbufq_init(&sc->sc_snd, ifqmaxlen);
5685         callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5686         callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
5687         TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5688
5689         /* PCI attach */
5690         error = iwm_pci_attach(dev);
5691         if (error != 0)
5692                 goto fail;
5693
5694         sc->sc_wantresp = -1;
5695
5696         /* Check device type */
5697         error = iwm_dev_check(dev);
5698         if (error != 0)
5699                 goto fail;
5700
5701         /*
5702          * We now start fiddling with the hardware
5703          */
5704         /*
5705          * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
5706          * changed, and now the revision step also includes bit 0-1 (no more
5707          * "dash" value). To keep hw_rev backwards compatible - we'll store it
5708          * in the old format.
5709          */
5710         if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
5711                 sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
5712                                 (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
5713
5714         if (iwm_prepare_card_hw(sc) != 0) {
5715                 device_printf(dev, "could not initialize hardware\n");
5716                 goto fail;
5717         }
5718
5719         if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
5720                 int ret;
5721                 uint32_t hw_step;
5722
5723                 /*
5724                  * In order to recognize C step the driver should read the
5725                  * chip version id located at the AUX bus MISC address.
5726                  */
5727                 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
5728                             IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
5729                 DELAY(2);
5730
5731                 ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
5732                                    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5733                                    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5734                                    25000);
5735                 if (!ret) {
5736                         device_printf(sc->sc_dev,
5737                             "Failed to wake up the nic\n");
5738                         goto fail;
5739                 }
5740
5741                 if (iwm_nic_lock(sc)) {
5742                         hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
5743                         hw_step |= IWM_ENABLE_WFPM;
5744                         iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
5745                         hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
5746                         hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
5747                         if (hw_step == 0x3)
5748                                 sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
5749                                                 (IWM_SILICON_C_STEP << 2);
5750                         iwm_nic_unlock(sc);
5751                 } else {
5752                         device_printf(sc->sc_dev, "Failed to lock the nic\n");
5753                         goto fail;
5754                 }
5755         }
5756
5757         /* Allocate DMA memory for firmware transfers. */
5758         if ((error = iwm_alloc_fwmem(sc)) != 0) {
5759                 device_printf(dev, "could not allocate memory for firmware\n");
5760                 goto fail;
5761         }
5762
5763         /* Allocate "Keep Warm" page. */
5764         if ((error = iwm_alloc_kw(sc)) != 0) {
5765                 device_printf(dev, "could not allocate keep warm page\n");
5766                 goto fail;
5767         }
5768
5769         /* We use ICT interrupts */
5770         if ((error = iwm_alloc_ict(sc)) != 0) {
5771                 device_printf(dev, "could not allocate ICT table\n");
5772                 goto fail;
5773         }
5774
5775         /* Allocate TX scheduler "rings". */
5776         if ((error = iwm_alloc_sched(sc)) != 0) {
5777                 device_printf(dev, "could not allocate TX scheduler rings\n");
5778                 goto fail;
5779         }
5780
5781         /* Allocate TX rings */
5782         for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
5783                 if ((error = iwm_alloc_tx_ring(sc,
5784                     &sc->txq[txq_i], txq_i)) != 0) {
5785                         device_printf(dev,
5786                             "could not allocate TX ring %d\n",
5787                             txq_i);
5788                         goto fail;
5789                 }
5790         }
5791
5792         /* Allocate RX ring. */
5793         if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
5794                 device_printf(dev, "could not allocate RX ring\n");
5795                 goto fail;
5796         }
5797
5798         /* Clear pending interrupts. */
5799         IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
5800
5801         ic->ic_softc = sc;
5802         ic->ic_name = device_get_nameunit(sc->sc_dev);
5803         ic->ic_phytype = IEEE80211_T_OFDM;      /* not only, but not used */
5804         ic->ic_opmode = IEEE80211_M_STA;        /* default to BSS mode */
5805
5806         /* Set device capabilities. */
5807         ic->ic_caps =
5808             IEEE80211_C_STA |
5809             IEEE80211_C_WPA |           /* WPA/RSN */
5810             IEEE80211_C_WME |
5811             IEEE80211_C_SHSLOT |        /* short slot time supported */
5812             IEEE80211_C_SHPREAMBLE      /* short preamble supported */
5813 //          IEEE80211_C_BGSCAN          /* capable of bg scanning */
5814             ;
5815         for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
5816                 sc->sc_phyctxt[i].id = i;
5817                 sc->sc_phyctxt[i].color = 0;
5818                 sc->sc_phyctxt[i].ref = 0;
5819                 sc->sc_phyctxt[i].channel = NULL;
5820         }
5821
5822         /* Default noise floor */
5823         sc->sc_noise = -96;
5824
5825         /* Max RSSI */
5826         sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
5827
5828         sc->sc_preinit_hook.ich_func = iwm_preinit;
5829         sc->sc_preinit_hook.ich_arg = sc;
5830         if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
5831                 device_printf(dev, "config_intrhook_establish failed\n");
5832                 goto fail;
5833         }
5834
5835 #ifdef IWM_DEBUG
5836         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
5837             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
5838             CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
5839 #endif
5840
5841         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5842             "<-%s\n", __func__);
5843
5844         return 0;
5845
5846         /* Free allocated memory if something failed during attachment. */
5847 fail:
5848         iwm_detach_local(sc, 0);
5849
5850         return ENXIO;
5851 }
5852
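     /*
      * Reject group (multicast) addresses and the all-zero address.
      */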
5853 static int
5854 iwm_is_valid_ether_addr(uint8_t *addr)
5855 {
5856         char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
5857
5858         if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
5859                 return (FALSE);
5860
5861         return (TRUE);
5862 }
5863
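     /* WME/EDCA parameter updates are not implemented yet; just log the call. */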
5864 static int
5865 iwm_update_edca(struct ieee80211com *ic)
5866 {
5867         struct iwm_softc *sc = ic->ic_softc;
5868
5869         device_printf(sc->sc_dev, "%s: called\n", __func__);
5870         return (0);
5871 }
5872
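     /*
      * Deferred attach work, run once from the config intrhook: start the
      * hardware, run the init firmware to read the NVM (MAC address, band
      * capabilities), then attach net80211 and radiotap.
      */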
5873 static void
5874 iwm_preinit(void *arg)
5875 {
5876         struct iwm_softc *sc = arg;
5877         device_t dev = sc->sc_dev;
5878         struct ieee80211com *ic = &sc->sc_ic;
5879         int error;
5880
5881         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5882             "->%s\n", __func__);
5883
5884         IWM_LOCK(sc);
5885         if ((error = iwm_start_hw(sc)) != 0) {
5886                 device_printf(dev, "could not initialize hardware\n");
5887                 IWM_UNLOCK(sc);
5888                 goto fail;
5889         }
5890
5891         error = iwm_run_init_mvm_ucode(sc, 1);
5892         iwm_stop_device(sc);
5893         if (error) {
5894                 IWM_UNLOCK(sc);
5895                 goto fail;
5896         }
5897         device_printf(dev,
5898             "hw rev 0x%x, fw ver %s, address %s\n",
5899             sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
5900             sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
5901
5902         /* not all hardware can do 5GHz band */
5903         if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
5904                 memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
5905                     sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
5906         IWM_UNLOCK(sc);
5907
5908         iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
5909             ic->ic_channels);
5910
5911         /*
5912          * At this point we've committed - if we fail to do setup,
5913          * we now also have to tear down the net80211 state.
5914          */
5915         ieee80211_ifattach(ic);
5916         ic->ic_vap_create = iwm_vap_create;
5917         ic->ic_vap_delete = iwm_vap_delete;
5918         ic->ic_raw_xmit = iwm_raw_xmit;
5919         ic->ic_node_alloc = iwm_node_alloc;
5920         ic->ic_scan_start = iwm_scan_start;
5921         ic->ic_scan_end = iwm_scan_end;
5922         ic->ic_update_mcast = iwm_update_mcast;
5923         ic->ic_getradiocaps = iwm_init_channel_map;
5924         ic->ic_set_channel = iwm_set_channel;
5925         ic->ic_scan_curchan = iwm_scan_curchan;
5926         ic->ic_scan_mindwell = iwm_scan_mindwell;
5927         ic->ic_wme.wme_update = iwm_update_edca;
5928         ic->ic_parent = iwm_parent;
5929         ic->ic_transmit = iwm_transmit;
5930         iwm_radiotap_attach(sc);
5931         if (bootverbose)
5932                 ieee80211_announce(ic);
5933
5934         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5935             "<-%s\n", __func__);
5936         config_intrhook_disestablish(&sc->sc_preinit_hook);
5937
5938         return;
5939 fail:
5940         config_intrhook_disestablish(&sc->sc_preinit_hook);
5941         iwm_detach_local(sc, 0);
5942 }
5943
5944 /*
5945  * Attach the interface to 802.11 radiotap.
5946  */
5947 static void
5948 iwm_radiotap_attach(struct iwm_softc *sc)
5949 {
5950         struct ieee80211com *ic = &sc->sc_ic;
5951
5952         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5953             "->%s begin\n", __func__);
5954         ieee80211_radiotap_attach(ic,
5955             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
5956                 IWM_TX_RADIOTAP_PRESENT,
5957             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
5958                 IWM_RX_RADIOTAP_PRESENT);
5959         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5960             "<-%s end\n", __func__);
5961 }
5962
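     /*
      * Only one vap is supported at a time.  Interpose our state-change
      * handler so net80211 state transitions go through iwm_newstate().
      */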
5963 static struct ieee80211vap *
5964 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
5965     enum ieee80211_opmode opmode, int flags,
5966     const uint8_t bssid[IEEE80211_ADDR_LEN],
5967     const uint8_t mac[IEEE80211_ADDR_LEN])
5968 {
5969         struct iwm_vap *ivp;
5970         struct ieee80211vap *vap;
5971
5972         if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
5973                 return NULL;
5974         ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
5975         vap = &ivp->iv_vap;
5976         ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
5977         vap->iv_bmissthreshold = 10;            /* override default */
5978         /* Override with driver methods. */
5979         ivp->iv_newstate = vap->iv_newstate;
5980         vap->iv_newstate = iwm_newstate;
5981
5982         ieee80211_ratectl_init(vap);
5983         /* Complete setup. */
5984         ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
5985             mac);
5986         ic->ic_opmode = opmode;
5987
5988         return vap;
5989 }
5990
5991 static void
5992 iwm_vap_delete(struct ieee80211vap *vap)
5993 {
5994         struct iwm_vap *ivp = IWM_VAP(vap);
5995
5996         ieee80211_ratectl_deinit(vap);
5997         ieee80211_vap_detach(vap);
5998         free(ivp, M_80211_VAP);
5999 }
6000
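     /*
      * Kick off a firmware-driven scan, using the UMAC scan command when
      * the firmware advertises that capability and the older LMAC scan
      * otherwise.  On failure the net80211 scan is cancelled.
      */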
6001 static void
6002 iwm_scan_start(struct ieee80211com *ic)
6003 {
6004         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6005         struct iwm_softc *sc = ic->ic_softc;
6006         int error;
6007
6008         IWM_LOCK(sc);
6009         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6010                 error = iwm_mvm_umac_scan(sc);
6011         else
6012                 error = iwm_mvm_lmac_scan(sc);
6013         if (error != 0) {
6014                 device_printf(sc->sc_dev, "could not initiate scan\n");
6015                 IWM_UNLOCK(sc);
6016                 ieee80211_cancel_scan(vap);
6017         } else {
6018                 iwm_led_blink_start(sc);
6019                 IWM_UNLOCK(sc);
6020         }
6021 }
6022
6023 static void
6024 iwm_scan_end(struct ieee80211com *ic)
6025 {
6026         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6027         struct iwm_softc *sc = ic->ic_softc;
6028
6029         IWM_LOCK(sc);
6030         iwm_led_blink_stop(sc);
6031         if (vap->iv_state == IEEE80211_S_RUN)
6032                 iwm_mvm_led_enable(sc);
6033         IWM_UNLOCK(sc);
6034 }
6035
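     /*
      * The following net80211 callbacks are intentionally empty: the
      * firmware handles channel tuning and scan dwell itself, and no
      * multicast filter programming is done.
      */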
6036 static void
6037 iwm_update_mcast(struct ieee80211com *ic)
6038 {
6039 }
6040
6041 static void
6042 iwm_set_channel(struct ieee80211com *ic)
6043 {
6044 }
6045
6046 static void
6047 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6048 {
6049 }
6050
6051 static void
6052 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6053 {
6054         return;
6055 }
6056
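     /*
      * Restart helper: serialize on IWM_FLAG_BUSY, stop the hardware and
      * bring it back up if any interface is still running.
      */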
6057 void
6058 iwm_init_task(void *arg1)
6059 {
6060         struct iwm_softc *sc = arg1;
6061
6062         IWM_LOCK(sc);
6063         while (sc->sc_flags & IWM_FLAG_BUSY)
6064                 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6065         sc->sc_flags |= IWM_FLAG_BUSY;
6066         iwm_stop(sc);
6067         if (sc->sc_ic.ic_nrunning > 0)
6068                 iwm_init(sc);
6069         sc->sc_flags &= ~IWM_FLAG_BUSY;
6070         wakeup(&sc->sc_flags);
6071         IWM_UNLOCK(sc);
6072 }
6073
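     /*
      * On resume, redo the PCI retry-timeout workaround and restart the
      * hardware.  iwm_suspend() uses IWM_FLAG_SCANNING to record that the
      * device was running, which tells us to resume net80211 here.
      */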
6074 static int
6075 iwm_resume(device_t dev)
6076 {
6077         struct iwm_softc *sc = device_get_softc(dev);
6078         int do_reinit = 0;
6079
6080         /*
6081          * We disable the RETRY_TIMEOUT register (0x41) to keep
6082          * PCI Tx retries from interfering with C3 CPU state.
6083          */
6084         pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6085         iwm_init_task(sc);
6086
6087         IWM_LOCK(sc);
6088         if (sc->sc_flags & IWM_FLAG_SCANNING) {
6089                 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6090                 do_reinit = 1;
6091         }
6092         IWM_UNLOCK(sc);
6093
6094         if (do_reinit)
6095                 ieee80211_resume_all(&sc->sc_ic);
6096
6097         return 0;
6098 }
6099
6100 static int
6101 iwm_suspend(device_t dev)
6102 {
6103         int do_stop = 0;
6104         struct iwm_softc *sc = device_get_softc(dev);
6105
6106         do_stop = (sc->sc_ic.ic_nrunning > 0);
6107
6108         ieee80211_suspend_all(&sc->sc_ic);
6109
6110         if (do_stop) {
6111                 IWM_LOCK(sc);
6112                 iwm_stop(sc);
6113                 sc->sc_flags |= IWM_FLAG_SCANNING;
6114                 IWM_UNLOCK(sc);
6115         }
6116
6117         return (0);
6118 }
6119
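     /*
      * Common teardown, shared by device_detach and a failed attach;
      * do_net80211 selects whether the net80211 state (only attached once
      * iwm_preinit() has run) is also detached.
      */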
6120 static int
6121 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6122 {
6123         struct iwm_fw_info *fw = &sc->sc_fw;
6124         device_t dev = sc->sc_dev;
6125         int i;
6126
6127         ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6128
6129         callout_drain(&sc->sc_led_blink_to);
6130         callout_drain(&sc->sc_watchdog_to);
6131         iwm_stop_device(sc);
6132         if (do_net80211) {
6133                 ieee80211_ifdetach(&sc->sc_ic);
6134         }
6135
6136         iwm_phy_db_free(sc);
6137
6138         /* Free descriptor rings */
6139         iwm_free_rx_ring(sc, &sc->rxq);
6140         for (i = 0; i < nitems(sc->txq); i++)
6141                 iwm_free_tx_ring(sc, &sc->txq[i]);
6142
6143         /* Free firmware */
6144         if (fw->fw_fp != NULL)
6145                 iwm_fw_info_free(fw);
6146
6147         /* Free scheduler */
6148         iwm_dma_contig_free(&sc->sched_dma);
6149         iwm_dma_contig_free(&sc->ict_dma);
6150         iwm_dma_contig_free(&sc->kw_dma);
6151         iwm_dma_contig_free(&sc->fw_dma);
6152
6153         /* Finished with the hardware - detach things */
6154         iwm_pci_detach(dev);
6155
6156         mbufq_drain(&sc->sc_snd);
6157         IWM_LOCK_DESTROY(sc);
6158
6159         return (0);
6160 }
6161
6162 static int
6163 iwm_detach(device_t dev)
6164 {
6165         struct iwm_softc *sc = device_get_softc(dev);
6166
6167         return (iwm_detach_local(sc, 1));
6168 }
6169
6170 static device_method_t iwm_pci_methods[] = {
6171         /* Device interface */
6172         DEVMETHOD(device_probe,         iwm_probe),
6173         DEVMETHOD(device_attach,        iwm_attach),
6174         DEVMETHOD(device_detach,        iwm_detach),
6175         DEVMETHOD(device_suspend,       iwm_suspend),
6176         DEVMETHOD(device_resume,        iwm_resume),
6177
6178         DEVMETHOD_END
6179 };
6180
6181 static driver_t iwm_pci_driver = {
6182         "iwm",
6183         iwm_pci_methods,
6184         sizeof (struct iwm_softc)
6185 };
6186
6187 static devclass_t iwm_devclass;
6188
6189 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6190 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6191 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6192 MODULE_DEPEND(iwm, wlan, 1, 1, 1);
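     /*
      * A minimal usage sketch (not part of the driver): both the driver
      * module and the firmware image matching the adapter must be
      * available, e.g. in loader.conf:
      *
      *      if_iwm_load="YES"
      *      iwm7260fw_load="YES"    # matching iwm firmware module
      *
      * The firmware module names correspond to sc_fwname as set in
      * iwm_dev_check() above.
      */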