1 /*      $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $     */
2
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 #include <sys/cdefs.h>
106 __FBSDID("$FreeBSD$");
107
108 #include "opt_wlan.h"
109
110 #include <sys/param.h>
111 #include <sys/bus.h>
112 #include <sys/conf.h>
113 #include <sys/endian.h>
114 #include <sys/firmware.h>
115 #include <sys/kernel.h>
116 #include <sys/malloc.h>
117 #include <sys/mbuf.h>
118 #include <sys/mutex.h>
119 #include <sys/module.h>
120 #include <sys/proc.h>
121 #include <sys/rman.h>
122 #include <sys/socket.h>
123 #include <sys/sockio.h>
124 #include <sys/sysctl.h>
125 #include <sys/linker.h>
126
127 #include <machine/bus.h>
128 #include <machine/endian.h>
129 #include <machine/resource.h>
130
131 #include <dev/pci/pcivar.h>
132 #include <dev/pci/pcireg.h>
133
134 #include <net/bpf.h>
135
136 #include <net/if.h>
137 #include <net/if_var.h>
138 #include <net/if_arp.h>
139 #include <net/if_dl.h>
140 #include <net/if_media.h>
141 #include <net/if_types.h>
142
143 #include <netinet/in.h>
144 #include <netinet/in_systm.h>
145 #include <netinet/if_ether.h>
146 #include <netinet/ip.h>
147
148 #include <net80211/ieee80211_var.h>
149 #include <net80211/ieee80211_regdomain.h>
150 #include <net80211/ieee80211_ratectl.h>
151 #include <net80211/ieee80211_radiotap.h>
152
153 #include <dev/iwm/if_iwmreg.h>
154 #include <dev/iwm/if_iwmvar.h>
155 #include <dev/iwm/if_iwm_debug.h>
156 #include <dev/iwm/if_iwm_util.h>
157 #include <dev/iwm/if_iwm_binding.h>
158 #include <dev/iwm/if_iwm_phy_db.h>
159 #include <dev/iwm/if_iwm_mac_ctxt.h>
160 #include <dev/iwm/if_iwm_phy_ctxt.h>
161 #include <dev/iwm/if_iwm_time_event.h>
162 #include <dev/iwm/if_iwm_power.h>
163 #include <dev/iwm/if_iwm_scan.h>
164
165 #include <dev/iwm/if_iwm_pcie_trans.h>
166 #include <dev/iwm/if_iwm_led.h>
167
168 const uint8_t iwm_nvm_channels[] = {
169         /* 2.4 GHz */
170         1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
171         /* 5 GHz */
172         36, 40, 44, 48, 52, 56, 60, 64,
173         100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
174         149, 153, 157, 161, 165
175 };
176 _Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
177     "IWM_NUM_CHANNELS is too small");
178
179 const uint8_t iwm_nvm_channels_8000[] = {
180         /* 2.4 GHz */
181         1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
182         /* 5 GHz */
183         36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
184         96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
185         149, 153, 157, 161, 165, 169, 173, 177, 181
186 };
187 _Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
188     "IWM_NUM_CHANNELS_8000 is too small");
189
190 #define IWM_NUM_2GHZ_CHANNELS   14
191 #define IWM_N_HW_ADDR_MASK      0xF
192
193 /*
194  * XXX For now, there's simply a fixed set of rate table entries
195  * that are populated.
196  */
197 const struct iwm_rate {
198         uint8_t rate;
199         uint8_t plcp;
200 } iwm_rates[] = {
201         {   2,  IWM_RATE_1M_PLCP  },
202         {   4,  IWM_RATE_2M_PLCP  },
203         {  11,  IWM_RATE_5M_PLCP  },
204         {  22,  IWM_RATE_11M_PLCP },
205         {  12,  IWM_RATE_6M_PLCP  },
206         {  18,  IWM_RATE_9M_PLCP  },
207         {  24,  IWM_RATE_12M_PLCP },
208         {  36,  IWM_RATE_18M_PLCP },
209         {  48,  IWM_RATE_24M_PLCP },
210         {  72,  IWM_RATE_36M_PLCP },
211         {  96,  IWM_RATE_48M_PLCP },
212         { 108,  IWM_RATE_54M_PLCP },
213 };
214 #define IWM_RIDX_CCK    0
215 #define IWM_RIDX_OFDM   4
216 #define IWM_RIDX_MAX    (nitems(iwm_rates)-1)
217 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
218 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
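
/*
 * Illustrative sketch (added for exposition, not part of the original
 * driver): how the iwm_rates[] table and the IWM_RIDX_* macros are
 * typically consumed.  The rate argument is in 500 kb/s units, as stored
 * in iwm_rates[].rate.  The helper name iwm_plcp_for_rate_sketch is
 * hypothetical; the real lookups live in the TX path below.
 */
static __inline uint8_t
iwm_plcp_for_rate_sketch(uint8_t rate)
{
	int i;

	for (i = 0; i <= IWM_RIDX_MAX; i++) {
		if (iwm_rates[i].rate == rate)
			return (iwm_rates[i].plcp);
	}
	/* Unknown rate: fall back to the lowest CCK entry (1 Mb/s). */
	return (iwm_rates[IWM_RIDX_CCK].plcp);
}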
219
220 struct iwm_nvm_section {
221         uint16_t length;
222         uint8_t *data;
223 };
224
225 static int      iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
226 static int      iwm_firmware_store_section(struct iwm_softc *,
227                                            enum iwm_ucode_type,
228                                            const uint8_t *, size_t);
229 static int      iwm_set_default_calib(struct iwm_softc *, const void *);
230 static void     iwm_fw_info_free(struct iwm_fw_info *);
231 static int      iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
232 static void     iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
233 static int      iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
234                                      bus_size_t, bus_size_t);
235 static void     iwm_dma_contig_free(struct iwm_dma_info *);
236 static int      iwm_alloc_fwmem(struct iwm_softc *);
237 static void     iwm_free_fwmem(struct iwm_softc *);
238 static int      iwm_alloc_sched(struct iwm_softc *);
239 static void     iwm_free_sched(struct iwm_softc *);
240 static int      iwm_alloc_kw(struct iwm_softc *);
241 static void     iwm_free_kw(struct iwm_softc *);
242 static int      iwm_alloc_ict(struct iwm_softc *);
243 static void     iwm_free_ict(struct iwm_softc *);
244 static int      iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
245 static void     iwm_disable_rx_dma(struct iwm_softc *);
246 static void     iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
247 static void     iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
248 static int      iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
249                                   int);
250 static void     iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
251 static void     iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
252 static void     iwm_enable_interrupts(struct iwm_softc *);
253 static void     iwm_restore_interrupts(struct iwm_softc *);
254 static void     iwm_disable_interrupts(struct iwm_softc *);
255 static void     iwm_ict_reset(struct iwm_softc *);
256 static int      iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
257 static void     iwm_stop_device(struct iwm_softc *);
258 static void     iwm_mvm_nic_config(struct iwm_softc *);
259 static int      iwm_nic_rx_init(struct iwm_softc *);
260 static int      iwm_nic_tx_init(struct iwm_softc *);
261 static int      iwm_nic_init(struct iwm_softc *);
262 static int      iwm_enable_txq(struct iwm_softc *, int, int, int);
263 static int      iwm_post_alive(struct iwm_softc *);
264 static int      iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
265                                    uint16_t, uint8_t *, uint16_t *);
266 static int      iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
267                                      uint16_t *, size_t);
268 static uint32_t iwm_eeprom_channel_flags(uint16_t);
269 static void     iwm_add_channel_band(struct iwm_softc *,
270                     struct ieee80211_channel[], int, int *, int, size_t,
271                     const uint8_t[]);
272 static void     iwm_init_channel_map(struct ieee80211com *, int, int *,
273                     struct ieee80211_channel[]);
274 static int      iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
275                                    const uint16_t *, const uint16_t *,
276                                    const uint16_t *, const uint16_t *,
277                                    const uint16_t *);
278 static void     iwm_set_hw_address_8000(struct iwm_softc *,
279                                         struct iwm_nvm_data *,
280                                         const uint16_t *, const uint16_t *);
281 static int      iwm_get_sku(const struct iwm_softc *, const uint16_t *,
282                             const uint16_t *);
283 static int      iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
284 static int      iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
285                                   const uint16_t *);
286 static int      iwm_get_n_hw_addrs(const struct iwm_softc *,
287                                    const uint16_t *);
288 static void     iwm_set_radio_cfg(const struct iwm_softc *,
289                                   struct iwm_nvm_data *, uint32_t);
290 static int      iwm_parse_nvm_sections(struct iwm_softc *,
291                                        struct iwm_nvm_section *);
292 static int      iwm_nvm_init(struct iwm_softc *);
293 static int      iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
294                                        const uint8_t *, uint32_t);
295 static int      iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
296                                         const uint8_t *, uint32_t);
297 static int      iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
298 static int      iwm_load_cpu_sections_8000(struct iwm_softc *,
299                                            struct iwm_fw_sects *, int , int *);
300 static int      iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
301 static int      iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
302 static int      iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
303 static int      iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
304 static int      iwm_send_phy_cfg_cmd(struct iwm_softc *);
305 static int      iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
306                                               enum iwm_ucode_type);
307 static int      iwm_run_init_mvm_ucode(struct iwm_softc *, int);
308 static int      iwm_rx_addbuf(struct iwm_softc *, int, int);
309 static int      iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
310 static int      iwm_mvm_get_signal_strength(struct iwm_softc *,
311                                             struct iwm_rx_phy_info *);
312 static void     iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
313                                       struct iwm_rx_packet *,
314                                       struct iwm_rx_data *);
315 static int      iwm_get_noise(struct iwm_softc *sc,
316                     const struct iwm_mvm_statistics_rx_non_phy *);
317 static void     iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
318                                    struct iwm_rx_data *);
319 static int      iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
320                                          struct iwm_rx_packet *,
321                                          struct iwm_node *);
322 static void     iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
323                                   struct iwm_rx_data *);
324 static void     iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
325 #if 0
326 static void     iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
327                                  uint16_t);
328 #endif
329 static const struct iwm_rate *
330         iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
331                         struct ieee80211_frame *, struct iwm_tx_cmd *);
332 static int      iwm_tx(struct iwm_softc *, struct mbuf *,
333                        struct ieee80211_node *, int);
334 static int      iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
335                              const struct ieee80211_bpf_params *);
336 static int      iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
337                                                 struct iwm_mvm_add_sta_cmd_v7 *,
338                                                 int *);
339 static int      iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
340                                        int);
341 static int      iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
342 static int      iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
343 static int      iwm_mvm_add_int_sta_common(struct iwm_softc *,
344                                            struct iwm_int_sta *,
345                                            const uint8_t *, uint16_t, uint16_t);
346 static int      iwm_mvm_add_aux_sta(struct iwm_softc *);
347 static int      iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
348 static int      iwm_auth(struct ieee80211vap *, struct iwm_softc *);
349 static int      iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
350 static int      iwm_release(struct iwm_softc *, struct iwm_node *);
351 static struct ieee80211_node *
352                 iwm_node_alloc(struct ieee80211vap *,
353                                const uint8_t[IEEE80211_ADDR_LEN]);
354 static void     iwm_setrates(struct iwm_softc *, struct iwm_node *);
355 static int      iwm_media_change(struct ifnet *);
356 static int      iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
357 static void     iwm_endscan_cb(void *, int);
358 static void     iwm_mvm_fill_sf_command(struct iwm_softc *,
359                                         struct iwm_sf_cfg_cmd *,
360                                         struct ieee80211_node *);
361 static int      iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
362 static int      iwm_send_bt_init_conf(struct iwm_softc *);
363 static int      iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
364 static void     iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
365 static int      iwm_init_hw(struct iwm_softc *);
366 static void     iwm_init(struct iwm_softc *);
367 static void     iwm_start(struct iwm_softc *);
368 static void     iwm_stop(struct iwm_softc *);
369 static void     iwm_watchdog(void *);
370 static void     iwm_parent(struct ieee80211com *);
371 #ifdef IWM_DEBUG
372 static const char *
373                 iwm_desc_lookup(uint32_t);
374 static void     iwm_nic_error(struct iwm_softc *);
375 static void     iwm_nic_umac_error(struct iwm_softc *);
376 #endif
377 static void     iwm_notif_intr(struct iwm_softc *);
378 static void     iwm_intr(void *);
379 static int      iwm_attach(device_t);
380 static int      iwm_is_valid_ether_addr(uint8_t *);
381 static void     iwm_preinit(void *);
382 static int      iwm_detach_local(struct iwm_softc *sc, int);
383 static void     iwm_init_task(void *);
384 static void     iwm_radiotap_attach(struct iwm_softc *);
385 static struct ieee80211vap *
386                 iwm_vap_create(struct ieee80211com *,
387                                const char [IFNAMSIZ], int,
388                                enum ieee80211_opmode, int,
389                                const uint8_t [IEEE80211_ADDR_LEN],
390                                const uint8_t [IEEE80211_ADDR_LEN]);
391 static void     iwm_vap_delete(struct ieee80211vap *);
392 static void     iwm_scan_start(struct ieee80211com *);
393 static void     iwm_scan_end(struct ieee80211com *);
394 static void     iwm_update_mcast(struct ieee80211com *);
395 static void     iwm_set_channel(struct ieee80211com *);
396 static void     iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
397 static void     iwm_scan_mindwell(struct ieee80211_scan_state *);
398 static int      iwm_detach(device_t);
399
400 /*
401  * Firmware parser.
402  */
403
404 static int
405 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
406 {
407         const struct iwm_fw_cscheme_list *l = (const void *)data;
408
409         if (dlen < sizeof(*l) ||
410             dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
411                 return EINVAL;
412
413         /* we don't actually store anything for now, always use s/w crypto */
414
415         return 0;
416 }
417
418 static int
419 iwm_firmware_store_section(struct iwm_softc *sc,
420     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
421 {
422         struct iwm_fw_sects *fws;
423         struct iwm_fw_onesect *fwone;
424
425         if (type >= IWM_UCODE_TYPE_MAX)
426                 return EINVAL;
427         if (dlen < sizeof(uint32_t))
428                 return EINVAL;
429
430         fws = &sc->sc_fw.fw_sects[type];
431         if (fws->fw_count >= IWM_UCODE_SECT_MAX)
432                 return EINVAL;
433
434         fwone = &fws->fw_sect[fws->fw_count];
435
436         /* first 32bit are device load offset */
437         memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
438
439         /* rest is data */
440         fwone->fws_data = data + sizeof(uint32_t);
441         fwone->fws_len = dlen - sizeof(uint32_t);
442
443         fws->fw_count++;
444         fws->fw_totlen += fwone->fws_len;
445
446         return 0;
447 }
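
/*
 * Illustrative sketch (for exposition only): the layout of a firmware
 * section TLV payload as consumed by iwm_firmware_store_section() above --
 * a 32-bit little-endian device load offset followed by the section image.
 * The struct name is hypothetical; the driver itself just uses pointer
 * arithmetic on the raw bytes.
 */
struct iwm_fw_section_layout_sketch {
	uint32_t	device_offset;	/* where to load this section */
	uint8_t		image[];	/* dlen - sizeof(uint32_t) bytes */
} __packed;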
448
449 /* iwlwifi: iwl-drv.c */
450 struct iwm_tlv_calib_data {
451         uint32_t ucode_type;
452         struct iwm_tlv_calib_ctrl calib;
453 } __packed;
454
455 static int
456 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
457 {
458         const struct iwm_tlv_calib_data *def_calib = data;
459         uint32_t ucode_type = le32toh(def_calib->ucode_type);
460
461         if (ucode_type >= IWM_UCODE_TYPE_MAX) {
462                 device_printf(sc->sc_dev,
463                     "Wrong ucode_type %u for default "
464                     "calibration.\n", ucode_type);
465                 return EINVAL;
466         }
467
468         sc->sc_default_calib[ucode_type].flow_trigger =
469             def_calib->calib.flow_trigger;
470         sc->sc_default_calib[ucode_type].event_trigger =
471             def_calib->calib.event_trigger;
472
473         return 0;
474 }
475
476 static void
477 iwm_fw_info_free(struct iwm_fw_info *fw)
478 {
479         firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
480         fw->fw_fp = NULL;
481         /* don't touch fw->fw_status */
482         memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
483 }
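
/*
 * Illustrative sketch (for exposition): the firmware(9) get/put pairing
 * used in this file.  firmware_get() may sleep, which is why
 * iwm_read_firmware() below drops the softc lock around it.  The helper
 * name iwm_fw_size_sketch is hypothetical.
 */
static __inline int
iwm_fw_size_sketch(const char *name, size_t *sizep)
{
	const struct firmware *fwp;

	fwp = firmware_get(name);
	if (fwp == NULL)
		return (ENOENT);
	*sizep = fwp->datasize;		/* image bytes live at fwp->data */
	firmware_put(fwp, FIRMWARE_UNLOAD);
	return (0);
}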
484
485 static int
486 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
487 {
488         struct iwm_fw_info *fw = &sc->sc_fw;
489         const struct iwm_tlv_ucode_header *uhdr;
490         struct iwm_ucode_tlv tlv;
491         enum iwm_ucode_tlv_type tlv_type;
492         const struct firmware *fwp;
493         const uint8_t *data;
494         int error = 0;
495         size_t len;
496
497         if (fw->fw_status == IWM_FW_STATUS_DONE &&
498             ucode_type != IWM_UCODE_TYPE_INIT)
499                 return 0;
500
501         while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
502                 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
503         fw->fw_status = IWM_FW_STATUS_INPROGRESS;
504
505         if (fw->fw_fp != NULL)
506                 iwm_fw_info_free(fw);
507
508         /*
509          * Load firmware into driver memory.
510          * fw_fp will be set.
511          */
512         IWM_UNLOCK(sc);
513         fwp = firmware_get(sc->sc_fwname);
514         IWM_LOCK(sc);
	if (fwp == NULL) {
		device_printf(sc->sc_dev,
		    "could not read firmware %s\n", sc->sc_fwname);
		error = ENOENT;
		goto out;
	}
521         fw->fw_fp = fwp;
522
523         /* (Re-)Initialize default values. */
524         sc->sc_capaflags = 0;
525         sc->sc_capa_n_scan_channels = IWM_MAX_NUM_SCAN_CHANNELS;
526         memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
527         memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
528
529         /*
530          * Parse firmware contents
531          */
532
533         uhdr = (const void *)fw->fw_fp->data;
534         if (*(const uint32_t *)fw->fw_fp->data != 0
535             || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
536                 device_printf(sc->sc_dev, "invalid firmware %s\n",
537                     sc->sc_fwname);
538                 error = EINVAL;
539                 goto out;
540         }
541
542         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
543             IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
544             IWM_UCODE_MINOR(le32toh(uhdr->ver)),
545             IWM_UCODE_API(le32toh(uhdr->ver)));
546         data = uhdr->data;
547         len = fw->fw_fp->datasize - sizeof(*uhdr);
548
549         while (len >= sizeof(tlv)) {
550                 size_t tlv_len;
551                 const void *tlv_data;
552
553                 memcpy(&tlv, data, sizeof(tlv));
554                 tlv_len = le32toh(tlv.length);
555                 tlv_type = le32toh(tlv.type);
556
557                 len -= sizeof(tlv);
558                 data += sizeof(tlv);
559                 tlv_data = data;
560
561                 if (len < tlv_len) {
562                         device_printf(sc->sc_dev,
563                             "firmware too short: %zu bytes\n",
564                             len);
565                         error = EINVAL;
566                         goto parse_out;
567                 }
568
569                 switch ((int)tlv_type) {
570                 case IWM_UCODE_TLV_PROBE_MAX_LEN:
571                         if (tlv_len < sizeof(uint32_t)) {
572                                 device_printf(sc->sc_dev,
573                                     "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
574                                     __func__,
575                                     (int) tlv_len);
576                                 error = EINVAL;
577                                 goto parse_out;
578                         }
579                         sc->sc_capa_max_probe_len
580                             = le32toh(*(const uint32_t *)tlv_data);
581                         /* limit it to something sensible */
582                         if (sc->sc_capa_max_probe_len >
583                             IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
584                                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
585                                     "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
586                                     "ridiculous\n", __func__);
587                                 error = EINVAL;
588                                 goto parse_out;
589                         }
590                         break;
591                 case IWM_UCODE_TLV_PAN:
592                         if (tlv_len) {
593                                 device_printf(sc->sc_dev,
594                                     "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
595                                     __func__,
596                                     (int) tlv_len);
597                                 error = EINVAL;
598                                 goto parse_out;
599                         }
600                         sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
601                         break;
602                 case IWM_UCODE_TLV_FLAGS:
603                         if (tlv_len < sizeof(uint32_t)) {
604                                 device_printf(sc->sc_dev,
605                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
606                                     __func__,
607                                     (int) tlv_len);
608                                 error = EINVAL;
609                                 goto parse_out;
610                         }
611                         /*
			 * Apparently there can be many flags, but the Linux
			 * driver parses only the first one, and so do we.
614                          *
615                          * XXX: why does this override IWM_UCODE_TLV_PAN?
616                          * Intentional or a bug?  Observations from
617                          * current firmware file:
618                          *  1) TLV_PAN is parsed first
619                          *  2) TLV_FLAGS contains TLV_FLAGS_PAN
620                          * ==> this resets TLV_PAN to itself... hnnnk
621                          */
622                         sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
623                         break;
624                 case IWM_UCODE_TLV_CSCHEME:
625                         if ((error = iwm_store_cscheme(sc,
626                             tlv_data, tlv_len)) != 0) {
627                                 device_printf(sc->sc_dev,
628                                     "%s: iwm_store_cscheme(): returned %d\n",
629                                     __func__,
630                                     error);
631                                 goto parse_out;
632                         }
633                         break;
634                 case IWM_UCODE_TLV_NUM_OF_CPU: {
635                         uint32_t num_cpu;
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
				    __func__,
				    (int) tlv_len);
				error = EINVAL;
				goto parse_out;
			}
644                         num_cpu = le32toh(*(const uint32_t *)tlv_data);
645                         if (num_cpu < 1 || num_cpu > 2) {
646                                 device_printf(sc->sc_dev,
647                                     "%s: Driver supports only 1 or 2 CPUs\n",
648                                     __func__);
649                                 error = EINVAL;
650                                 goto parse_out;
651                         }
652                         break;
653                 }
654                 case IWM_UCODE_TLV_SEC_RT:
655                         if ((error = iwm_firmware_store_section(sc,
656                             IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0) {
657                                 device_printf(sc->sc_dev,
658                                     "%s: IWM_UCODE_TYPE_REGULAR: iwm_firmware_store_section() failed; %d\n",
659                                     __func__,
660                                     error);
661                                 goto parse_out;
662                         }
663                         break;
664                 case IWM_UCODE_TLV_SEC_INIT:
665                         if ((error = iwm_firmware_store_section(sc,
666                             IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0) {
667                                 device_printf(sc->sc_dev,
668                                     "%s: IWM_UCODE_TYPE_INIT: iwm_firmware_store_section() failed; %d\n",
669                                     __func__,
670                                     error);
671                                 goto parse_out;
672                         }
673                         break;
674                 case IWM_UCODE_TLV_SEC_WOWLAN:
675                         if ((error = iwm_firmware_store_section(sc,
676                             IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0) {
677                                 device_printf(sc->sc_dev,
678                                     "%s: IWM_UCODE_TYPE_WOW: iwm_firmware_store_section() failed; %d\n",
679                                     __func__,
680                                     error);
681                                 goto parse_out;
682                         }
683                         break;
684                 case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%d) != sizeof(iwm_tlv_calib_data) (%d)\n",
				    __func__,
				    (int) tlv_len,
				    (int) sizeof(struct iwm_tlv_calib_data));
				error = EINVAL;
				goto parse_out;
			}
694                         if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
695                                 device_printf(sc->sc_dev,
696                                     "%s: iwm_set_default_calib() failed: %d\n",
697                                     __func__,
698                                     error);
699                                 goto parse_out;
700                         }
701                         break;
702                 case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) != sizeof(uint32_t)\n",
				    __func__,
				    (int) tlv_len);
				goto parse_out;
			}
711                         sc->sc_fw_phy_config =
712                             le32toh(*(const uint32_t *)tlv_data);
713                         break;
714
715                 case IWM_UCODE_TLV_API_CHANGES_SET: {
716                         const struct iwm_ucode_api *api;
717                         if (tlv_len != sizeof(*api)) {
718                                 error = EINVAL;
719                                 goto parse_out;
720                         }
721                         api = (const struct iwm_ucode_api *)tlv_data;
722                         /* Flags may exceed 32 bits in future firmware. */
723                         if (le32toh(api->api_index) > 0) {
724                                 device_printf(sc->sc_dev,
725                                     "unsupported API index %d\n",
726                                     le32toh(api->api_index));
727                                 goto parse_out;
728                         }
729                         sc->sc_ucode_api = le32toh(api->api_flags);
730                         break;
731                 }
732
733                 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
734                         const struct iwm_ucode_capa *capa;
735                         int idx, i;
736                         if (tlv_len != sizeof(*capa)) {
737                                 error = EINVAL;
738                                 goto parse_out;
739                         }
740                         capa = (const struct iwm_ucode_capa *)tlv_data;
741                         idx = le32toh(capa->api_index);
742                         if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
743                                 device_printf(sc->sc_dev,
744                                     "unsupported API index %d\n", idx);
745                                 goto parse_out;
746                         }
747                         for (i = 0; i < 32; i++) {
748                                 if ((le32toh(capa->api_capa) & (1U << i)) == 0)
749                                         continue;
750                                 setbit(sc->sc_enabled_capa, i + (32 * idx));
751                         }
752                         break;
753                 }
754
755                 case 48: /* undocumented TLV */
756                 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
757                 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
758                         /* ignore, not used by current driver */
759                         break;
760
761                 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
762                         if ((error = iwm_firmware_store_section(sc,
763                             IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
764                             tlv_len)) != 0)
765                                 goto parse_out;
766                         break;
767
768                 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
769                         if (tlv_len != sizeof(uint32_t)) {
770                                 error = EINVAL;
771                                 goto parse_out;
772                         }
773                         sc->sc_capa_n_scan_channels =
774                           le32toh(*(const uint32_t *)tlv_data);
775                         break;
776
777                 case IWM_UCODE_TLV_FW_VERSION:
778                         if (tlv_len != sizeof(uint32_t) * 3) {
779                                 error = EINVAL;
780                                 goto parse_out;
781                         }
782                         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
783                             "%d.%d.%d",
784                             le32toh(((const uint32_t *)tlv_data)[0]),
785                             le32toh(((const uint32_t *)tlv_data)[1]),
786                             le32toh(((const uint32_t *)tlv_data)[2]));
787                         break;
788
789                 default:
790                         device_printf(sc->sc_dev,
791                             "%s: unknown firmware section %d, abort\n",
792                             __func__, tlv_type);
793                         error = EINVAL;
794                         goto parse_out;
795                 }
796
797                 len -= roundup(tlv_len, 4);
798                 data += roundup(tlv_len, 4);
799         }
800
801         KASSERT(error == 0, ("unhandled error"));
802
803  parse_out:
804         if (error) {
805                 device_printf(sc->sc_dev, "firmware parse error %d, "
806                     "section type %d\n", error, tlv_type);
807         }
808
809         if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
810                 device_printf(sc->sc_dev,
811                     "device uses unsupported power ops\n");
812                 error = ENOTSUP;
813         }
814
815  out:
816         if (error) {
817                 fw->fw_status = IWM_FW_STATUS_NONE;
818                 if (fw->fw_fp != NULL)
819                         iwm_fw_info_free(fw);
820         } else
821                 fw->fw_status = IWM_FW_STATUS_DONE;
822         wakeup(&sc->sc_fw);
823
824         return error;
825 }
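
/*
 * Illustrative sketch (for exposition): the TLV stream layout walked by
 * iwm_read_firmware() above.  Each record is a struct iwm_ucode_tlv header
 * followed by 'length' payload bytes, and the next record starts at the
 * following 4-byte boundary.  The helper name is hypothetical.
 */
static __inline const uint8_t *
iwm_tlv_next_sketch(const uint8_t *data, size_t *lenp)
{
	struct iwm_ucode_tlv tlv;
	size_t adv;

	if (*lenp < sizeof(tlv))
		return (NULL);
	memcpy(&tlv, data, sizeof(tlv));
	adv = sizeof(tlv) + roundup(le32toh(tlv.length), 4);
	if (adv > *lenp)
		return (NULL);
	*lenp -= adv;
	return (data + adv);
}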
826
827 /*
828  * DMA resource routines
829  */
830
831 static void
832 iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
833 {
834         if (error != 0)
835                 return;
836         KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
837         *(bus_addr_t *)arg = segs[0].ds_addr;
838 }
839
840 static int
841 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
842     bus_size_t size, bus_size_t alignment)
843 {
844         int error;
845
846         dma->tag = NULL;
847         dma->map = NULL;
848         dma->size = size;
849         dma->vaddr = NULL;
850
851         error = bus_dma_tag_create(tag, alignment,
852             0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
853             1, size, 0, NULL, NULL, &dma->tag);
854         if (error != 0)
855                 goto fail;
856
857         error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
858             BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
859         if (error != 0)
860                 goto fail;
861
862         error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
863             iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
864         if (error != 0) {
865                 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
866                 dma->vaddr = NULL;
867                 goto fail;
868         }
869
870         bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
871
872         return 0;
873
874 fail:
875         iwm_dma_contig_free(dma);
876
877         return error;
878 }
879
880 static void
881 iwm_dma_contig_free(struct iwm_dma_info *dma)
882 {
883         if (dma->vaddr != NULL) {
884                 bus_dmamap_sync(dma->tag, dma->map,
885                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
886                 bus_dmamap_unload(dma->tag, dma->map);
887                 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
888                 dma->vaddr = NULL;
889         }
890         if (dma->tag != NULL) {
891                 bus_dma_tag_destroy(dma->tag);
892                 dma->tag = NULL;
893         }
894 }
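
/*
 * Illustrative sketch (for exposition): typical use of the two helpers
 * above.  "dma" would normally be embedded in the softc (see fw_dma,
 * kw_dma and ict_dma below); the helper name is hypothetical.
 */
static __inline int
iwm_dma_usage_sketch(struct iwm_softc *sc, struct iwm_dma_info *dma)
{
	int error;

	/* One page of bus-coherent memory, aligned to a 4 KB boundary. */
	error = iwm_dma_contig_alloc(sc->sc_dmat, dma, 4096, 4096);
	if (error != 0)
		return (error);

	/*
	 * Program dma->paddr into the relevant device register and access
	 * the buffer through dma->vaddr, with bus_dmamap_sync() around
	 * CPU/device hand-offs.
	 */

	iwm_dma_contig_free(dma);
	return (0);
}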
895
896 /* fwmem is used to load firmware onto the card */
897 static int
898 iwm_alloc_fwmem(struct iwm_softc *sc)
899 {
900         /* Must be aligned on a 16-byte boundary. */
901         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
902             sc->sc_fwdmasegsz, 16);
903 }
904
905 static void
906 iwm_free_fwmem(struct iwm_softc *sc)
907 {
908         iwm_dma_contig_free(&sc->fw_dma);
909 }
910
911 /* tx scheduler rings.  not used? */
912 static int
913 iwm_alloc_sched(struct iwm_softc *sc)
914 {
915         /* TX scheduler rings must be aligned on a 1KB boundary. */
916         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
917             nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
918 }
919
920 static void
921 iwm_free_sched(struct iwm_softc *sc)
922 {
923         iwm_dma_contig_free(&sc->sched_dma);
924 }
925
926 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
927 static int
928 iwm_alloc_kw(struct iwm_softc *sc)
929 {
930         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
931 }
932
933 static void
934 iwm_free_kw(struct iwm_softc *sc)
935 {
936         iwm_dma_contig_free(&sc->kw_dma);
937 }
938
939 /* interrupt cause table */
940 static int
941 iwm_alloc_ict(struct iwm_softc *sc)
942 {
943         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
944             IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
945 }
946
947 static void
948 iwm_free_ict(struct iwm_softc *sc)
949 {
950         iwm_dma_contig_free(&sc->ict_dma);
951 }
952
953 static int
954 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
955 {
956         bus_size_t size;
957         int i, error;
958
959         ring->cur = 0;
960
961         /* Allocate RX descriptors (256-byte aligned). */
962         size = IWM_RX_RING_COUNT * sizeof(uint32_t);
963         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
964         if (error != 0) {
965                 device_printf(sc->sc_dev,
966                     "could not allocate RX ring DMA memory\n");
967                 goto fail;
968         }
969         ring->desc = ring->desc_dma.vaddr;
970
971         /* Allocate RX status area (16-byte aligned). */
972         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
973             sizeof(*ring->stat), 16);
974         if (error != 0) {
975                 device_printf(sc->sc_dev,
976                     "could not allocate RX status DMA memory\n");
977                 goto fail;
978         }
979         ring->stat = ring->stat_dma.vaddr;
980
981         /* Create RX buffer DMA tag. */
982         error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
983             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
984             IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
985         if (error != 0) {
986                 device_printf(sc->sc_dev,
987                     "%s: could not create RX buf DMA tag, error %d\n",
988                     __func__, error);
989                 goto fail;
990         }
991
992         /* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
993         error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
994         if (error != 0) {
995                 device_printf(sc->sc_dev,
996                     "%s: could not create RX buf DMA map, error %d\n",
997                     __func__, error);
998                 goto fail;
999         }
1000         /*
1001          * Allocate and map RX buffers.
1002          */
1003         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1004                 struct iwm_rx_data *data = &ring->data[i];
1005                 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1006                 if (error != 0) {
1007                         device_printf(sc->sc_dev,
1008                             "%s: could not create RX buf DMA map, error %d\n",
1009                             __func__, error);
1010                         goto fail;
1011                 }
1012                 data->m = NULL;
1013
1014                 if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
1015                         goto fail;
1016                 }
1017         }
1018         return 0;
1019
1020 fail:   iwm_free_rx_ring(sc, ring);
1021         return error;
1022 }
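
/*
 * Illustrative sketch (assumption): the spare_map created above lets the
 * RX refill path (iwm_rx_addbuf(), defined later) map a replacement mbuf
 * first and only then swap DMA maps, so the ring never loses its current
 * buffer if the new mapping fails.  Hypothetical helper name.
 */
static __inline void
iwm_swap_rx_map_sketch(struct iwm_rx_ring *ring, struct iwm_rx_data *data)
{
	bus_dmamap_t swap;

	swap = data->map;
	data->map = ring->spare_map;
	ring->spare_map = swap;
}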
1023
1024 static void
1025 iwm_disable_rx_dma(struct iwm_softc *sc)
1026 {
1027         /* XXX conditional nic locks are stupid */
1028         /* XXX print out if we can't lock the NIC? */
1029         if (iwm_nic_lock(sc)) {
1030                 /* XXX handle if RX stop doesn't finish? */
1031                 (void) iwm_pcie_rx_stop(sc);
1032                 iwm_nic_unlock(sc);
1033         }
1034 }
1035
1036 static void
1037 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1038 {
1039         /* Reset the ring state */
1040         ring->cur = 0;
1041
1042         /*
1043          * The hw rx ring index in shared memory must also be cleared,
1044          * otherwise the discrepancy can cause reprocessing chaos.
1045          */
1046         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1047 }
1048
1049 static void
1050 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1051 {
1052         int i;
1053
1054         iwm_dma_contig_free(&ring->desc_dma);
1055         iwm_dma_contig_free(&ring->stat_dma);
1056
1057         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1058                 struct iwm_rx_data *data = &ring->data[i];
1059
1060                 if (data->m != NULL) {
1061                         bus_dmamap_sync(ring->data_dmat, data->map,
1062                             BUS_DMASYNC_POSTREAD);
1063                         bus_dmamap_unload(ring->data_dmat, data->map);
1064                         m_freem(data->m);
1065                         data->m = NULL;
1066                 }
1067                 if (data->map != NULL) {
1068                         bus_dmamap_destroy(ring->data_dmat, data->map);
1069                         data->map = NULL;
1070                 }
1071         }
1072         if (ring->spare_map != NULL) {
1073                 bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1074                 ring->spare_map = NULL;
1075         }
1076         if (ring->data_dmat != NULL) {
1077                 bus_dma_tag_destroy(ring->data_dmat);
1078                 ring->data_dmat = NULL;
1079         }
1080 }
1081
1082 static int
1083 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1084 {
1085         bus_addr_t paddr;
1086         bus_size_t size;
1087         size_t maxsize;
1088         int nsegments;
1089         int i, error;
1090
1091         ring->qid = qid;
1092         ring->queued = 0;
1093         ring->cur = 0;
1094
1095         /* Allocate TX descriptors (256-byte aligned). */
1096         size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1097         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1098         if (error != 0) {
1099                 device_printf(sc->sc_dev,
1100                     "could not allocate TX ring DMA memory\n");
1101                 goto fail;
1102         }
1103         ring->desc = ring->desc_dma.vaddr;
1104
1105         /*
	 * We only use rings 0 through 9 (4 EDCA + cmd), so there is no need
	 * to allocate command space for the other rings.
1108          */
1109         if (qid > IWM_MVM_CMD_QUEUE)
1110                 return 0;
1111
1112         size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1113         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1114         if (error != 0) {
1115                 device_printf(sc->sc_dev,
1116                     "could not allocate TX cmd DMA memory\n");
1117                 goto fail;
1118         }
1119         ring->cmd = ring->cmd_dma.vaddr;
1120
1121         /* FW commands may require more mapped space than packets. */
1122         if (qid == IWM_MVM_CMD_QUEUE) {
1123                 maxsize = IWM_RBUF_SIZE;
1124                 nsegments = 1;
1125         } else {
1126                 maxsize = MCLBYTES;
1127                 nsegments = IWM_MAX_SCATTER - 2;
1128         }
1129
1130         error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1131             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
1132             nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
1133         if (error != 0) {
1134                 device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
1135                 goto fail;
1136         }
1137
1138         paddr = ring->cmd_dma.paddr;
1139         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1140                 struct iwm_tx_data *data = &ring->data[i];
1141
1142                 data->cmd_paddr = paddr;
1143                 data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1144                     + offsetof(struct iwm_tx_cmd, scratch);
1145                 paddr += sizeof(struct iwm_device_cmd);
1146
1147                 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1148                 if (error != 0) {
1149                         device_printf(sc->sc_dev,
1150                             "could not create TX buf DMA map\n");
1151                         goto fail;
1152                 }
1153         }
1154         KASSERT(paddr == ring->cmd_dma.paddr + size,
1155             ("invalid physical address"));
1156         return 0;
1157
1158 fail:   iwm_free_tx_ring(sc, ring);
1159         return error;
1160 }
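
/*
 * Illustrative sketch (for exposition): how the per-slot addresses set up
 * in iwm_alloc_tx_ring() line up inside the command DMA area.  The helper
 * name is hypothetical; it simply restates the loop above.
 */
static __inline bus_addr_t
iwm_cmd_slot_paddr_sketch(const struct iwm_tx_ring *ring, int idx)
{
	/* Slot idx of IWM_TX_RING_COUNT; one iwm_device_cmd per slot. */
	return (ring->cmd_dma.paddr + idx * sizeof(struct iwm_device_cmd));
}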
1161
1162 static void
1163 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1164 {
1165         int i;
1166
1167         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1168                 struct iwm_tx_data *data = &ring->data[i];
1169
1170                 if (data->m != NULL) {
1171                         bus_dmamap_sync(ring->data_dmat, data->map,
1172                             BUS_DMASYNC_POSTWRITE);
1173                         bus_dmamap_unload(ring->data_dmat, data->map);
1174                         m_freem(data->m);
1175                         data->m = NULL;
1176                 }
1177         }
1178         /* Clear TX descriptors. */
1179         memset(ring->desc, 0, ring->desc_dma.size);
1180         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1181             BUS_DMASYNC_PREWRITE);
1182         sc->qfullmsk &= ~(1 << ring->qid);
1183         ring->queued = 0;
1184         ring->cur = 0;
1185 }
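
/*
 * Illustrative sketch (assumption): sc->qfullmsk, cleared per queue above,
 * keeps one bit per TX ring; the transmit path checks it before queueing
 * more frames.  Hypothetical helper name.
 */
static __inline int
iwm_txq_is_full_sketch(const struct iwm_softc *sc, int qid)
{
	return ((sc->qfullmsk & (1 << qid)) != 0);
}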
1186
1187 static void
1188 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1189 {
1190         int i;
1191
1192         iwm_dma_contig_free(&ring->desc_dma);
1193         iwm_dma_contig_free(&ring->cmd_dma);
1194
1195         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1196                 struct iwm_tx_data *data = &ring->data[i];
1197
1198                 if (data->m != NULL) {
1199                         bus_dmamap_sync(ring->data_dmat, data->map,
1200                             BUS_DMASYNC_POSTWRITE);
1201                         bus_dmamap_unload(ring->data_dmat, data->map);
1202                         m_freem(data->m);
1203                         data->m = NULL;
1204                 }
1205                 if (data->map != NULL) {
1206                         bus_dmamap_destroy(ring->data_dmat, data->map);
1207                         data->map = NULL;
1208                 }
1209         }
1210         if (ring->data_dmat != NULL) {
1211                 bus_dma_tag_destroy(ring->data_dmat);
1212                 ring->data_dmat = NULL;
1213         }
1214 }
1215
1216 /*
1217  * High-level hardware frobbing routines
1218  */
1219
1220 static void
1221 iwm_enable_interrupts(struct iwm_softc *sc)
1222 {
1223         sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1224         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1225 }
1226
1227 static void
1228 iwm_restore_interrupts(struct iwm_softc *sc)
1229 {
1230         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1231 }
1232
1233 static void
1234 iwm_disable_interrupts(struct iwm_softc *sc)
1235 {
1236         /* disable interrupts */
1237         IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1238
1239         /* acknowledge all interrupts */
1240         IWM_WRITE(sc, IWM_CSR_INT, ~0);
1241         IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1242 }
1243
1244 static void
1245 iwm_ict_reset(struct iwm_softc *sc)
1246 {
1247         iwm_disable_interrupts(sc);
1248
1249         /* Reset ICT table. */
1250         memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1251         sc->ict_cur = 0;
1252
1253         /* Set physical address of ICT table (4KB aligned). */
1254         IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1255             IWM_CSR_DRAM_INT_TBL_ENABLE
1256             | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1257             | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1258             | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1259
1260         /* Switch to ICT interrupt mode in driver. */
1261         sc->sc_flags |= IWM_FLAG_USE_ICT;
1262
1263         /* Re-enable interrupts. */
1264         IWM_WRITE(sc, IWM_CSR_INT, ~0);
1265         iwm_enable_interrupts(sc);
1266 }
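
/*
 * Illustrative sketch (assumption, mirroring the if_iwn ICT handling):
 * once IWM_FLAG_USE_ICT is set, the interrupt handler reads 32-bit cause
 * words out of the ICT table at ict_cur instead of polling the INT CSR.
 * Hypothetical helper name; the real logic lives in iwm_intr().
 */
static __inline uint32_t
iwm_ict_read_sketch(struct iwm_softc *sc)
{
	const uint32_t *table = (const void *)sc->ict_dma.vaddr;

	return (le32toh(table[sc->ict_cur]));
}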
1267
1268 /* iwlwifi pcie/trans.c */
1269
1270 /*
1271  * Since this .. hard-resets things, it's time to actually
1272  * mark the first vap (if any) as having no mac context.
1273  * It's annoying, but since the driver is potentially being
1274  * stop/start'ed whilst active (thanks openbsd port!) we
1275  * have to correctly track this.
1276  */
1277 static void
1278 iwm_stop_device(struct iwm_softc *sc)
1279 {
1280         struct ieee80211com *ic = &sc->sc_ic;
1281         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1282         int chnl, qid;
1283         uint32_t mask = 0;
1284
1285         /* tell the device to stop sending interrupts */
1286         iwm_disable_interrupts(sc);
1287
1288         /*
1289          * FreeBSD-local: mark the first vap as not-uploaded,
1290          * so the next transition through auth/assoc
1291          * will correctly populate the MAC context.
1292          */
1293         if (vap) {
1294                 struct iwm_vap *iv = IWM_VAP(vap);
1295                 iv->is_uploaded = 0;
1296         }
1297
1298         /* device going down, Stop using ICT table */
1299         sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1300
1301         /* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
1302
1303         iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1304
1305         if (iwm_nic_lock(sc)) {
1306                 /* Stop each Tx DMA channel */
1307                 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1308                         IWM_WRITE(sc,
1309                             IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1310                         mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1311                 }
1312
1313                 /* Wait for DMA channels to be idle */
1314                 if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1315                     5000)) {
1316                         device_printf(sc->sc_dev,
1317                             "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1318                             IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1319                 }
1320                 iwm_nic_unlock(sc);
1321         }
1322         iwm_disable_rx_dma(sc);
1323
1324         /* Stop RX ring. */
1325         iwm_reset_rx_ring(sc, &sc->rxq);
1326
1327         /* Reset all TX rings. */
1328         for (qid = 0; qid < nitems(sc->txq); qid++)
1329                 iwm_reset_tx_ring(sc, &sc->txq[qid]);
1330
1331         /*
1332          * Power-down device's busmaster DMA clocks
1333          */
1334         iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1335         DELAY(5);
1336
1337         /* Make sure (redundant) we've released our request to stay awake */
1338         IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1339             IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1340
1341         /* Stop the device, and put it in low power state */
1342         iwm_apm_stop(sc);
1343
1344         /* Upon stop, the APM issues an interrupt if HW RF kill is set.
1345          * Clear the interrupt again here.
1346          */
1347         iwm_disable_interrupts(sc);
1348         /* stop and reset the on-board processor */
1349         IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1350
1351         /*
1352          * Even if we stop the HW, we still want the RF kill
1353          * interrupt
1354          */
1355         iwm_enable_rfkill_int(sc);
1356         iwm_check_rfkill(sc);
1357 }
1358
1359 /* iwlwifi: mvm/ops.c */
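/*
 * A short summary of the configuration below: the radio
 * type/step/dash fields are unpacked from the firmware PHY config
 * (sc_fw_phy_config), combined with the MAC step/dash taken from the
 * hardware revision, and the result is written to the hardware
 * interface configuration CSR so the device and firmware agree on the
 * SKU and radio configuration.
 */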
1360 static void
1361 iwm_mvm_nic_config(struct iwm_softc *sc)
1362 {
1363         uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1364         uint32_t reg_val = 0;
1365
1366         radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1367             IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1368         radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1369             IWM_FW_PHY_CFG_RADIO_STEP_POS;
1370         radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1371             IWM_FW_PHY_CFG_RADIO_DASH_POS;
1372
1373         /* SKU control */
1374         reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1375             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1376         reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1377             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1378
1379         /* radio configuration */
1380         reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1381         reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1382         reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1383
1384         IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1385
1386         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1387             "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1388             radio_cfg_step, radio_cfg_dash);
1389
1390         /*
1391          * W/A: the NIC is stuck in a reset state after an early PCIe power
1392          * off (PCIe power is lost before PERST# is asserted), causing the
1393          * ME FW to lose ownership and be unable to obtain it back.
1394          */
1395         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1396                 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1397                     IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1398                     ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1399         }
1400 }
1401
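/*
 * RX ring bring-up, modelled on if_iwn: stop the RX DMA engine and
 * clear its pointers, hand the device the physical addresses of the
 * 256-byte-aligned descriptor ring (paddr >> 8) and the 16-byte-
 * aligned status area (paddr >> 4), then re-enable the channel with
 * 4KB receive buffers and the RB threshold/timeout interrupt setting,
 * and finally bump the write pointer to 8 as the iwlwifi comment
 * below requires.
 */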
1402 static int
1403 iwm_nic_rx_init(struct iwm_softc *sc)
1404 {
1405         if (!iwm_nic_lock(sc))
1406                 return EBUSY;
1407
1408         /*
1409          * Initialize RX ring.  This is from the iwn driver.
1410          */
1411         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1412
1413         /* stop DMA */
1414         iwm_disable_rx_dma(sc);
1415         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1416         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1417         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1418         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1419
1420         /* Set physical address of RX ring (256-byte aligned). */
1421         IWM_WRITE(sc,
1422             IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1423
1424         /* Set physical address of RX status (16-byte aligned). */
1425         IWM_WRITE(sc,
1426             IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1427
1428         /* Enable RX. */
1429         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1430             IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
1431             IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |  /* HW bug */
1432             IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
1433             IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK        |
1434             (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1435             IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
1436             IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1437
1438         IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1439
1440         /* W/A for interrupt coalescing bug in 7260 and 3160 */
1441         if (sc->host_interrupt_operation_mode)
1442                 IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1443
1444         /*
1445          * Thus sayeth el jefe (iwlwifi) via a comment:
1446          *
1447          * This value should initially be 0 (before preparing any
1448          * RBs), and should be 8 after preparing the first 8 RBs (for example).
1449          */
1450         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1451
1452         iwm_nic_unlock(sc);
1453
1454         return 0;
1455 }
1456
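/*
 * TX-side counterpart: with the scheduler deactivated, program the
 * "keep warm" page (16-byte aligned, paddr >> 4) and each queue's
 * 256-byte-aligned descriptor ring base (paddr >> 8) into the
 * IWM_FH_MEM_CBBC_QUEUE() registers, then let the scheduler
 * auto-activate queues via IWM_SCD_GP_CTRL.
 */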
1457 static int
1458 iwm_nic_tx_init(struct iwm_softc *sc)
1459 {
1460         int qid;
1461
1462         if (!iwm_nic_lock(sc))
1463                 return EBUSY;
1464
1465         /* Deactivate TX scheduler. */
1466         iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1467
1468         /* Set physical address of "keep warm" page (16-byte aligned). */
1469         IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1470
1471         /* Initialize TX rings. */
1472         for (qid = 0; qid < nitems(sc->txq); qid++) {
1473                 struct iwm_tx_ring *txq = &sc->txq[qid];
1474
1475                 /* Set physical address of TX ring (256-byte aligned). */
1476                 IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1477                     txq->desc_dma.paddr >> 8);
1478                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1479                     "%s: loading ring %d descriptors (%p) at %lx\n",
1480                     __func__,
1481                     qid, txq->desc,
1482                     (unsigned long) (txq->desc_dma.paddr >> 8));
1483         }
1484
1485         iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1486
1487         iwm_nic_unlock(sc);
1488
1489         return 0;
1490 }
1491
1492 static int
1493 iwm_nic_init(struct iwm_softc *sc)
1494 {
1495         int error;
1496
1497         iwm_apm_init(sc);
1498         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1499                 iwm_set_pwr(sc);
1500
1501         iwm_mvm_nic_config(sc);
1502
1503         if ((error = iwm_nic_rx_init(sc)) != 0)
1504                 return error;
1505
1506         /*
1507          * Ditto for TX, from iwn
1508          */
1509         if ((error = iwm_nic_tx_init(sc)) != 0)
1510                 return error;
1511
1512         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1513             "%s: shadow registers enabled\n", __func__);
1514         IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1515
1516         return 0;
1517 }
1518
1519 const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
1520         IWM_MVM_TX_FIFO_VO,
1521         IWM_MVM_TX_FIFO_VI,
1522         IWM_MVM_TX_FIFO_BE,
1523         IWM_MVM_TX_FIFO_BK,
1524 };
1525
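/*
 * iwm_mvm_ac_to_tx_fifo[] maps the WME access categories (VO, VI, BE,
 * BK) onto the hardware TX FIFOs.  iwm_enable_txq() then activates a
 * queue: the command queue is configured directly through the
 * scheduler PRPH registers and its SRAM context (window size and
 * frame limit packed into one word), while every other queue is
 * enabled by sending an IWM_SCD_QUEUE_CFG command and letting the
 * firmware do the scheduler programming.
 */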
1526 static int
1527 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1528 {
1529         if (!iwm_nic_lock(sc)) {
1530                 device_printf(sc->sc_dev,
1531                     "%s: cannot enable txq %d\n",
1532                     __func__,
1533                     qid);
1534                 return EBUSY;
1535         }
1536
1537         IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1538
1539         if (qid == IWM_MVM_CMD_QUEUE) {
1540                 /* deactivate before configuration */
1541                 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1542                     (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1543                     | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1544
1545                 iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1546
1547                 iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1548
1549                 iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1550                 /* Set scheduler window size and frame limit. */
1551                 iwm_write_mem32(sc,
1552                     sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1553                     sizeof(uint32_t),
1554                     ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1555                     IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1556                     ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1557                     IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1558
1559                 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1560                     (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1561                     (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1562                     (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1563                     IWM_SCD_QUEUE_STTS_REG_MSK);
1564         } else {
1565                 struct iwm_scd_txq_cfg_cmd cmd;
1566                 int error;
1567
1568                 iwm_nic_unlock(sc);
1569
1570                 memset(&cmd, 0, sizeof(cmd));
1571                 cmd.scd_queue = qid;
1572                 cmd.enable = 1;
1573                 cmd.sta_id = sta_id;
1574                 cmd.tx_fifo = fifo;
1575                 cmd.aggregate = 0;
1576                 cmd.window = IWM_FRAME_LIMIT;
1577
1578                 error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1579                     sizeof(cmd), &cmd);
1580                 if (error) {
1581                         device_printf(sc->sc_dev,
1582                             "cannot enable txq %d\n", qid);
1583                         return error;
1584                 }
1585
1586                 if (!iwm_nic_lock(sc))
1587                         return EBUSY;
1588         }
1589
1590         iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1591             iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1592
1593         iwm_nic_unlock(sc);
1594
1595         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1596             __func__, qid, fifo);
1597
1598         return 0;
1599 }
1600
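/*
 * Runs after the firmware's "alive" notification: sanity-check the
 * scheduler SRAM base the firmware reported against the PRPH value,
 * reset the ICT table, zero the scheduler context area in SRAM, point
 * the scheduler at its DRAM ring (1KB aligned, paddr >> 10), enable
 * the command queue on FIFO 7, and finally turn on all TX DMA
 * channels.
 */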
1601 static int
1602 iwm_post_alive(struct iwm_softc *sc)
1603 {
1604         int nwords;
1605         int error, chnl;
1606         uint32_t base;
1607
1608         if (!iwm_nic_lock(sc))
1609                 return EBUSY;
1610
1611         base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1612         if (sc->sched_base != base) {
1613                 device_printf(sc->sc_dev,
1614                     "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1615                     __func__, sc->sched_base, base);
1616         }
1617
1618         iwm_ict_reset(sc);
1619
1620         /* Clear TX scheduler state in SRAM. */
1621         nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1622             IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
1623             / sizeof(uint32_t);
1624         error = iwm_write_mem(sc,
1625             sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1626             NULL, nwords);
1627         if (error)
1628                 goto out;
1629
1630         /* Set physical address of TX scheduler rings (1KB aligned). */
1631         iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1632
1633         iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1634
1635         iwm_nic_unlock(sc);
1636
1637         /* enable command channel */
1638         error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
1639         if (error)
1640                 return error;
1641
1642         if (!iwm_nic_lock(sc))
1643                 return EBUSY;
1644
1645         iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1646
1647         /* Enable DMA channels. */
1648         for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1649                 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1650                     IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1651                     IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1652         }
1653
1654         IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1655             IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1656
1657         /* Enable L1-Active */
1658         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
1659                 iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1660                     IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1661         }
1662
1663  out:
1664         iwm_nic_unlock(sc);
1665         return error;
1666 }
1667
1668 /*
1669  * NVM read access and content parsing.  We do not support
1670  * external NVM or writing NVM.
1671  * iwlwifi/mvm/nvm.c
1672  */
1673
1674 /* list of NVM sections we are allowed/need to read */
1675 const int nvm_to_read[] = {
1676         IWM_NVM_SECTION_TYPE_HW,
1677         IWM_NVM_SECTION_TYPE_SW,
1678         IWM_NVM_SECTION_TYPE_REGULATORY,
1679         IWM_NVM_SECTION_TYPE_CALIBRATION,
1680         IWM_NVM_SECTION_TYPE_PRODUCTION,
1681         IWM_NVM_SECTION_TYPE_HW_8000,
1682         IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
1683         IWM_NVM_SECTION_TYPE_PHY_SKU,
1684 };
1685
1686 /* Default NVM size to read */
1687 #define IWM_NVM_DEFAULT_CHUNK_SIZE      (2*1024)
1688 #define IWM_MAX_NVM_SECTION_SIZE        8192
1689
1690 #define IWM_NVM_WRITE_OPCODE 1
1691 #define IWM_NVM_READ_OPCODE 0
1692
1693 /* load nvm chunk response */
1694 #define IWM_READ_NVM_CHUNK_SUCCEED              0
1695 #define IWM_READ_NVM_CHUNK_INVALID_ADDRESS      1
1696
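/*
 * A single NVM read is a synchronous IWM_NVM_ACCESS_CMD exchange: the
 * driver asks for (section, offset, length) and the firmware replies
 * with a status word, the offset and byte count it actually returned,
 * and the data itself.  iwm_nvm_read_section() below loops over such
 * chunks until the firmware returns fewer bytes than were requested.
 */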
1697 static int
1698 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1699         uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1700 {
1701         offset = 0;
1702         struct iwm_nvm_access_cmd nvm_access_cmd = {
1703                 .offset = htole16(offset),
1704                 .length = htole16(length),
1705                 .type = htole16(section),
1706                 .op_code = IWM_NVM_READ_OPCODE,
1707         };
1708         struct iwm_nvm_access_resp *nvm_resp;
1709         struct iwm_rx_packet *pkt;
1710         struct iwm_host_cmd cmd = {
1711                 .id = IWM_NVM_ACCESS_CMD,
1712                 .flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
1713                     IWM_CMD_SEND_IN_RFKILL,
1714                 .data = { &nvm_access_cmd, },
1715         };
1716         int ret, offset_read;
1717         size_t bytes_read;
1718         uint8_t *resp_data;
1719
1720         cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1721
1722         ret = iwm_send_cmd(sc, &cmd);
1723         if (ret) {
1724                 device_printf(sc->sc_dev,
1725                     "Could not send NVM_ACCESS command (error=%d)\n", ret);
1726                 return ret;
1727         }
1728
1729         pkt = cmd.resp_pkt;
1730         if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
1731                 device_printf(sc->sc_dev,
1732                     "Bad return from IWM_NVM_ACCESS_CMD (0x%08X)\n",
1733                     pkt->hdr.flags);
1734                 ret = EIO;
1735                 goto exit;
1736         }
1737
1738         /* Extract NVM response */
1739         nvm_resp = (void *)pkt->data;
1740
1741         ret = le16toh(nvm_resp->status);
1742         bytes_read = le16toh(nvm_resp->length);
1743         offset_read = le16toh(nvm_resp->offset);
1744         resp_data = nvm_resp->data;
1745         if (ret) {
1746                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1747                     "NVM access command failed with status %d\n", ret);
1748                 ret = EINVAL;
1749                 goto exit;
1750         }
1751
1752         if (offset_read != offset) {
1753                 device_printf(sc->sc_dev,
1754                     "NVM ACCESS response with invalid offset %d\n",
1755                     offset_read);
1756                 ret = EINVAL;
1757                 goto exit;
1758         }
1759
1760         if (bytes_read > length) {
1761                 device_printf(sc->sc_dev,
1762                     "NVM ACCESS response with too much data "
1763                     "(%d bytes requested, %zu bytes received)\n",
1764                     length, bytes_read);
1765                 ret = EINVAL;
1766                 goto exit;
1767         }
1768
1769         memcpy(data + offset, resp_data, bytes_read);
1770         *len = bytes_read;
1771
1772  exit:
1773         iwm_free_resp(sc, &cmd);
1774         return ret;
1775 }
1776
1777 /*
1778  * Reads an NVM section completely.
1779  * NICs prior to the 7000 family don't have a real NVM, but just read
1780  * section 0, which is the EEPROM. Because EEPROM reads are not bounded
1781  * by the uCode, we need to manually check in this case that we don't
1782  * overflow and try to read more than the EEPROM size.
1783  * For 7000 family NICs, we supply the maximal size we can read, and
1784  * the uCode fills the response with as much data as it can,
1785  * without overflowing, so no check is needed.
1786  */
1787 static int
1788 iwm_nvm_read_section(struct iwm_softc *sc,
1789         uint16_t section, uint8_t *data, uint16_t *len, size_t max_len)
1790 {
1791         uint16_t chunklen, seglen;
1792         int error = 0;
1793
1794         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1795             "reading NVM section %d\n", section);
1796
1797         chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
1798         *len = 0;
1799
1800         /* Read NVM chunks until exhausted (reading less than requested) */
1801         while (seglen == chunklen && *len < max_len) {
1802                 error = iwm_nvm_read_chunk(sc,
1803                     section, *len, chunklen, data, &seglen);
1804                 if (error) {
1805                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1806                             "Cannot read from NVM section "
1807                             "%d at offset %d\n", section, *len);
1808                         return error;
1809                 }
1810                 *len += seglen;
1811         }
1812
1813         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1814             "NVM section %d read completed (%d bytes, error=%d)\n",
1815             section, *len, error);
1816         return error;
1817 }
1818
1819 /*
1820  * BEGIN IWM_NVM_PARSE
1821  */
1822
1823 /* iwlwifi/iwl-nvm-parse.c */
1824
1825 /* NVM offsets (in words) definitions */
1826 enum iwm_nvm_offsets {
1827         /* NVM HW-Section offset (in words) definitions */
1828         IWM_HW_ADDR = 0x15,
1829
1830 /* NVM SW-Section offset (in words) definitions */
1831         IWM_NVM_SW_SECTION = 0x1C0,
1832         IWM_NVM_VERSION = 0,
1833         IWM_RADIO_CFG = 1,
1834         IWM_SKU = 2,
1835         IWM_N_HW_ADDRS = 3,
1836         IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1837
1838 /* NVM calibration section offset (in words) definitions */
1839         IWM_NVM_CALIB_SECTION = 0x2B8,
1840         IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1841 };
1842
1843 enum iwm_8000_nvm_offsets {
1844         /* NVM HW-Section offset (in words) definitions */
1845         IWM_HW_ADDR0_WFPM_8000 = 0x12,
1846         IWM_HW_ADDR1_WFPM_8000 = 0x16,
1847         IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1848         IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1849         IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1850
1851         /* NVM SW-Section offset (in words) definitions */
1852         IWM_NVM_SW_SECTION_8000 = 0x1C0,
1853         IWM_NVM_VERSION_8000 = 0,
1854         IWM_RADIO_CFG_8000 = 0,
1855         IWM_SKU_8000 = 2,
1856         IWM_N_HW_ADDRS_8000 = 3,
1857
1858         /* NVM REGULATORY -Section offset (in words) definitions */
1859         IWM_NVM_CHANNELS_8000 = 0,
1860         IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1861         IWM_NVM_LAR_OFFSET_8000 = 0x507,
1862         IWM_NVM_LAR_ENABLED_8000 = 0x7,
1863
1864         /* NVM calibration section offset (in words) definitions */
1865         IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1866         IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1867 };
1868
1869 /* SKU Capabilities (actual values from NVM definition) */
1870 enum nvm_sku_bits {
1871         IWM_NVM_SKU_CAP_BAND_24GHZ      = (1 << 0),
1872         IWM_NVM_SKU_CAP_BAND_52GHZ      = (1 << 1),
1873         IWM_NVM_SKU_CAP_11N_ENABLE      = (1 << 2),
1874         IWM_NVM_SKU_CAP_11AC_ENABLE     = (1 << 3),
1875 };
1876
1877 /* radio config bits (actual values from NVM definition) */
1878 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1879 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1880 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1881 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1882 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1883 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1884
1885 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)       (x & 0xF)
1886 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x)         ((x >> 4) & 0xF)
1887 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x)         ((x >> 8) & 0xF)
1888 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)         ((x >> 12) & 0xFFF)
1889 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)       ((x >> 24) & 0xF)
1890 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)       ((x >> 28) & 0xF)
1891
1892 #define DEFAULT_MAX_TX_POWER 16
1893
1894 /**
1895  * enum iwm_nvm_channel_flags - channel flags in NVM
1896  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1897  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1898  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1899  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1900  * XXX cannot find this (DFS) flag in iwl-nvm-parse.c
1901  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1902  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1903  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1904  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1905  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1906  */
1907 enum iwm_nvm_channel_flags {
1908         IWM_NVM_CHANNEL_VALID = (1 << 0),
1909         IWM_NVM_CHANNEL_IBSS = (1 << 1),
1910         IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1911         IWM_NVM_CHANNEL_RADAR = (1 << 4),
1912         IWM_NVM_CHANNEL_DFS = (1 << 7),
1913         IWM_NVM_CHANNEL_WIDE = (1 << 8),
1914         IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1915         IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1916         IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1917 };
1918
1919 /*
1920  * Translate EEPROM flags to net80211.
1921  */
1922 static uint32_t
1923 iwm_eeprom_channel_flags(uint16_t ch_flags)
1924 {
1925         uint32_t nflags;
1926
1927         nflags = 0;
1928         if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1929                 nflags |= IEEE80211_CHAN_PASSIVE;
1930         if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1931                 nflags |= IEEE80211_CHAN_NOADHOC;
1932         if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1933                 nflags |= IEEE80211_CHAN_DFS;
1934                 /* Just in case. */
1935                 nflags |= IEEE80211_CHAN_NOADHOC;
1936         }
1937
1938         return (nflags);
1939 }
1940
1941 static void
1942 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1943     int maxchans, int *nchans, int ch_idx, size_t ch_num,
1944     const uint8_t bands[])
1945 {
1946         const uint16_t * const nvm_ch_flags = sc->sc_nvm.nvm_ch_flags;
1947         uint32_t nflags;
1948         uint16_t ch_flags;
1949         uint8_t ieee;
1950         int error;
1951
1952         for (; ch_idx < ch_num; ch_idx++) {
1953                 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1954                 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1955                         ieee = iwm_nvm_channels[ch_idx];
1956                 else
1957                         ieee = iwm_nvm_channels_8000[ch_idx];
1958
1959                 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1960                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1961                             "Ch. %d Flags %x [%sGHz] - No traffic\n",
1962                             ieee, ch_flags,
1963                             (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1964                             "5.2" : "2.4");
1965                         continue;
1966                 }
1967
1968                 nflags = iwm_eeprom_channel_flags(ch_flags);
1969                 error = ieee80211_add_channel(chans, maxchans, nchans,
1970                     ieee, 0, 0, nflags, bands);
1971                 if (error != 0)
1972                         break;
1973
1974                 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1975                     "Ch. %d Flags %x [%sGHz] - Added\n",
1976                     ieee, ch_flags,
1977                     (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1978                     "5.2" : "2.4");
1979         }
1980 }
1981
1982 static void
1983 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
1984     struct ieee80211_channel chans[])
1985 {
1986         struct iwm_softc *sc = ic->ic_softc;
1987         struct iwm_nvm_data *data = &sc->sc_nvm;
1988         uint8_t bands[IEEE80211_MODE_BYTES];
1989         size_t ch_num;
1990
1991         memset(bands, 0, sizeof(bands));
1992         /* 1-13: 11b/g channels. */
1993         setbit(bands, IEEE80211_MODE_11B);
1994         setbit(bands, IEEE80211_MODE_11G);
1995         iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
1996             IWM_NUM_2GHZ_CHANNELS - 1, bands);
1997
1998         /* 14: 11b channel only. */
1999         clrbit(bands, IEEE80211_MODE_11G);
2000         iwm_add_channel_band(sc, chans, maxchans, nchans,
2001             IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2002
2003         if (data->sku_cap_band_52GHz_enable) {
2004                 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2005                         ch_num = nitems(iwm_nvm_channels);
2006                 else
2007                         ch_num = nitems(iwm_nvm_channels_8000);
2008                 memset(bands, 0, sizeof(bands));
2009                 setbit(bands, IEEE80211_MODE_11A);
2010                 iwm_add_channel_band(sc, chans, maxchans, nchans,
2011                     IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2012         }
2013 }
2014
2015 static void
2016 iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2017         const uint16_t *mac_override, const uint16_t *nvm_hw)
2018 {
2019         const uint8_t *hw_addr;
2020
2021         if (mac_override) {
2022                 static const uint8_t reserved_mac[] = {
2023                         0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2024                 };
2025
2026                 hw_addr = (const uint8_t *)(mac_override +
2027                                  IWM_MAC_ADDRESS_OVERRIDE_8000);
2028
2029                 /*
2030                  * Store the MAC address from MAO section.
2031                  * No byte swapping is required in MAO section
2032                  */
2033                 IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2034
2035                 /*
2036                  * Force the use of the OTP MAC address in case of reserved MAC
2037                  * address in the NVM, or if address is given but invalid.
2038                  */
2039                 if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2040                     !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2041                     iwm_is_valid_ether_addr(data->hw_addr) &&
2042                     !IEEE80211_IS_MULTICAST(data->hw_addr))
2043                         return;
2044
2045                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2046                     "%s: mac address from nvm override section invalid\n",
2047                     __func__);
2048         }
2049
2050         if (nvm_hw) {
2051                 /* read the mac address from WFMP registers */
2052                 uint32_t mac_addr0 =
2053                     htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2054                 uint32_t mac_addr1 =
2055                     htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2056
2057                 hw_addr = (const uint8_t *)&mac_addr0;
2058                 data->hw_addr[0] = hw_addr[3];
2059                 data->hw_addr[1] = hw_addr[2];
2060                 data->hw_addr[2] = hw_addr[1];
2061                 data->hw_addr[3] = hw_addr[0];
2062
2063                 hw_addr = (const uint8_t *)&mac_addr1;
2064                 data->hw_addr[4] = hw_addr[1];
2065                 data->hw_addr[5] = hw_addr[0];
2066
2067                 return;
2068         }
2069
2070         device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2071         memset(data->hw_addr, 0, sizeof(data->hw_addr));
2072 }
2073
2074 static int
2075 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2076             const uint16_t *phy_sku)
2077 {
2078         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2079                 return le16_to_cpup(nvm_sw + IWM_SKU);
2080
2081         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2082 }
2083
2084 static int
2085 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2086 {
2087         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2088                 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2089         else
2090                 return le32_to_cpup((const uint32_t *)(nvm_sw +
2091                                                 IWM_NVM_VERSION_8000));
2092 }
2093
2094 static int
2095 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2096                   const uint16_t *phy_sku)
2097 {
2098         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2099                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2100
2101         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2102 }
2103
2104 static int
2105 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2106 {
2107         int n_hw_addr;
2108
2109         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2110                 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2111
2112         n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2113
2114         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2115 }
2116
2117 static void
2118 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2119                   uint32_t radio_cfg)
2120 {
2121         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
2122                 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2123                 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2124                 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2125                 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2126                 return;
2127         }
2128
2129         /* set the radio configuration for family 8000 */
2130         data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2131         data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2132         data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2133         data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2134         data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2135         data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2136 }
2137
2138 static int
2139 iwm_parse_nvm_data(struct iwm_softc *sc,
2140                    const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2141                    const uint16_t *nvm_calib, const uint16_t *mac_override,
2142                    const uint16_t *phy_sku, const uint16_t *regulatory)
2143 {
2144         struct iwm_nvm_data *data = &sc->sc_nvm;
2145         uint8_t hw_addr[IEEE80211_ADDR_LEN];
2146         uint32_t sku, radio_cfg;
2147
2148         data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2149
2150         radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2151         iwm_set_radio_cfg(sc, data, radio_cfg);
2152
2153         sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2154         data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2155         data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2156         data->sku_cap_11n_enable = 0;
2157
2158         data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2159
2160         /* The byte order is little endian 16 bit, meaning 214365 */
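        /*
         * For illustration only, with made-up bytes: if the NVM words
         * hold the raw byte sequence bb aa dd cc ff ee, the pair swaps
         * below yield the MAC address aa:bb:cc:dd:ee:ff.
         */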
2161         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2162                 IEEE80211_ADDR_COPY(hw_addr, nvm_hw + IWM_HW_ADDR);
2163                 data->hw_addr[0] = hw_addr[1];
2164                 data->hw_addr[1] = hw_addr[0];
2165                 data->hw_addr[2] = hw_addr[3];
2166                 data->hw_addr[3] = hw_addr[2];
2167                 data->hw_addr[4] = hw_addr[5];
2168                 data->hw_addr[5] = hw_addr[4];
2169         } else {
2170                 iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
2171         }
2172
2173         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2174                 memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2175                     IWM_NUM_CHANNELS * sizeof(uint16_t));
2176         } else {
2177                 memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2178                     IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2179         }
2180         data->calib_version = 255;   /* TODO:
2181                                         this value will prevent some checks from
2182                                         failing; we need to check whether this
2183                                         field is still needed, and if so,
2184                                         where it lives in the NVM */
2185
2186         return 0;
2187 }
2188
2189 /*
2190  * END NVM PARSE
2191  */
2192
2193 static int
2194 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2195 {
2196         const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2197
2198         /* Checking for required sections */
2199         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2200                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2201                     !sections[IWM_NVM_SECTION_TYPE_HW].data) {
2202                         device_printf(sc->sc_dev,
2203                             "Can't parse empty OTP/NVM sections\n");
2204                         return ENOENT;
2205                 }
2206
2207                 hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
2208         } else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
2209                 /* SW and REGULATORY sections are mandatory */
2210                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2211                     !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2212                         device_printf(sc->sc_dev,
2213                             "Can't parse empty OTP/NVM sections\n");
2214                         return ENOENT;
2215                 }
2216                 /* MAC_OVERRIDE or at least HW section must exist */
2217                 if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
2218                     !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2219                         device_printf(sc->sc_dev,
2220                             "Can't parse mac_address, empty sections\n");
2221                         return ENOENT;
2222                 }
2223
2224                 /* PHY_SKU section is mandatory in B0 */
2225                 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2226                         device_printf(sc->sc_dev,
2227                             "Can't parse phy_sku in B0, empty sections\n");
2228                         return ENOENT;
2229                 }
2230
2231                 hw = (const uint16_t *)
2232                     sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
2233         } else {
2234                 panic("unknown device family %d\n", sc->sc_device_family);
2235         }
2236
2237         sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2238         calib = (const uint16_t *)
2239             sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2240         regulatory = (const uint16_t *)
2241             sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2242         mac_override = (const uint16_t *)
2243             sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2244         phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2245
2246         return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2247             phy_sku, regulatory);
2248 }
2249
2250 static int
2251 iwm_nvm_init(struct iwm_softc *sc)
2252 {
2253         struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
2254         int i, section, error;
2255         uint16_t len;
2256         uint8_t *buf;
2257         const size_t bufsz = IWM_MAX_NVM_SECTION_SIZE;
2258
2259         memset(nvm_sections, 0 , sizeof(nvm_sections));
2260
2261         buf = malloc(bufsz, M_DEVBUF, M_NOWAIT);
2262         if (buf == NULL)
2263                 return ENOMEM;
2264
2265         for (i = 0; i < nitems(nvm_to_read); i++) {
2266                 section = nvm_to_read[i];
2267                 KASSERT(section < nitems(nvm_sections),
2268                     ("too many sections"));
2269
2270                 error = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
2271                 if (error) {
2272                         error = 0;
2273                         continue;
2274                 }
2275                 nvm_sections[section].data = malloc(len, M_DEVBUF, M_NOWAIT);
2276                 if (nvm_sections[section].data == NULL) {
2277                         error = ENOMEM;
2278                         break;
2279                 }
2280                 memcpy(nvm_sections[section].data, buf, len);
2281                 nvm_sections[section].length = len;
2282         }
2283         free(buf, M_DEVBUF);
2284         if (error == 0)
2285                 error = iwm_parse_nvm_sections(sc, nvm_sections);
2286
2287         for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
2288                 if (nvm_sections[i].data != NULL)
2289                         free(nvm_sections[i].data, M_DEVBUF);
2290         }
2291
2292         return error;
2293 }
2294
2295 /*
2296  * Firmware loading gunk.  This is kind of a weird hybrid between the
2297  * iwn driver and the Linux iwlwifi driver.
2298  */
2299
2300 static int
2301 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
2302         const uint8_t *section, uint32_t byte_cnt)
2303 {
2304         int error = EINVAL;
2305         uint32_t chunk_sz, offset;
2306
2307         chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
2308
2309         for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
2310                 uint32_t addr, len;
2311                 const uint8_t *data;
2312
2313                 addr = dst_addr + offset;
2314                 len = MIN(chunk_sz, byte_cnt - offset);
2315                 data = section + offset;
2316
2317                 error = iwm_firmware_load_chunk(sc, addr, data, len);
2318                 if (error)
2319                         break;
2320         }
2321
2322         return error;
2323 }
2324
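/*
 * Each chunk is staged in the pre-allocated fw_dma bounce buffer and
 * pushed through the service DMA channel (IWM_FH_SRVC_CHNL): the
 * channel is programmed with the buffer's bus address, the
 * destination SRAM address and the byte count, and the caller then
 * sleeps (up to a second) until sc_fw_chunk_done is set by the
 * interrupt path when the transfer completes.
 */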
2325 static int
2326 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2327         const uint8_t *chunk, uint32_t byte_cnt)
2328 {
2329         struct iwm_dma_info *dma = &sc->fw_dma;
2330         int error;
2331
2332         /* Copy firmware chunk into pre-allocated DMA-safe memory. */
2333         memcpy(dma->vaddr, chunk, byte_cnt);
2334         bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2335
2336         if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2337             dst_addr <= IWM_FW_MEM_EXTENDED_END) {
2338                 iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2339                     IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2340         }
2341
2342         sc->sc_fw_chunk_done = 0;
2343
2344         if (!iwm_nic_lock(sc))
2345                 return EBUSY;
2346
2347         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2348             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2349         IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2350             dst_addr);
2351         IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2352             dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2353         IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2354             (iwm_get_dma_hi_addr(dma->paddr)
2355               << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2356         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2357             1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2358             1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2359             IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2360         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2361             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2362             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2363             IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2364
2365         iwm_nic_unlock(sc);
2366
2367         /* wait 1s for this segment to load */
2368         while (!sc->sc_fw_chunk_done)
2369                 if ((error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz)) != 0)
2370                         break;
2371
2372         if (!sc->sc_fw_chunk_done) {
2373                 device_printf(sc->sc_dev,
2374                     "fw chunk addr 0x%x len %d failed to load\n",
2375                     dst_addr, byte_cnt);
2376         }
2377
2378         if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2379             dst_addr <= IWM_FW_MEM_EXTENDED_END && iwm_nic_lock(sc)) {
2380                 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2381                     IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2382                 iwm_nic_unlock(sc);
2383         }
2384
2385         return error;
2386 }
2387
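/*
 * Family 8000 images carry sections for two on-chip CPUs.  As each
 * section is loaded, a growing bit pattern (sec_num) is ORed into
 * IWM_FH_UCODE_LOAD_STATUS -- low 16 bits for CPU1, high 16 bits for
 * CPU2 via shift_param -- to tell the ucode which sections have been
 * loaded, and the half-word is set to all-ones once that CPU's
 * sections are complete.
 */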
2388 int
2389 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
2390     int cpu, int *first_ucode_section)
2391 {
2392         int shift_param;
2393         int i, error = 0, sec_num = 0x1;
2394         uint32_t val, last_read_idx = 0;
2395         const void *data;
2396         uint32_t dlen;
2397         uint32_t offset;
2398
2399         if (cpu == 1) {
2400                 shift_param = 0;
2401                 *first_ucode_section = 0;
2402         } else {
2403                 shift_param = 16;
2404                 (*first_ucode_section)++;
2405         }
2406
2407         for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
2408                 last_read_idx = i;
2409                 data = fws->fw_sect[i].fws_data;
2410                 dlen = fws->fw_sect[i].fws_len;
2411                 offset = fws->fw_sect[i].fws_devoff;
2412
2413                 /*
2414                  * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the
2415                  * CPU1 sections from the CPU2 sections.
2416                  * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2
2417                  * non-paged sections from the CPU2 paging section.
2418                  */
2419                 if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2420                     offset == IWM_PAGING_SEPARATOR_SECTION)
2421                         break;
2422
2423                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2424                     "LOAD FIRMWARE chunk %d offset 0x%x len %d for cpu %d\n",
2425                     i, offset, dlen, cpu);
2426
2427                 if (dlen > sc->sc_fwdmasegsz) {
2428                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2429                             "chunk %d too large (%d bytes)\n", i, dlen);
2430                         error = EFBIG;
2431                 } else {
2432                         error = iwm_firmware_load_sect(sc, offset, data, dlen);
2433                 }
2434                 if (error) {
2435                         device_printf(sc->sc_dev,
2436                             "could not load firmware chunk %d (error %d)\n",
2437                             i, error);
2438                         return error;
2439                 }
2440
2441                 /* Notify the ucode of the loaded section number and status */
2442                 if (iwm_nic_lock(sc)) {
2443                         val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2444                         val = val | (sec_num << shift_param);
2445                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2446                         sec_num = (sec_num << 1) | 0x1;
2447                         iwm_nic_unlock(sc);
2448
2449                         /*
2450                          * The firmware won't load correctly without this delay.
2451                          */
2452                         DELAY(8000);
2453                 }
2454         }
2455
2456         *first_ucode_section = last_read_idx;
2457
2458         if (iwm_nic_lock(sc)) {
2459                 if (cpu == 1)
2460                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2461                 else
2462                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2463                 iwm_nic_unlock(sc);
2464         }
2465
2466         return 0;
2467 }
2468
2469 int
2470 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2471 {
2472         struct iwm_fw_sects *fws;
2473         int error = 0;
2474         int first_ucode_section;
2475
2476         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "loading ucode type %d\n",
2477             ucode_type);
2478
2479         fws = &sc->sc_fw.fw_sects[ucode_type];
2480
2481         /* configure the ucode to be ready to get the secured image */
2482         /* release CPU reset */
2483         iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
2484
2485         /* load to FW the binary Secured sections of CPU1 */
2486         error = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
2487         if (error)
2488                 return error;
2489
2490         /* load to FW the binary sections of CPU2 */
2491         return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
2492 }
2493
2494 static int
2495 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2496 {
2497         struct iwm_fw_sects *fws;
2498         int error, i;
2499         const void *data;
2500         uint32_t dlen;
2501         uint32_t offset;
2502
2503         sc->sc_uc.uc_intr = 0;
2504
2505         fws = &sc->sc_fw.fw_sects[ucode_type];
2506         for (i = 0; i < fws->fw_count; i++) {
2507                 data = fws->fw_sect[i].fws_data;
2508                 dlen = fws->fw_sect[i].fws_len;
2509                 offset = fws->fw_sect[i].fws_devoff;
2510                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2511                     "LOAD FIRMWARE type %d offset %u len %d\n",
2512                     ucode_type, offset, dlen);
2513                 if (dlen > sc->sc_fwdmasegsz) {
2514                         IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2515                             "chunk %d too large (%d bytes)\n", i, dlen);
2516                         error = EFBIG;
2517                 } else {
2518                         error = iwm_firmware_load_sect(sc, offset, data, dlen);
2519                 }
2520                 if (error) {
2521                         device_printf(sc->sc_dev,
2522                             "could not load firmware chunk %u of %u "
2523                             "(error=%d)\n", i, fws->fw_count, error);
2524                         return error;
2525                 }
2526         }
2527
2528         IWM_WRITE(sc, IWM_CSR_RESET, 0);
2529
2530         return 0;
2531 }
2532
2533 static int
2534 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2535 {
2536         int error, w;
2537
2538         if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
2539                 error = iwm_load_firmware_8000(sc, ucode_type);
2540         else
2541                 error = iwm_load_firmware_7000(sc, ucode_type);
2542         if (error)
2543                 return error;
2544
2545         /* wait for the firmware to load */
2546         for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
2547                 error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
2548         }
2549         if (error || !sc->sc_uc.uc_ok) {
2550                 device_printf(sc->sc_dev, "could not load firmware\n");
2551                 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
2552                         device_printf(sc->sc_dev, "cpu1 status: 0x%x\n",
2553                             iwm_read_prph(sc, IWM_SB_CPU_1_STATUS));
2554                         device_printf(sc->sc_dev, "cpu2 status: 0x%x\n",
2555                             iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
2556                 }
2557         }
2558
2559         /*
2560          * Give the firmware some time to initialize.
2561          * Accessing it too early causes errors.
2562          */
2563         msleep(&w, &sc->sc_mtx, 0, "iwmfwinit", hz);
2564
2565         return error;
2566 }
2567
2568 /* iwlwifi: pcie/trans.c */
2569 static int
2570 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2571 {
2572         int error;
2573
2574         IWM_WRITE(sc, IWM_CSR_INT, ~0);
2575
2576         if ((error = iwm_nic_init(sc)) != 0) {
2577                 device_printf(sc->sc_dev, "unable to init nic\n");
2578                 return error;
2579         }
2580
2581         /* make sure rfkill handshake bits are cleared */
2582         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2583         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2584             IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2585
2586         /* clear (again), then enable host interrupts */
2587         IWM_WRITE(sc, IWM_CSR_INT, ~0);
2588         iwm_enable_interrupts(sc);
2589
2590         /* really make sure rfkill handshake bits are cleared */
2591         /* maybe we should write a few times more?  just to make sure */
2592         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2593         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2594
2595         /* Load the given image to the HW */
2596         return iwm_load_firmware(sc, ucode_type);
2597 }
2598
2599 static int
2600 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2601 {
2602         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2603                 .valid = htole32(valid_tx_ant),
2604         };
2605
2606         return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2607             IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2608 }
2609
2610 /* iwlwifi: mvm/fw.c */
2611 static int
2612 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2613 {
2614         struct iwm_phy_cfg_cmd phy_cfg_cmd;
2615         enum iwm_ucode_type ucode_type = sc->sc_uc_current;
2616
2617         /* Set parameters */
2618         phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
2619         phy_cfg_cmd.calib_control.event_trigger =
2620             sc->sc_default_calib[ucode_type].event_trigger;
2621         phy_cfg_cmd.calib_control.flow_trigger =
2622             sc->sc_default_calib[ucode_type].flow_trigger;
2623
2624         IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2625             "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2626         return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2627             sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2628 }
2629
2630 static int
2631 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2632         enum iwm_ucode_type ucode_type)
2633 {
2634         enum iwm_ucode_type old_type = sc->sc_uc_current;
2635         int error;
2636
2637         if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2638                 device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
2639                         error);
2640                 return error;
2641         }
2642
2643         sc->sc_uc_current = ucode_type;
2644         error = iwm_start_fw(sc, ucode_type);
2645         if (error) {
2646                 device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2647                 sc->sc_uc_current = old_type;
2648                 return error;
2649         }
2650
2651         error = iwm_post_alive(sc);
2652         if (error) {
2653                 device_printf(sc->sc_dev, "iwm_fw_alive: failed %d\n", error);
2654         }
2655         return error;
2656 }
2657
2658 /*
2659  * mvm misc bits
2660  */
2661
2662 /*
2663  * follows iwlwifi/fw.c
2664  */
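/*
 * Runs the "init" ucode image: load it and wait for the alive
 * notification, then either just pull the NVM contents (justnvm), or
 * continue with BT coex setup, Smart FIFO configuration, the valid TX
 * antenna mask and the PHY configuration command, and finally sleep
 * until the firmware posts its init-complete notification.
 */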
2665 static int
2666 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2667 {
2668         int error;
2669
2670         /* do not operate with rfkill switch turned on */
2671         if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2672                 device_printf(sc->sc_dev,
2673                     "radio is disabled by hardware switch\n");
2674                 return EPERM;
2675         }
2676
2677         sc->sc_init_complete = 0;
2678         if ((error = iwm_mvm_load_ucode_wait_alive(sc,
2679             IWM_UCODE_TYPE_INIT)) != 0) {
2680                 device_printf(sc->sc_dev, "failed to load init firmware\n");
2681                 return error;
2682         }
2683
2684         if (justnvm) {
2685                 if ((error = iwm_nvm_init(sc)) != 0) {
2686                         device_printf(sc->sc_dev, "failed to read nvm\n");
2687                         return error;
2688                 }
2689                 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->sc_nvm.hw_addr);
2690
2691                 return 0;
2692         }
2693
2694         if ((error = iwm_send_bt_init_conf(sc)) != 0) {
2695                 device_printf(sc->sc_dev,
2696                     "failed to send bt coex configuration: %d\n", error);
2697                 return error;
2698         }
2699
2700         /* Init Smart FIFO. */
2701         error = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
2702         if (error != 0)
2703                 return error;
2704
2705         /* Send TX valid antennas before triggering calibrations */
2706         if ((error = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc))) != 0) {
2707                 device_printf(sc->sc_dev,
2708                     "failed to send antennas before calibration: %d\n", error);
2709                 return error;
2710         }
2711
2712         /*
2713          * Send phy configurations command to init uCode
2714          * to start the 16.0 uCode init image internal calibrations.
2715          */
2716         if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
2717                 device_printf(sc->sc_dev,
2718                     "%s: failed to run internal calibration: %d\n",
2719                     __func__, error);
2720                 return error;
2721         }
2722
2723         /*
2724          * Nothing to do but wait for the init complete notification
2725          * from the firmware
2726          */
2727         while (!sc->sc_init_complete) {
2728                 error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
2729                                  0, "iwminit", 2*hz);
2730                 if (error) {
2731                         device_printf(sc->sc_dev, "init complete failed: %d\n",
2732                                 error);
2733                         break;
2734                 }
2735         }
2736
2737         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "init %scomplete\n",
2738             sc->sc_init_complete ? "" : "not ");
2739
2740         return error;
2741 }
2742
2743 /*
2744  * receive side
2745  */
2746
2747 /* (re)stock rx ring, called at init-time and at runtime */
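/*
 * The ring keeps one spare DMA map: a new mbuf is loaded into
 * spare_map first, and only if that succeeds is the old buffer
 * unloaded and the two maps swapped, so a failed allocation never
 * leaves the slot without a mapped buffer.  The descriptor stores the
 * 256-byte-aligned bus address shifted right by 8.
 */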
2748 static int
2749 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
2750 {
2751         struct iwm_rx_ring *ring = &sc->rxq;
2752         struct iwm_rx_data *data = &ring->data[idx];
2753         struct mbuf *m;
2754         bus_dmamap_t dmamap = NULL;
2755         bus_dma_segment_t seg;
2756         int nsegs, error;
2757
2758         m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
2759         if (m == NULL)
2760                 return ENOBUFS;
2761
2762         m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2763         error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
2764             &seg, &nsegs, BUS_DMA_NOWAIT);
2765         if (error != 0) {
2766                 device_printf(sc->sc_dev,
2767                     "%s: can't map mbuf, error %d\n", __func__, error);
2768                 goto fail;
2769         }
2770
2771         if (data->m != NULL)
2772                 bus_dmamap_unload(ring->data_dmat, data->map);
2773
2774         /* Swap ring->spare_map with data->map */
2775         dmamap = data->map;
2776         data->map = ring->spare_map;
2777         ring->spare_map = dmamap;
2778
2779         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
2780         data->m = m;
2781
2782         /* Update RX descriptor. */
2783         KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
2784         ring->desc[idx] = htole32(seg.ds_addr >> 8);
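              /*
               * Illustrative arithmetic only: the descriptor stores the DMA
               * address right-shifted by 8, so a buffer at physical address
               * 0x12345600 is written as 0x123456.  That is why the KASSERT
               * above insists on 256-byte alignment - the low 8 bits must be 0.
               */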
2785         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2786             BUS_DMASYNC_PREWRITE);
2787
2788         return 0;
2789 fail:
2790         m_freem(m);
2791         return error;
2792 }
2793
2794 /* iwlwifi: mvm/rx.c */
2795 #define IWM_RSSI_OFFSET 50
2796 static int
2797 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2798 {
2799         int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
2800         uint32_t agc_a, agc_b;
2801         uint32_t val;
2802
2803         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
2804         agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
2805         agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
2806
2807         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
2808         rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
2809         rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
2810
2811         /*
2812          * dBm = rssi dB - agc dB - constant.
2813          * Higher AGC (higher radio gain) means lower signal.
2814          */
2815         rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
2816         rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
2817         max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
2818
2819         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2820             "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
2821             rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
2822
2823         return max_rssi_dbm;
2824 }
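     /*
      * Illustrative example of the formula above (made-up numbers, not real
      * traffic): with IWM_RSSI_OFFSET = 50, rssi_a = 70 and agc_a = 20,
      * antenna A yields 70 - 50 - 20 = 0 dBm; with rssi_b = 70 and a higher
      * gain agc_b = 40, antenna B yields -20 dBm, so antenna A wins the MAX().
      */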
2825
2826 /* iwlwifi: mvm/rx.c */
2827 /*
2828  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
2829  * values are reported by the fw as positive values; negate them
2830  * to obtain their dBm.  Account for missing antennas by replacing 0
2831  * values with -256 dBm: practically 0 power and a non-feasible 8-bit value.
2832  */
2833 static int
2834 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2835 {
2836         int energy_a, energy_b, energy_c, max_energy;
2837         uint32_t val;
2838
2839         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
2840         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
2841             IWM_RX_INFO_ENERGY_ANT_A_POS;
2842         energy_a = energy_a ? -energy_a : -256;
2843         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
2844             IWM_RX_INFO_ENERGY_ANT_B_POS;
2845         energy_b = energy_b ? -energy_b : -256;
2846         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
2847             IWM_RX_INFO_ENERGY_ANT_C_POS;
2848         energy_c = energy_c ? -energy_c : -256;
2849         max_energy = MAX(energy_a, energy_b);
2850         max_energy = MAX(max_energy, energy_c);
2851
2852         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2853             "energy In A %d B %d C %d , and max %d\n",
2854             energy_a, energy_b, energy_c, max_energy);
2855
2856         return max_energy;
2857 }
2858
2859 static void
2860 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
2861         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2862 {
2863         struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
2864
2865         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
2866         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2867
2868         memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
2869 }
2870
2871 /*
2872  * Retrieve the average noise (in dBm) among receivers.
2873  */
2874 static int
2875 iwm_get_noise(struct iwm_softc *sc,
2876     const struct iwm_mvm_statistics_rx_non_phy *stats)
2877 {
2878         int i, total, nbant, noise;
2879
2880         total = nbant = noise = 0;
2881         for (i = 0; i < 3; i++) {
2882                 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
2883                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
2884                     __func__,
2885                     i,
2886                     noise);
2887
2888                 if (noise) {
2889                         total += noise;
2890                         nbant++;
2891                 }
2892         }
2893
2894         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
2895             __func__, nbant, total);
2896 #if 0
2897         /* There should be at least one antenna but check anyway. */
2898         return (nbant == 0) ? -127 : (total / nbant) - 107;
2899 #else
2900         /* For now, just hard-code it to -96 to be safe */
2901         return (-96);
2902 #endif
2903 }
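     /*
      * Illustrative only, using the disabled formula above: with beacon
      * silence values of {45, 43, 0}, the zero entry (missing antenna) is
      * skipped, so nbant = 2, total = 88, and the result would be
      * 88 / 2 - 107 = -63 dBm.  The code currently returns -96 regardless.
      */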
2904
2905 /*
2906  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
2907  *
2908  * Handles the actual data of the Rx packet from the fw
2909  */
2910 static void
2911 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
2912         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2913 {
2914         struct ieee80211com *ic = &sc->sc_ic;
2915         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2916         struct ieee80211_frame *wh;
2917         struct ieee80211_node *ni;
2918         struct ieee80211_rx_stats rxs;
2919         struct mbuf *m;
2920         struct iwm_rx_phy_info *phy_info;
2921         struct iwm_rx_mpdu_res_start *rx_res;
2922         uint32_t len;
2923         uint32_t rx_pkt_status;
2924         int rssi;
2925
2926         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2927
2928         phy_info = &sc->sc_last_phy_info;
2929         rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
2930         wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
2931         len = le16toh(rx_res->byte_count);
2932         rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
2933
2934         m = data->m;
2935         m->m_data = pkt->data + sizeof(*rx_res);
2936         m->m_pkthdr.len = m->m_len = len;
2937
2938         if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
2939                 device_printf(sc->sc_dev,
2940                     "dsp size out of range [0,20]: %d\n",
2941                     phy_info->cfg_phy_cnt);
2942                 return;
2943         }
2944
2945         if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
2946             !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
2947                 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2948                     "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
2949                 return; /* drop */
2950         }
2951
2952         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
2953                 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
2954         } else {
2955                 rssi = iwm_mvm_calc_rssi(sc, phy_info);
2956         }
2957
2958         /* Note: RSSI is absolute (ie a negative dBm value) */
2959         if (rssi < IWM_MIN_DBM)
2960                 rssi = IWM_MIN_DBM;
2961         else if (rssi > IWM_MAX_DBM)
2962                 rssi = IWM_MAX_DBM;
2963
2964         /* Map it to relative value */
2965         rssi = rssi - sc->sc_noise;
2966
2967         /* replenish ring for the buffer we're going to feed to the sharks */
2968         if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
2969                 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
2970                     __func__);
2971                 return;
2972         }
2973
2974         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2975             "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
2976
2977         ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
2978
2979         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2980             "%s: phy_info: channel=%d, flags=0x%08x\n",
2981             __func__,
2982             le16toh(phy_info->channel),
2983             le16toh(phy_info->phy_flags));
2984
2985         /*
2986          * Populate an RX state struct with the provided information.
2987          */
2988         bzero(&rxs, sizeof(rxs));
2989         rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
2990         rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
2991         rxs.c_ieee = le16toh(phy_info->channel);
2992         if (le16toh(phy_info->phy_flags) & IWM_RX_RES_PHY_FLAGS_BAND_24) {
2993                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
2994         } else {
2995                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
2996         }
2997
2998         /* rssi is in 1/2db units */
2999         rxs.rssi = rssi * 2;
3000         rxs.nf = sc->sc_noise;
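              /*
               * Illustrative numbers: with an absolute RSSI of -65 dBm and the
               * (currently hard-coded) noise floor of -96 dBm, the relative
               * value computed above is 31, so rxs.rssi is reported as 62
               * half-dB units.
               */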
3001
3002         if (ieee80211_radiotap_active_vap(vap)) {
3003                 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3004
3005                 tap->wr_flags = 0;
3006                 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3007                         tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3008                 tap->wr_chan_freq = htole16(rxs.c_freq);
3009                 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3010                 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3011                 tap->wr_dbm_antsignal = (int8_t)rssi;
3012                 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3013                 tap->wr_tsft = phy_info->system_timestamp;
3014                 switch (phy_info->rate) {
3015                 /* CCK rates. */
3016                 case  10: tap->wr_rate =   2; break;
3017                 case  20: tap->wr_rate =   4; break;
3018                 case  55: tap->wr_rate =  11; break;
3019                 case 110: tap->wr_rate =  22; break;
3020                 /* OFDM rates. */
3021                 case 0xd: tap->wr_rate =  12; break;
3022                 case 0xf: tap->wr_rate =  18; break;
3023                 case 0x5: tap->wr_rate =  24; break;
3024                 case 0x7: tap->wr_rate =  36; break;
3025                 case 0x9: tap->wr_rate =  48; break;
3026                 case 0xb: tap->wr_rate =  72; break;
3027                 case 0x1: tap->wr_rate =  96; break;
3028                 case 0x3: tap->wr_rate = 108; break;
3029                 /* Unknown rate: should not happen. */
3030                 default:  tap->wr_rate =   0;
3031                 }
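                      /*
                       * Note: wr_rate is in radiotap's 500 kb/s units, so the
                       * 110 -> 22 mapping above reports 11 Mb/s CCK and the
                       * 0x3 -> 108 mapping reports 54 Mb/s OFDM.
                       */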
3032         }
3033
3034         IWM_UNLOCK(sc);
3035         if (ni != NULL) {
3036                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3037                 ieee80211_input_mimo(ni, m, &rxs);
3038                 ieee80211_free_node(ni);
3039         } else {
3040                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3041                 ieee80211_input_mimo_all(ic, m, &rxs);
3042         }
3043         IWM_LOCK(sc);
3044 }
3045
3046 static int
3047 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3048         struct iwm_node *in)
3049 {
3050         struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3051         struct ieee80211_node *ni = &in->in_ni;
3052         struct ieee80211vap *vap = ni->ni_vap;
3053         int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3054         int failack = tx_resp->failure_frame;
3055
3056         KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3057
3058         /* Update rate control statistics. */
3059         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3060             __func__,
3061             (int) le16toh(tx_resp->status.status),
3062             (int) le16toh(tx_resp->status.sequence),
3063             tx_resp->frame_count,
3064             tx_resp->bt_kill_count,
3065             tx_resp->failure_rts,
3066             tx_resp->failure_frame,
3067             le32toh(tx_resp->initial_rate),
3068             (int) le16toh(tx_resp->wireless_media_time));
3069
3070         if (status != IWM_TX_STATUS_SUCCESS &&
3071             status != IWM_TX_STATUS_DIRECT_DONE) {
3072                 ieee80211_ratectl_tx_complete(vap, ni,
3073                     IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3074                 return (1);
3075         } else {
3076                 ieee80211_ratectl_tx_complete(vap, ni,
3077                     IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3078                 return (0);
3079         }
3080 }
3081
3082 static void
3083 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
3084         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3085 {
3086         struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3087         int idx = cmd_hdr->idx;
3088         int qid = cmd_hdr->qid;
3089         struct iwm_tx_ring *ring = &sc->txq[qid];
3090         struct iwm_tx_data *txd = &ring->data[idx];
3091         struct iwm_node *in = txd->in;
3092         struct mbuf *m = txd->m;
3093         int status;
3094
3095         KASSERT(txd->done == 0, ("txd not done"));
3096         KASSERT(txd->in != NULL, ("txd without node"));
3097         KASSERT(txd->m != NULL, ("txd without mbuf"));
3098
3099         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3100
3101         sc->sc_tx_timer = 0;
3102
3103         status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3104
3105         /* Unmap and free mbuf. */
3106         bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3107         bus_dmamap_unload(ring->data_dmat, txd->map);
3108
3109         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3110             "free txd %p, in %p\n", txd, txd->in);
3111         txd->done = 1;
3112         txd->m = NULL;
3113         txd->in = NULL;
3114
3115         ieee80211_tx_complete(&in->in_ni, m, status);
3116
3117         if (--ring->queued < IWM_TX_RING_LOMARK) {
3118                 sc->qfullmsk &= ~(1 << ring->qid);
3119                 if (sc->qfullmsk == 0) {
3120                         /*
3121                          * Well, we're in interrupt context, but then again
3122                          * I guess net80211 does all sorts of stunts in
3123                          * interrupt context, so maybe this is no biggie.
3124                          */
3125                         iwm_start(sc);
3126                 }
3127         }
3128 }
3129
3130 /*
3131  * transmit side
3132  */
3133
3134 /*
3135  * Process a "command done" firmware notification.  This is where we wake up
3136  * processes waiting for a synchronous command completion.
3137  * (Adapted from if_iwn.)
3138  */
3139 static void
3140 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3141 {
3142         struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3143         struct iwm_tx_data *data;
3144
3145         if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3146                 return; /* Not a command ack. */
3147         }
3148
3149         /* XXX wide commands? */
3150         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3151             "cmd notification type 0x%x qid %d idx %d\n",
3152             pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3153
3154         data = &ring->data[pkt->hdr.idx];
3155
3156         /* If the command was mapped in an mbuf, free it. */
3157         if (data->m != NULL) {
3158                 bus_dmamap_sync(ring->data_dmat, data->map,
3159                     BUS_DMASYNC_POSTWRITE);
3160                 bus_dmamap_unload(ring->data_dmat, data->map);
3161                 m_freem(data->m);
3162                 data->m = NULL;
3163         }
3164         wakeup(&ring->desc[pkt->hdr.idx]);
3165 }
3166
3167 #if 0
3168 /*
3169  * necessary only for block ack mode
3170  */
3171 void
3172 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3173         uint16_t len)
3174 {
3175         struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3176         uint16_t w_val;
3177
3178         scd_bc_tbl = sc->sched_dma.vaddr;
3179
3180         len += 8; /* magic numbers came naturally from paris */
3181         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
3182                 len = roundup(len, 4) / 4;
3183
3184         w_val = htole16(sta_id << 12 | len);
3185
3186         /* Update TX scheduler. */
3187         scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3188         bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3189             BUS_DMASYNC_PREWRITE);
3190
3191         /* I really wonder what this is ?!? */
3192         if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3193                 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3194                 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3195                     BUS_DMASYNC_PREWRITE);
3196         }
3197 }
3198 #endif
3199
3200 /*
3201  * Take an 802.11 (non-n) rate, find the relevant rate
3202  * table entry.  Return the index into in_ridx[].
3203  *
3204  * The caller then uses that index back into in_ridx
3205  * to figure out the rate index programmed /into/
3206  * the firmware for this given node.
3207  */
3208 static int
3209 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3210     uint8_t rate)
3211 {
3212         int i;
3213         uint8_t r;
3214
3215         for (i = 0; i < nitems(in->in_ridx); i++) {
3216                 r = iwm_rates[in->in_ridx[i]].rate;
3217                 if (rate == r)
3218                         return (i);
3219         }
3220         /* XXX Return the first */
3221         /* XXX TODO: have it return the /lowest/ */
3222         return (0);
3223 }
3224
3225 /*
3226  * Fill in the rate related information for a transmit command.
3227  */
3228 static const struct iwm_rate *
3229 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3230         struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
3231 {
3232         struct ieee80211com *ic = &sc->sc_ic;
3233         struct ieee80211_node *ni = &in->in_ni;
3234         const struct iwm_rate *rinfo;
3235         int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3236         int ridx, rate_flags;
3237
3238         tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3239         tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3240
3241         /*
3242          * XXX TODO: everything about the rate selection here is terrible!
3243          */
3244
3245         if (type == IEEE80211_FC0_TYPE_DATA) {
3246                 int i;
3247                 /* for data frames, use RS table */
3248                 (void) ieee80211_ratectl_rate(ni, NULL, 0);
3249                 i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3250                 ridx = in->in_ridx[i];
3251
3252                 /* This is the index into the programmed table */
3253                 tx->initial_rate_index = i;
3254                 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3255                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3256                     "%s: start with i=%d, txrate %d\n",
3257                     __func__, i, iwm_rates[ridx].rate);
3258         } else {
3259                 /*
3260                  * For non-data, use the lowest supported rate for the given
3261                  * operational mode.
3262                  *
3263                  * Note: there may not be any rate control information available.
3264                  * This driver currently assumes that if we're transmitting data
3265                  * frames, the rate control table is used.  Grr.
3266                  *
3267                  * XXX TODO: use the configured rate for the traffic type!
3268                  * XXX TODO: this should be per-vap, not curmode; later on
3269                  * we'll want to handle off-channel stuff (eg TDLS).
3270                  */
3271                 if (ic->ic_curmode == IEEE80211_MODE_11A) {
3272                         /*
3273                          * XXX this assumes the mode is either 11a or not 11a;
3274                          * definitely won't work for 11n.
3275                          */
3276                         ridx = IWM_RIDX_OFDM;
3277                 } else {
3278                         ridx = IWM_RIDX_CCK;
3279                 }
3280         }
3281
3282         rinfo = &iwm_rates[ridx];
3283
3284         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3285             __func__, ridx,
3286             rinfo->rate,
3287             !! (IWM_RIDX_IS_CCK(ridx))
3288             );
3289
3290         /* XXX TODO: hard-coded TX antenna? */
3291         rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3292         if (IWM_RIDX_IS_CCK(ridx))
3293                 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3294         tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3295
3296         return rinfo;
3297 }
3298
3299 #define TB0_SIZE 16
3300 static int
3301 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3302 {
3303         struct ieee80211com *ic = &sc->sc_ic;
3304         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3305         struct iwm_node *in = IWM_NODE(ni);
3306         struct iwm_tx_ring *ring;
3307         struct iwm_tx_data *data;
3308         struct iwm_tfd *desc;
3309         struct iwm_device_cmd *cmd;
3310         struct iwm_tx_cmd *tx;
3311         struct ieee80211_frame *wh;
3312         struct ieee80211_key *k = NULL;
3313         struct mbuf *m1;
3314         const struct iwm_rate *rinfo;
3315         uint32_t flags;
3316         u_int hdrlen;
3317         bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3318         int nsegs;
3319         uint8_t tid, type;
3320         int i, totlen, error, pad;
3321
3322         wh = mtod(m, struct ieee80211_frame *);
3323         hdrlen = ieee80211_anyhdrsize(wh);
3324         type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3325         tid = 0;
3326         ring = &sc->txq[ac];
3327         desc = &ring->desc[ring->cur];
3328         memset(desc, 0, sizeof(*desc));
3329         data = &ring->data[ring->cur];
3330
3331         /* Fill out iwm_tx_cmd to send to the firmware */
3332         cmd = &ring->cmd[ring->cur];
3333         cmd->hdr.code = IWM_TX_CMD;
3334         cmd->hdr.flags = 0;
3335         cmd->hdr.qid = ring->qid;
3336         cmd->hdr.idx = ring->cur;
3337
3338         tx = (void *)cmd->data;
3339         memset(tx, 0, sizeof(*tx));
3340
3341         rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
3342
3343         /* Encrypt the frame if need be. */
3344         if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3345                 /* Retrieve key for TX && do software encryption. */
3346                 k = ieee80211_crypto_encap(ni, m);
3347                 if (k == NULL) {
3348                         m_freem(m);
3349                         return (ENOBUFS);
3350                 }
3351                 /* 802.11 header may have moved. */
3352                 wh = mtod(m, struct ieee80211_frame *);
3353         }
3354
3355         if (ieee80211_radiotap_active_vap(vap)) {
3356                 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3357
3358                 tap->wt_flags = 0;
3359                 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3360                 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3361                 tap->wt_rate = rinfo->rate;
3362                 if (k != NULL)
3363                         tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3364                 ieee80211_radiotap_tx(vap, m);
3365         }
3366
3367
3368         totlen = m->m_pkthdr.len;
3369
3370         flags = 0;
3371         if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3372                 flags |= IWM_TX_CMD_FLG_ACK;
3373         }
3374
3375         if (type == IEEE80211_FC0_TYPE_DATA
3376             && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3377             && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3378                 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3379         }
3380
3381         if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3382             type != IEEE80211_FC0_TYPE_DATA)
3383                 tx->sta_id = sc->sc_aux_sta.sta_id;
3384         else
3385                 tx->sta_id = IWM_STATION_ID;
3386
3387         if (type == IEEE80211_FC0_TYPE_MGT) {
3388                 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3389
3390                 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3391                     subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3392                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3393                 } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3394                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3395                 } else {
3396                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3397                 }
3398         } else {
3399                 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3400         }
3401
3402         if (hdrlen & 3) {
3403                 /* First segment length must be a multiple of 4. */
3404                 flags |= IWM_TX_CMD_FLG_MH_PAD;
3405                 pad = 4 - (hdrlen & 3);
3406         } else
3407                 pad = 0;
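              /*
               * For example (illustrative only): a QoS data header is 26 bytes,
               * so 26 & 3 == 2 and pad becomes 4 - 2 = 2; a plain 24-byte data
               * header is already 4-byte aligned and needs no padding.
               */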
3408
3409         tx->driver_txop = 0;
3410         tx->next_frame_len = 0;
3411
3412         tx->len = htole16(totlen);
3413         tx->tid_tspec = tid;
3414         tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3415
3416         /* Set physical address of "scratch area". */
3417         tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3418         tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3419
3420         /* Copy 802.11 header in TX command. */
3421         memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3422
3423         flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3424
3425         tx->sec_ctl = 0;
3426         tx->tx_flags |= htole32(flags);
3427
3428         /* Trim 802.11 header. */
3429         m_adj(m, hdrlen);
3430         error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3431             segs, &nsegs, BUS_DMA_NOWAIT);
3432         if (error != 0) {
3433                 if (error != EFBIG) {
3434                         device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3435                             error);
3436                         m_freem(m);
3437                         return error;
3438                 }
3439                 /* Too many DMA segments, linearize mbuf. */
3440                 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3441                 if (m1 == NULL) {
3442                         device_printf(sc->sc_dev,
3443                             "%s: could not defrag mbuf\n", __func__);
3444                         m_freem(m);
3445                         return (ENOBUFS);
3446                 }
3447                 m = m1;
3448
3449                 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3450                     segs, &nsegs, BUS_DMA_NOWAIT);
3451                 if (error != 0) {
3452                         device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3453                             error);
3454                         m_freem(m);
3455                         return error;
3456                 }
3457         }
3458         data->m = m;
3459         data->in = in;
3460         data->done = 0;
3461
3462         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3463             "sending txd %p, in %p\n", data, data->in);
3464         KASSERT(data->in != NULL, ("node is NULL"));
3465
3466         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3467             "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3468             ring->qid, ring->cur, totlen, nsegs,
3469             le32toh(tx->tx_flags),
3470             le32toh(tx->rate_n_flags),
3471             tx->initial_rate_index
3472             );
3473
3474         /* Fill TX descriptor. */
3475         desc->num_tbs = 2 + nsegs;
3476
3477         desc->tbs[0].lo = htole32(data->cmd_paddr);
3478         desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3479             (TB0_SIZE << 4);
3480         desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3481         desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3482             ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3483               + hdrlen + pad - TB0_SIZE) << 4);
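              /*
               * TB layout, as set up above and below: TB0 maps the first
               * TB0_SIZE (16) bytes of the device command, TB1 maps the rest
               * of the command header, the TX command and the copied/padded
               * 802.11 header, and TB2..TBn map the mbuf payload segments.
               */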
3484
3485         /* Other DMA segments are for data payload. */
3486         for (i = 0; i < nsegs; i++) {
3487                 seg = &segs[i];
3488                 desc->tbs[i+2].lo = htole32(seg->ds_addr);
3489                 desc->tbs[i+2].hi_n_len = \
3490                     htole16(iwm_get_dma_hi_addr(seg->ds_addr))
3491                     | ((seg->ds_len) << 4);
3492         }
3493
3494         bus_dmamap_sync(ring->data_dmat, data->map,
3495             BUS_DMASYNC_PREWRITE);
3496         bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3497             BUS_DMASYNC_PREWRITE);
3498         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3499             BUS_DMASYNC_PREWRITE);
3500
3501 #if 0
3502         iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3503 #endif
3504
3505         /* Kick TX ring. */
3506         ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3507         IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3508
3509         /* Mark TX ring as full if we reach a certain threshold. */
3510         if (++ring->queued > IWM_TX_RING_HIMARK) {
3511                 sc->qfullmsk |= 1 << ring->qid;
3512         }
3513
3514         return 0;
3515 }
3516
3517 static int
3518 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3519     const struct ieee80211_bpf_params *params)
3520 {
3521         struct ieee80211com *ic = ni->ni_ic;
3522         struct iwm_softc *sc = ic->ic_softc;
3523         int error = 0;
3524
3525         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3526             "->%s begin\n", __func__);
3527
3528         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3529                 m_freem(m);
3530                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3531                     "<-%s not RUNNING\n", __func__);
3532                 return (ENETDOWN);
3533         }
3534
3535         IWM_LOCK(sc);
3536         /* XXX fix this */
3537         if (params == NULL) {
3538                 error = iwm_tx(sc, m, ni, 0);
3539         } else {
3540                 error = iwm_tx(sc, m, ni, 0);
3541         }
3542         sc->sc_tx_timer = 5;
3543         IWM_UNLOCK(sc);
3544
3545         return (error);
3546 }
3547
3548 /*
3549  * mvm/tx.c
3550  */
3551
3552 #if 0
3553 /*
3554  * Note that there are transports that buffer frames before they reach
3555  * the firmware. This means that after flush_tx_path is called, the
3556  * queue might not be empty. The race-free way to handle this is to:
3557  * 1) set the station as draining
3558  * 2) flush the Tx path
3559  * 3) wait for the transport queues to be empty
3560  */
3561 int
3562 iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
3563 {
3564         struct iwm_tx_path_flush_cmd flush_cmd = {
3565                 .queues_ctl = htole32(tfd_msk),
3566                 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3567         };
3568         int ret;
3569
3570         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
3571             sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
3572             sizeof(flush_cmd), &flush_cmd);
3573         if (ret)
3574                 device_printf(sc->sc_dev,
3575                     "Flushing tx queue failed: %d\n", ret);
3576         return ret;
3577 }
3578 #endif
3579
3580 /*
3581  * BEGIN mvm/sta.c
3582  */
3583
3584 static int
3585 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3586         struct iwm_mvm_add_sta_cmd_v7 *cmd, int *status)
3587 {
3588         return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
3589             cmd, status);
3590 }
3591
3592 /* send station add/update command to firmware */
3593 static int
3594 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3595 {
3596         struct iwm_mvm_add_sta_cmd_v7 add_sta_cmd;
3597         int ret;
3598         uint32_t status;
3599
3600         memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3601
3602         add_sta_cmd.sta_id = IWM_STATION_ID;
3603         add_sta_cmd.mac_id_n_color
3604             = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3605                 IWM_DEFAULT_COLOR));
3606         if (!update) {
3607                 int ac;
3608                 for (ac = 0; ac < WME_NUM_AC; ac++) {
3609                         add_sta_cmd.tfd_queue_msk |=
3610                             htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
3611                 }
3612                 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3613         }
3614         add_sta_cmd.add_modify = update ? 1 : 0;
3615         add_sta_cmd.station_flags_msk
3616             |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3617         add_sta_cmd.tid_disable_tx = htole16(0xffff);
3618         if (update)
3619                 add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
3620
3621         status = IWM_ADD_STA_SUCCESS;
3622         ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3623         if (ret)
3624                 return ret;
3625
3626         switch (status) {
3627         case IWM_ADD_STA_SUCCESS:
3628                 break;
3629         default:
3630                 ret = EIO;
3631                 device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3632                 break;
3633         }
3634
3635         return ret;
3636 }
3637
3638 static int
3639 iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
3640 {
3641         return iwm_mvm_sta_send_to_fw(sc, in, 0);
3642 }
3643
3644 static int
3645 iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
3646 {
3647         return iwm_mvm_sta_send_to_fw(sc, in, 1);
3648 }
3649
3650 static int
3651 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3652         const uint8_t *addr, uint16_t mac_id, uint16_t color)
3653 {
3654         struct iwm_mvm_add_sta_cmd_v7 cmd;
3655         int ret;
3656         uint32_t status;
3657
3658         memset(&cmd, 0, sizeof(cmd));
3659         cmd.sta_id = sta->sta_id;
3660         cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3661
3662         cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3663         cmd.tid_disable_tx = htole16(0xffff);
3664
3665         if (addr)
3666                 IEEE80211_ADDR_COPY(cmd.addr, addr);
3667
3668         ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3669         if (ret)
3670                 return ret;
3671
3672         switch (status) {
3673         case IWM_ADD_STA_SUCCESS:
3674                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3675                     "%s: Internal station added.\n", __func__);
3676                 return 0;
3677         default:
3678                 device_printf(sc->sc_dev,
3679                     "%s: Add internal station failed, status=0x%x\n",
3680                     __func__, status);
3681                 ret = EIO;
3682                 break;
3683         }
3684         return ret;
3685 }
3686
3687 static int
3688 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3689 {
3690         int ret;
3691
3692         sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
3693         sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
3694
3695         ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
3696         if (ret)
3697                 return ret;
3698
3699         ret = iwm_mvm_add_int_sta_common(sc,
3700             &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
3701
3702         if (ret)
3703                 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
3704         return ret;
3705 }
3706
3707 /*
3708  * END mvm/sta.c
3709  */
3710
3711 /*
3712  * BEGIN mvm/quota.c
3713  */
3714
3715 static int
3716 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
3717 {
3718         struct iwm_time_quota_cmd cmd;
3719         int i, idx, ret, num_active_macs, quota, quota_rem;
3720         int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3721         int n_ifs[IWM_MAX_BINDINGS] = {0, };
3722         uint16_t id;
3723
3724         memset(&cmd, 0, sizeof(cmd));
3725
3726         /* currently, PHY ID == binding ID */
3727         if (in) {
3728                 id = in->in_phyctxt->id;
3729                 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3730                 colors[id] = in->in_phyctxt->color;
3731
3732                 if (1)
3733                         n_ifs[id] = 1;
3734         }
3735
3736         /*
3737          * The FW's scheduling session consists of
3738          * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3739          * equally between all the bindings that require quota
3740          */
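              /*
               * Hedged example (assuming IWM_MVM_MAX_QUOTA is 128, as in the
               * iwlwifi code this follows - see if_iwmreg.h): one active MAC
               * gets quota = 128 and quota_rem = 0; three active MACs get 42
               * fragments each, and the remaining 2 are added to the first
               * binding's quota below.
               */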
3741         num_active_macs = 0;
3742         for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3743                 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3744                 num_active_macs += n_ifs[i];
3745         }
3746
3747         quota = 0;
3748         quota_rem = 0;
3749         if (num_active_macs) {
3750                 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3751                 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3752         }
3753
3754         for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3755                 if (colors[i] < 0)
3756                         continue;
3757
3758                 cmd.quotas[idx].id_and_color =
3759                         htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3760
3761                 if (n_ifs[i] <= 0) {
3762                         cmd.quotas[idx].quota = htole32(0);
3763                         cmd.quotas[idx].max_duration = htole32(0);
3764                 } else {
3765                         cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3766                         cmd.quotas[idx].max_duration = htole32(0);
3767                 }
3768                 idx++;
3769         }
3770
3771         /* Give the remainder of the session to the first binding */
3772         cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3773
3774         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3775             sizeof(cmd), &cmd);
3776         if (ret)
3777                 device_printf(sc->sc_dev,
3778                     "%s: Failed to send quota: %d\n", __func__, ret);
3779         return ret;
3780 }
3781
3782 /*
3783  * END mvm/quota.c
3784  */
3785
3786 /*
3787  * ieee80211 routines
3788  */
3789
3790 /*
3791  * Change to AUTH state in 80211 state machine.  Roughly matches what
3792  * Linux does in bss_info_changed().
3793  */
3794 static int
3795 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3796 {
3797         struct ieee80211_node *ni;
3798         struct iwm_node *in;
3799         struct iwm_vap *iv = IWM_VAP(vap);
3800         uint32_t duration;
3801         int error;
3802
3803         /*
3804          * XXX I have a feeling that the vap node is being
3805          * freed from underneath us. Grr.
3806          */
3807         ni = ieee80211_ref_node(vap->iv_bss);
3808         in = IWM_NODE(ni);
3809         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3810             "%s: called; vap=%p, bss ni=%p\n",
3811             __func__,
3812             vap,
3813             ni);
3814
3815         in->in_assoc = 0;
3816
3817         error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
3818         if (error != 0)
3819                 return error;
3820
3821         error = iwm_allow_mcast(vap, sc);
3822         if (error) {
3823                 device_printf(sc->sc_dev,
3824                     "%s: failed to set multicast\n", __func__);
3825                 goto out;
3826         }
3827
3828         /*
3829          * This is where it deviates from what Linux does.
3830          *
3831          * Linux iwlwifi doesn't reset the nic each time, nor does it
3832          * call ctxt_add() here.  Instead, it adds it during vap creation,
3833          * and always does a mac_ctx_changed().
3834          *
3835          * The OpenBSD port doesn't attempt to do that - it resets things
3836          * at odd states and does the add here.
3837          *
3838          * So, until the state handling is fixed (ie, we never reset
3839          * the NIC except for a firmware failure, which should drag
3840          * the NIC back to IDLE, re-setup and re-add all the mac/phy
3841          * contexts that are required), let's do a dirty hack here.
3842          */
3843         if (iv->is_uploaded) {
3844                 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3845                         device_printf(sc->sc_dev,
3846                             "%s: failed to update MAC\n", __func__);
3847                         goto out;
3848                 }
3849                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3850                     in->in_ni.ni_chan, 1, 1)) != 0) {
3851                         device_printf(sc->sc_dev,
3852                             "%s: failed update phy ctxt\n", __func__);
3853                         goto out;
3854                 }
3855                 in->in_phyctxt = &sc->sc_phyctxt[0];
3856
3857                 if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
3858                         device_printf(sc->sc_dev,
3859                             "%s: binding update cmd\n", __func__);
3860                         goto out;
3861                 }
3862                 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3863                         device_printf(sc->sc_dev,
3864                             "%s: failed to update sta\n", __func__);
3865                         goto out;
3866                 }
3867         } else {
3868                 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
3869                         device_printf(sc->sc_dev,
3870                             "%s: failed to add MAC\n", __func__);
3871                         goto out;
3872                 }
3873                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3874                     in->in_ni.ni_chan, 1, 1)) != 0) {
3875                         device_printf(sc->sc_dev,
3876                             "%s: failed add phy ctxt!\n", __func__);
3877                         error = ETIMEDOUT;
3878                         goto out;
3879                 }
3880                 in->in_phyctxt = &sc->sc_phyctxt[0];
3881
3882                 if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
3883                         device_printf(sc->sc_dev,
3884                             "%s: binding add cmd\n", __func__);
3885                         goto out;
3886                 }
3887                 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
3888                         device_printf(sc->sc_dev,
3889                             "%s: failed to add sta\n", __func__);
3890                         goto out;
3891                 }
3892         }
3893
3894         /*
3895          * Prevent the FW from wandering off channel during association
3896          * by "protecting" the session with a time event.
3897          */
3898         /* XXX duration is in units of TU, not MS */
3899         duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
3900         iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
3901         DELAY(100);
3902
3903         error = 0;
3904 out:
3905         ieee80211_free_node(ni);
3906         return (error);
3907 }
3908
3909 static int
3910 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
3911 {
3912         struct iwm_node *in = IWM_NODE(vap->iv_bss);
3913         int error;
3914
3915         if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3916                 device_printf(sc->sc_dev,
3917                     "%s: failed to update STA\n", __func__);
3918                 return error;
3919         }
3920
3921         in->in_assoc = 1;
3922         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3923                 device_printf(sc->sc_dev,
3924                     "%s: failed to update MAC\n", __func__);
3925                 return error;
3926         }
3927
3928         return 0;
3929 }
3930
3931 static int
3932 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
3933 {
3934         /*
3935          * Ok, so *technically* the proper set of calls for going
3936          * from RUN back to SCAN is:
3937          *
3938          * iwm_mvm_power_mac_disable(sc, in);
3939          * iwm_mvm_mac_ctxt_changed(sc, in);
3940          * iwm_mvm_rm_sta(sc, in);
3941          * iwm_mvm_update_quotas(sc, NULL);
3942          * iwm_mvm_mac_ctxt_changed(sc, in);
3943          * iwm_mvm_binding_remove_vif(sc, in);
3944          * iwm_mvm_mac_ctxt_remove(sc, in);
3945          *
3946          * However, that freezes the device no matter which permutations
3947          * and modifications are attempted.  Obviously, this driver is missing
3948          * something since it works in the Linux driver, but figuring out what
3949          * is missing is a little more complicated.  Now, since we're going
3950          * back to nothing anyway, we'll just do a complete device reset.
3951          * Up yours, device!
3952          */
3953         /* iwm_mvm_flush_tx_path(sc, 0xf, 1); */
3954         iwm_stop_device(sc);
3955         iwm_init_hw(sc);
3956         if (in)
3957                 in->in_assoc = 0;
3958         return 0;
3959
3960 #if 0
3961         int error;
3962
3963         iwm_mvm_power_mac_disable(sc, in);
3964
3965         if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
3966                 device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
3967                 return error;
3968         }
3969
3970         if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
3971                 device_printf(sc->sc_dev, "sta remove fail %d\n", error);
3972                 return error;
3973         }
3974         error = iwm_mvm_rm_sta(sc, in);
3975         in->in_assoc = 0;
3976         iwm_mvm_update_quotas(sc, NULL);
3977         if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
3978                 device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
3979                 return error;
3980         }
3981         iwm_mvm_binding_remove_vif(sc, in);
3982
3983         iwm_mvm_mac_ctxt_remove(sc, in);
3984
3985         return error;
3986 #endif
3987 }
3988
3989 static struct ieee80211_node *
3990 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3991 {
3992         return malloc(sizeof (struct iwm_node), M_80211_NODE,
3993             M_NOWAIT | M_ZERO);
3994 }
3995
3996 static void
3997 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
3998 {
3999         struct ieee80211_node *ni = &in->in_ni;
4000         struct iwm_lq_cmd *lq = &in->in_lq;
4001         int nrates = ni->ni_rates.rs_nrates;
4002         int i, ridx, tab = 0;
4003         int txant = 0;
4004
4005         if (nrates > nitems(lq->rs_table)) {
4006                 device_printf(sc->sc_dev,
4007                     "%s: node supports %d rates, driver handles "
4008                     "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4009                 return;
4010         }
4011         if (nrates == 0) {
4012                 device_printf(sc->sc_dev,
4013                     "%s: node supports 0 rates, odd!\n", __func__);
4014                 return;
4015         }
4016
4017         /*
4018          * XXX .. and most of iwm_node is not initialised explicitly;
4019          * it's all just 0x0 passed to the firmware.
4020          */
4021
4022         /* first figure out which rates we should support */
4023         /* XXX TODO: this isn't 11n aware /at all/ */
4024         memset(&in->in_ridx, -1, sizeof(in->in_ridx));
4025         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4026             "%s: nrates=%d\n", __func__, nrates);
4027
4028         /*
4029          * Loop over nrates and populate in_ridx from the highest
4030          * rate to the lowest rate.  Remember, in_ridx[] has
4031          * IEEE80211_RATE_MAXSIZE entries!
4032          */
4033         for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
4034                 int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;
4035
4036                 /* Map 802.11 rate to HW rate index. */
4037                 for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
4038                         if (iwm_rates[ridx].rate == rate)
4039                                 break;
4040                 if (ridx > IWM_RIDX_MAX) {
4041                         device_printf(sc->sc_dev,
4042                             "%s: WARNING: device rate for %d not found!\n",
4043                             __func__, rate);
4044                 } else {
4045                         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4046                             "%s: rate: i: %d, rate=%d, ridx=%d\n",
4047                             __func__,
4048                             i,
4049                             rate,
4050                             ridx);
4051                         in->in_ridx[i] = ridx;
4052                 }
4053         }
4054
4055         /* then construct a lq_cmd based on those */
4056         memset(lq, 0, sizeof(*lq));
4057         lq->sta_id = IWM_STATION_ID;
4058
4059         /* For HT, always enable RTS/CTS to avoid excessive retries. */
4060         if (ni->ni_flags & IEEE80211_NODE_HT)
4061                 lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4062
4063         /*
4064          * Are these used? (We don't do SISO or MIMO.)
4065          * They need to be set to non-zero, though, or we get an error.
4066          */
4067         lq->single_stream_ant_msk = 1;
4068         lq->dual_stream_ant_msk = 1;
4069
4070         /*
4071          * Build the actual rate selection table.
4072          * The lowest bits are the rates.  Additionally,
4073          * CCK needs bit 9 to be set.  The rest of the bits
4074          * we add to the table select the tx antenna.
4075          * Note that we add the rates highest rate first
4076          * (opposite of ni_rates).
4077          */
4078         /*
4079          * XXX TODO: this should be looping over the min of nrates
4080          * and LQ_MAX_RETRY_NUM.  Sigh.
4081          */
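              /*
               * Illustrative entry (symbolic - the numeric field positions
               * live in if_iwmreg.h): for a CCK rate the table word is
               * iwm_rates[ridx].plcp | (nextant << IWM_RATE_MCS_ANT_POS) |
               * IWM_RATE_MCS_CCK_MSK, i.e. the PLCP code in the low bits,
               * the CCK flag in bit 9 and the chosen antenna in the antenna
               * field.
               */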
4082         for (i = 0; i < nrates; i++) {
4083                 int nextant;
4084
4085                 if (txant == 0)
4086                         txant = iwm_fw_valid_tx_ant(sc);
4087                 nextant = 1<<(ffs(txant)-1);
4088                 txant &= ~nextant;
4089
4090                 /*
4091                  * Map the rate id into a rate index into
4092                  * our hardware table containing the
4093                  * configuration to use for this rate.
4094                  */
4095                 ridx = in->in_ridx[i];
4096                 tab = iwm_rates[ridx].plcp;
4097                 tab |= nextant << IWM_RATE_MCS_ANT_POS;
4098                 if (IWM_RIDX_IS_CCK(ridx))
4099                         tab |= IWM_RATE_MCS_CCK_MSK;
4100                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4101                     "station rate i=%d, rate=%d, hw=%x\n",
4102                     i, iwm_rates[ridx].rate, tab);
4103                 lq->rs_table[i] = htole32(tab);
4104         }
4105         /* then fill the rest with the lowest possible rate */
4106         for (i = nrates; i < nitems(lq->rs_table); i++) {
4107                 KASSERT(tab != 0, ("invalid tab"));
4108                 lq->rs_table[i] = htole32(tab);
4109         }
4110 }
4111
4112 static int
4113 iwm_media_change(struct ifnet *ifp)
4114 {
4115         struct ieee80211vap *vap = ifp->if_softc;
4116         struct ieee80211com *ic = vap->iv_ic;
4117         struct iwm_softc *sc = ic->ic_softc;
4118         int error;
4119
4120         error = ieee80211_media_change(ifp);
4121         if (error != ENETRESET)
4122                 return error;
4123
4124         IWM_LOCK(sc);
4125         if (ic->ic_nrunning > 0) {
4126                 iwm_stop(sc);
4127                 iwm_init(sc);
4128         }
4129         IWM_UNLOCK(sc);
4130         return error;
4131 }
4132
4133
4134 static int
4135 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4136 {
4137         struct iwm_vap *ivp = IWM_VAP(vap);
4138         struct ieee80211com *ic = vap->iv_ic;
4139         struct iwm_softc *sc = ic->ic_softc;
4140         struct iwm_node *in;
4141         int error;
4142
4143         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4144             "switching state %s -> %s\n",
4145             ieee80211_state_name[vap->iv_state],
4146             ieee80211_state_name[nstate]);
4147         IEEE80211_UNLOCK(ic);
4148         IWM_LOCK(sc);
4149
4150         if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
4151                 iwm_led_blink_stop(sc);
4152
4153         /* disable beacon filtering if we're hopping out of RUN */
4154         if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
4155                 iwm_mvm_disable_beacon_filter(sc);
4156
4157                 if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4158                         in->in_assoc = 0;
4159
4160                 iwm_release(sc, NULL);
4161
4162                 /*
4163                  * It's impossible to directly go RUN->SCAN. If we iwm_release()
4164                  * above then the card will be completely reinitialized,
4165                  * so the driver must do everything necessary to bring the card
4166                  * from INIT to SCAN.
4167                  *
4168                  * Additionally, upon receiving deauth frame from AP,
4169                  * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
4170                  * state. This will also fail with this driver, so bring the FSM
4171                  * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
4172                  *
4173                  * XXX TODO: fix this for FreeBSD!
4174                  */
4175                 if (nstate == IEEE80211_S_SCAN ||
4176                     nstate == IEEE80211_S_AUTH ||
4177                     nstate == IEEE80211_S_ASSOC) {
4178                         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4179                             "Force transition to INIT; MGT=%d\n", arg);
4180                         IWM_UNLOCK(sc);
4181                         IEEE80211_LOCK(ic);
4182                         /* Always pass arg as -1 since we can't Tx right now. */
4183                         /*
4184                          * XXX arg is just ignored anyway when transitioning
4185                          *     to IEEE80211_S_INIT.
4186                          */
4187                         vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
4188                         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4189                             "Going INIT->SCAN\n");
4190                         nstate = IEEE80211_S_SCAN;
4191                         IEEE80211_UNLOCK(ic);
4192                         IWM_LOCK(sc);
4193                 }
4194         }
4195
4196         switch (nstate) {
4197         case IEEE80211_S_INIT:
4198                 break;
4199
4200         case IEEE80211_S_AUTH:
4201                 if ((error = iwm_auth(vap, sc)) != 0) {
4202                         device_printf(sc->sc_dev,
4203                             "%s: could not move to auth state: %d\n",
4204                             __func__, error);
4205                         break;
4206                 }
4207                 break;
4208
4209         case IEEE80211_S_ASSOC:
4210                 if ((error = iwm_assoc(vap, sc)) != 0) {
4211                         device_printf(sc->sc_dev,
4212                             "%s: failed to associate: %d\n", __func__,
4213                             error);
4214                         break;
4215                 }
4216                 break;
4217
4218         case IEEE80211_S_RUN:
4219         {
4220                 struct iwm_host_cmd cmd = {
4221                         .id = IWM_LQ_CMD,
4222                         .len = { sizeof(in->in_lq), },
4223                         .flags = IWM_CMD_SYNC,
4224                 };
4225
4226                 /* Update the association state, now we have it all */
4227                 /* (e.g. the associd comes in at this point) */
4228                 error = iwm_assoc(vap, sc);
4229                 if (error != 0) {
4230                         device_printf(sc->sc_dev,
4231                             "%s: failed to update association state: %d\n",
4232                             __func__,
4233                             error);
4234                         break;
4235                 }
4236
4237                 in = IWM_NODE(vap->iv_bss);
4238                 iwm_mvm_power_mac_update_mode(sc, in);
4239                 iwm_mvm_enable_beacon_filter(sc, in);
4240                 iwm_mvm_update_quotas(sc, in);
4241                 iwm_setrates(sc, in);
4242
4243                 cmd.data[0] = &in->in_lq;
4244                 if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
4245                         device_printf(sc->sc_dev,
4246                             "%s: IWM_LQ_CMD failed\n", __func__);
4247                 }
4248
4249                 iwm_mvm_led_enable(sc);
4250                 break;
4251         }
4252
4253         default:
4254                 break;
4255         }
4256         IWM_UNLOCK(sc);
4257         IEEE80211_LOCK(ic);
4258
4259         return (ivp->iv_newstate(vap, nstate, arg));
4260 }
4261
4262 void
4263 iwm_endscan_cb(void *arg, int pending)
4264 {
4265         struct iwm_softc *sc = arg;
4266         struct ieee80211com *ic = &sc->sc_ic;
4267
4268         IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4269             "%s: scan ended\n",
4270             __func__);
4271
4272         ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4273 }
4274
4275 /*
4276  * Aging and idle timeouts for the different possible scenarios
4277  * in default configuration
4278  */
4279 static const uint32_t
4280 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4281         {
4282                 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
4283                 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
4284         },
4285         {
4286                 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
4287                 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
4288         },
4289         {
4290                 htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
4291                 htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
4292         },
4293         {
4294                 htole32(IWM_SF_BA_AGING_TIMER_DEF),
4295                 htole32(IWM_SF_BA_IDLE_TIMER_DEF)
4296         },
4297         {
4298                 htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
4299                 htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
4300         },
4301 };
4302
4303 /*
4304  * Aging and idle timeouts for the different possible scenarios
4305  * in single BSS MAC configuration.
4306  */
4307 static const uint32_t
4308 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4309         {
4310                 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
4311                 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
4312         },
4313         {
4314                 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
4315                 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
4316         },
4317         {
4318                 htole32(IWM_SF_MCAST_AGING_TIMER),
4319                 htole32(IWM_SF_MCAST_IDLE_TIMER)
4320         },
4321         {
4322                 htole32(IWM_SF_BA_AGING_TIMER),
4323                 htole32(IWM_SF_BA_IDLE_TIMER)
4324         },
4325         {
4326                 htole32(IWM_SF_TX_RE_AGING_TIMER),
4327                 htole32(IWM_SF_TX_RE_IDLE_TIMER)
4328         },
4329 };
4330
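/*
 * Editor's note: the two tables above are indexed first by Smart Fifo
 * scenario (single unicast, aggregated unicast, multicast, block-ack,
 * TX response) and then by timeout type (aging, then idle), with the
 * values stored already in little-endian order.  iwm_mvm_fill_sf_command()
 * below simply memcpy()s one whole table into sf_cmd->full_on_timeouts.
 */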
4331 static void
4332 iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
4333     struct ieee80211_node *ni)
4334 {
4335         int i, j, watermark;
4336
4337         sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
4338
4339         /*
4340          * If we are in association flow - check antenna configuration
4341          * capabilities of the AP station, and choose the watermark accordingly.
4342          */
4343         if (ni) {
4344                 if (ni->ni_flags & IEEE80211_NODE_HT) {
4345 #ifdef notyet
4346                         if (ni->ni_rxmcs[2] != 0)
4347                                 watermark = IWM_SF_W_MARK_MIMO3;
4348                         else if (ni->ni_rxmcs[1] != 0)
4349                                 watermark = IWM_SF_W_MARK_MIMO2;
4350                         else
4351 #endif
4352                                 watermark = IWM_SF_W_MARK_SISO;
4353                 } else {
4354                         watermark = IWM_SF_W_MARK_LEGACY;
4355                 }
4356         /* default watermark value for unassociated mode. */
4357         } else {
4358                 watermark = IWM_SF_W_MARK_MIMO2;
4359         }
4360         sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
4361
4362         for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
4363                 for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
4364                         sf_cmd->long_delay_timeouts[i][j] =
4365                                         htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
4366                 }
4367         }
4368
4369         if (ni) {
4370                 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
4371                        sizeof(iwm_sf_full_timeout));
4372         } else {
4373                 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
4374                        sizeof(iwm_sf_full_timeout_def));
4375         }
4376 }
4377
4378 static int
4379 iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4380 {
4381         struct ieee80211com *ic = &sc->sc_ic;
4382         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4383         struct iwm_sf_cfg_cmd sf_cmd = {
4384                 .state = htole32(IWM_SF_FULL_ON),
4385         };
4386         int ret = 0;
4387
4388         if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
4389                 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4390
4391         switch (new_state) {
4392         case IWM_SF_UNINIT:
4393         case IWM_SF_INIT_OFF:
4394                 iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4395                 break;
4396         case IWM_SF_FULL_ON:
4397                 iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4398                 break;
4399         default:
4400                 IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4401                     "Invalid state %d, not sending Smart Fifo cmd\n",
4402                     new_state);
4403                 return EINVAL;
4404         }
4405
4406         ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4407                                    sizeof(sf_cmd), &sf_cmd);
4408         return ret;
4409 }
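
/*
 * Editor's sketch (illustrative only): a caller would typically switch the
 * Smart Fifo to IWM_SF_FULL_ON once a BSS is up and back to IWM_SF_INIT_OFF
 * when tearing it down, e.g.:
 *
 *      (void) iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
 *      ...
 *      (void) iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
 *
 * The call sites shown above are hypothetical, not code from this driver.
 */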
4410
4411 static int
4412 iwm_send_bt_init_conf(struct iwm_softc *sc)
4413 {
4414         struct iwm_bt_coex_cmd bt_cmd;
4415
4416         bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4417         bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4418
4419         return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4420             &bt_cmd);
4421 }
4422
4423 static int
4424 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4425 {
4426         struct iwm_mcc_update_cmd mcc_cmd;
4427         struct iwm_host_cmd hcmd = {
4428                 .id = IWM_MCC_UPDATE_CMD,
4429                 .flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4430                 .data = { &mcc_cmd },
4431         };
4432         int ret;
4433 #ifdef IWM_DEBUG
4434         struct iwm_rx_packet *pkt;
4435         struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4436         struct iwm_mcc_update_resp *mcc_resp;
4437         int n_channels;
4438         uint16_t mcc;
4439 #endif
4440         int resp_v2 = isset(sc->sc_enabled_capa,
4441             IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4442
4443         memset(&mcc_cmd, 0, sizeof(mcc_cmd));
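        /*
         * Editor's note: the MCC is just the two ASCII country-code
         * characters packed into one 16-bit value, e.g. "US" becomes
         * ('U' << 8) | 'S' == 0x5553; "00" (0x3030) is the world domain.
         */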
4444         mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4445         if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4446             isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
4447                 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4448         else
4449                 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4450
4451         if (resp_v2)
4452                 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4453         else
4454                 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4455
4456         IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4457             "send MCC update to FW with '%c%c' src = %d\n",
4458             alpha2[0], alpha2[1], mcc_cmd.source_id);
4459
4460         ret = iwm_send_cmd(sc, &hcmd);
4461         if (ret)
4462                 return ret;
4463
4464 #ifdef IWM_DEBUG
4465         pkt = hcmd.resp_pkt;
4466
4467         /* Extract MCC response */
4468         if (resp_v2) {
4469                 mcc_resp = (void *)pkt->data;
4470                 mcc = mcc_resp->mcc;
4471                 n_channels = le32toh(mcc_resp->n_channels);
4472         } else {
4473                 mcc_resp_v1 = (void *)pkt->data;
4474                 mcc = mcc_resp_v1->mcc;
4475                 n_channels = le32toh(mcc_resp_v1->n_channels);
4476         }
4477
4478         /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4479         if (mcc == 0)
4480                 mcc = 0x3030;  /* "00" - world */
4481
4482         IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4483             "regulatory domain '%c%c' (%d channels available)\n",
4484             mcc >> 8, mcc & 0xff, n_channels);
4485 #endif
4486         iwm_free_resp(sc, &hcmd);
4487
4488         return 0;
4489 }
4490
4491 static void
4492 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4493 {
4494         struct iwm_host_cmd cmd = {
4495                 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4496                 .len = { sizeof(uint32_t), },
4497                 .data = { &backoff, },
4498         };
4499
4500         if (iwm_send_cmd(sc, &cmd) != 0) {
4501                 device_printf(sc->sc_dev,
4502                     "failed to change thermal tx backoff\n");
4503         }
4504 }
4505
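/*
 * Editor's summary of the bring-up sequence below: start the HW, run the
 * INIT ucode, restart the HW, load the REGULAR ucode, then push BT-coex,
 * TX-antenna, PHY DB/config, the auxiliary station, the PHY contexts,
 * power, LAR ("ZZ" MCC) and UMAC-scan configuration, enable one TX queue
 * per WME access category, and finally disable beacon filtering.
 */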
4506 static int
4507 iwm_init_hw(struct iwm_softc *sc)
4508 {
4509         struct ieee80211com *ic = &sc->sc_ic;
4510         int error, i, ac;
4511
4512         if ((error = iwm_start_hw(sc)) != 0) {
4513                 printf("iwm_start_hw: failed %d\n", error);
4514                 return error;
4515         }
4516
4517         if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4518                 printf("iwm_run_init_mvm_ucode: failed %d\n", error);
4519                 return error;
4520         }
4521
4522         /*
4523          * We should stop and restart the HW since the INIT
4524          * image has just been loaded.
4525          */
4526         iwm_stop_device(sc);
4527         if ((error = iwm_start_hw(sc)) != 0) {
4528                 device_printf(sc->sc_dev, "could not initialize hardware\n");
4529                 return error;
4530         }
4531
4532         /* restart, this time with the regular firmware */
4533         error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
4534         if (error) {
4535                 device_printf(sc->sc_dev, "could not load firmware\n");
4536                 goto error;
4537         }
4538
4539         if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4540                 device_printf(sc->sc_dev, "bt init conf failed\n");
4541                 goto error;
4542         }
4543
4544         if ((error = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc))) != 0) {
4545                 device_printf(sc->sc_dev, "antenna config failed\n");
4546                 goto error;
4547         }
4548
4549         /* Send phy db control command and then phy db calibration */
4550         if ((error = iwm_send_phy_db_data(sc)) != 0) {
4551                 device_printf(sc->sc_dev, "phy_db_data failed\n");
4552                 goto error;
4553         }
4554
4555         if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4556                 device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4557                 goto error;
4558         }
4559
4560         /* Add auxiliary station for scanning */
4561         if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4562                 device_printf(sc->sc_dev, "add_aux_sta failed\n");
4563                 goto error;
4564         }
4565
4566         for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4567                 /*
4568                  * The channel used here isn't relevant as it's
4569                  * going to be overwritten in the other flows.
4570                  * For now use the first channel we have.
4571                  */
4572                 if ((error = iwm_mvm_phy_ctxt_add(sc,
4573                     &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4574                         goto error;
4575         }
4576
4577         /* Initialize tx backoffs to the minimum. */
4578         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
4579                 iwm_mvm_tt_tx_backoff(sc, 0);
4580
4581         error = iwm_mvm_power_update_device(sc);
4582         if (error)
4583                 goto error;
4584
4585         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
4586                 if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4587                         goto error;
4588         }
4589
4590         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4591                 if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4592                         goto error;
4593         }
4594
4595         /* Enable Tx queues. */
4596         for (ac = 0; ac < WME_NUM_AC; ac++) {
4597                 error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4598                     iwm_mvm_ac_to_tx_fifo[ac]);
4599                 if (error)
4600                         goto error;
4601         }
4602
4603         if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4604                 device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4605                 goto error;
4606         }
4607
4608         return 0;
4609
4610  error:
4611         iwm_stop_device(sc);
4612         return error;
4613 }
4614
4615 /* Allow multicast from our BSSID. */
4616 static int
4617 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4618 {
4619         struct ieee80211_node *ni = vap->iv_bss;
4620         struct iwm_mcast_filter_cmd *cmd;
4621         size_t size;
4622         int error;
4623
4624         size = roundup(sizeof(*cmd), 4);
4625         cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4626         if (cmd == NULL)
4627                 return ENOMEM;
4628         cmd->filter_own = 1;
4629         cmd->port_id = 0;
4630         cmd->count = 0;
4631         cmd->pass_all = 1;
4632         IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4633
4634         error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4635             IWM_CMD_SYNC, size, cmd);
4636         free(cmd, M_DEVBUF);
4637
4638         return (error);
4639 }
4640
4641 /*
4642  * ifnet interfaces
4643  */
4644
4645 static void
4646 iwm_init(struct iwm_softc *sc)
4647 {
4648         int error;
4649
4650         if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4651                 return;
4652         }
4653         sc->sc_generation++;
4654         sc->sc_flags &= ~IWM_FLAG_STOPPED;
4655
4656         if ((error = iwm_init_hw(sc)) != 0) {
4657                 printf("iwm_init_hw failed %d\n", error);
4658                 iwm_stop(sc);
4659                 return;
4660         }
4661
4662         /*
4663          * Ok, firmware loaded and we are jogging
4664          */
4665         sc->sc_flags |= IWM_FLAG_HW_INITED;
4666         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4667 }
4668
4669 static int
4670 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4671 {
4672         struct iwm_softc *sc;
4673         int error;
4674
4675         sc = ic->ic_softc;
4676
4677         IWM_LOCK(sc);
4678         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4679                 IWM_UNLOCK(sc);
4680                 return (ENXIO);
4681         }
4682         error = mbufq_enqueue(&sc->sc_snd, m);
4683         if (error) {
4684                 IWM_UNLOCK(sc);
4685                 return (error);
4686         }
4687         iwm_start(sc);
4688         IWM_UNLOCK(sc);
4689         return (0);
4690 }
4691
4692 /*
4693  * Dequeue packets from sendq and call send.
4694  */
4695 static void
4696 iwm_start(struct iwm_softc *sc)
4697 {
4698         struct ieee80211_node *ni;
4699         struct mbuf *m;
4700         int ac = 0;
4701
4702         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4703         while (sc->qfullmsk == 0 &&
4704                 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4705                 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4706                 if (iwm_tx(sc, m, ni, ac) != 0) {
4707                         if_inc_counter(ni->ni_vap->iv_ifp,
4708                             IFCOUNTER_OERRORS, 1);
4709                         ieee80211_free_node(ni);
4710                         continue;
4711                 }
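                /*
                 * Arm the TX watchdog: iwm_watchdog() decrements this once
                 * per second and forces a restart when it hits zero, giving
                 * a TX timeout of roughly 15 seconds.
                 */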
4712                 sc->sc_tx_timer = 15;
4713         }
4714         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4715 }
4716
4717 static void
4718 iwm_stop(struct iwm_softc *sc)
4719 {
4720
4721         sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4722         sc->sc_flags |= IWM_FLAG_STOPPED;
4723         sc->sc_generation++;
4724         iwm_led_blink_stop(sc);
4725         sc->sc_tx_timer = 0;
4726         iwm_stop_device(sc);
4727 }
4728
4729 static void
4730 iwm_watchdog(void *arg)
4731 {
4732         struct iwm_softc *sc = arg;
4733         struct ieee80211com *ic = &sc->sc_ic;
4734
4735         if (sc->sc_tx_timer > 0) {
4736                 if (--sc->sc_tx_timer == 0) {
4737                         device_printf(sc->sc_dev, "device timeout\n");
4738 #ifdef IWM_DEBUG
4739                         iwm_nic_error(sc);
4740 #endif
4741                         ieee80211_restart_all(ic);
4742                         counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4743                         return;
4744                 }
4745         }
4746         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4747 }
4748
4749 static void
4750 iwm_parent(struct ieee80211com *ic)
4751 {
4752         struct iwm_softc *sc = ic->ic_softc;
4753         int startall = 0;
4754
4755         IWM_LOCK(sc);
4756         if (ic->ic_nrunning > 0) {
4757                 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4758                         iwm_init(sc);
4759                         startall = 1;
4760                 }
4761         } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4762                 iwm_stop(sc);
4763         IWM_UNLOCK(sc);
4764         if (startall)
4765                 ieee80211_start_all(ic);
4766 }
4767
4768 /*
4769  * The interrupt side of things
4770  */
4771
4772 /*
4773  * error dumping routines are from iwlwifi/mvm/utils.c
4774  */
4775
4776 /*
4777  * Note: This structure is read from the device with IO accesses,
4778  * and the reading already does the endian conversion. As it is
4779  * read with uint32_t-sized accesses, any members with a different size
4780  * need to be ordered correctly though!
4781  */
4782 struct iwm_error_event_table {
4783         uint32_t valid;         /* (nonzero) valid, (0) log is empty */
4784         uint32_t error_id;              /* type of error */
4785         uint32_t trm_hw_status0;        /* TRM HW status */
4786         uint32_t trm_hw_status1;        /* TRM HW status */
4787         uint32_t blink2;                /* branch link */
4788         uint32_t ilink1;                /* interrupt link */
4789         uint32_t ilink2;                /* interrupt link */
4790         uint32_t data1;         /* error-specific data */
4791         uint32_t data2;         /* error-specific data */
4792         uint32_t data3;         /* error-specific data */
4793         uint32_t bcon_time;             /* beacon timer */
4794         uint32_t tsf_low;               /* network timestamp function timer */
4795         uint32_t tsf_hi;                /* network timestamp function timer */
4796         uint32_t gp1;           /* GP1 timer register */
4797         uint32_t gp2;           /* GP2 timer register */
4798         uint32_t fw_rev_type;   /* firmware revision type */
4799         uint32_t major;         /* uCode version major */
4800         uint32_t minor;         /* uCode version minor */
4801         uint32_t hw_ver;                /* HW Silicon version */
4802         uint32_t brd_ver;               /* HW board version */
4803         uint32_t log_pc;                /* log program counter */
4804         uint32_t frame_ptr;             /* frame pointer */
4805         uint32_t stack_ptr;             /* stack pointer */
4806         uint32_t hcmd;          /* last host command header */
4807         uint32_t isr0;          /* isr status register LMPM_NIC_ISR0:
4808                                  * rxtx_flag */
4809         uint32_t isr1;          /* isr status register LMPM_NIC_ISR1:
4810                                  * host_flag */
4811         uint32_t isr2;          /* isr status register LMPM_NIC_ISR2:
4812                                  * enc_flag */
4813         uint32_t isr3;          /* isr status register LMPM_NIC_ISR3:
4814                                  * time_flag */
4815         uint32_t isr4;          /* isr status register LMPM_NIC_ISR4:
4816                                  * wico interrupt */
4817         uint32_t last_cmd_id;   /* last HCMD id handled by the firmware */
4818         uint32_t wait_event;            /* wait event() caller address */
4819         uint32_t l2p_control;   /* L2pControlField */
4820         uint32_t l2p_duration;  /* L2pDurationField */
4821         uint32_t l2p_mhvalid;   /* L2pMhValidBits */
4822         uint32_t l2p_addr_match;        /* L2pAddrMatchStat */
4823         uint32_t lmpm_pmg_sel;  /* indicate which clocks are turned on
4824                                  * (LMPM_PMG_SEL) */
4825         uint32_t u_timestamp;   /* date and time of the firmware
4826                                  * compilation */
4827         uint32_t flow_handler;  /* FH read/write pointers, RX credit */
4828 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
4829
4830 /*
4831  * UMAC error struct - relevant starting from family 8000 chip.
4832  * Note: This structure is read from the device with IO accesses,
4833  * and the reading already does the endian conversion. As it is
4834  * read with u32-sized accesses, any members with a different size
4835  * need to be ordered correctly though!
4836  */
4837 struct iwm_umac_error_event_table {
4838         uint32_t valid;         /* (nonzero) valid, (0) log is empty */
4839         uint32_t error_id;      /* type of error */
4840         uint32_t blink1;        /* branch link */
4841         uint32_t blink2;        /* branch link */
4842         uint32_t ilink1;        /* interrupt link */
4843         uint32_t ilink2;        /* interrupt link */
4844         uint32_t data1;         /* error-specific data */
4845         uint32_t data2;         /* error-specific data */
4846         uint32_t data3;         /* error-specific data */
4847         uint32_t umac_major;
4848         uint32_t umac_minor;
4849         uint32_t frame_pointer; /* core register 27 */
4850         uint32_t stack_pointer; /* core register 28 */
4851         uint32_t cmd_header;    /* latest host cmd sent to UMAC */
4852         uint32_t nic_isr_pref;  /* ISR status register */
4853 } __packed;
4854
4855 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
4856 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
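
/*
 * Editor's note: these appear to describe the on-device error log layout,
 * i.e. a one-word header followed by seven-word entries.  The checks below
 * that compare ERROR_START_OFFSET against table.valid * ERROR_ELEM_SIZE
 * therefore just test whether table.valid is nonzero (the log holds at
 * least one entry).
 */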
4857
4858 #ifdef IWM_DEBUG
4859 struct {
4860         const char *name;
4861         uint8_t num;
4862 } advanced_lookup[] = {
4863         { "NMI_INTERRUPT_WDG", 0x34 },
4864         { "SYSASSERT", 0x35 },
4865         { "UCODE_VERSION_MISMATCH", 0x37 },
4866         { "BAD_COMMAND", 0x38 },
4867         { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
4868         { "FATAL_ERROR", 0x3D },
4869         { "NMI_TRM_HW_ERR", 0x46 },
4870         { "NMI_INTERRUPT_TRM", 0x4C },
4871         { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
4872         { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
4873         { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
4874         { "NMI_INTERRUPT_HOST", 0x66 },
4875         { "NMI_INTERRUPT_ACTION_PT", 0x7C },
4876         { "NMI_INTERRUPT_UNKNOWN", 0x84 },
4877         { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
4878         { "ADVANCED_SYSASSERT", 0 },
4879 };
4880
4881 static const char *
4882 iwm_desc_lookup(uint32_t num)
4883 {
4884         int i;
4885
4886         for (i = 0; i < nitems(advanced_lookup) - 1; i++)
4887                 if (advanced_lookup[i].num == num)
4888                         return advanced_lookup[i].name;
4889
4890         /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
4891         return advanced_lookup[i].name;
4892 }
4893
4894 static void
4895 iwm_nic_umac_error(struct iwm_softc *sc)
4896 {
4897         struct iwm_umac_error_event_table table;
4898         uint32_t base;
4899
4900         base = sc->sc_uc.uc_umac_error_event_table;
4901
4902         if (base < 0x800000) {
4903                 device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
4904                     base);
4905                 return;
4906         }
4907
4908         if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
4909                 device_printf(sc->sc_dev, "reading errlog failed\n");
4910                 return;
4911         }
4912
4913         if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
4914                 device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
4915                 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
4916                     sc->sc_flags, table.valid);
4917         }
4918
4919         device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
4920                 iwm_desc_lookup(table.error_id));
4921         device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
4922         device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
4923         device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
4924             table.ilink1);
4925         device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
4926             table.ilink2);
4927         device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
4928         device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
4929         device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
4930         device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
4931         device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
4932         device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
4933             table.frame_pointer);
4934         device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
4935             table.stack_pointer);
4936         device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
4937         device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
4938             table.nic_isr_pref);
4939 }
4940
4941 /*
4942  * Support for dumping the error log seemed like a good idea ...
4943  * but it's mostly hex junk and the only sensible thing is the
4944  * hw/ucode revision (which we know anyway).  Since it's here,
4945  * I'll just leave it in, just in case e.g. the Intel guys want to
4946  * help us decipher some "ADVANCED_SYSASSERT" later.
4947  */
4948 static void
4949 iwm_nic_error(struct iwm_softc *sc)
4950 {
4951         struct iwm_error_event_table table;
4952         uint32_t base;
4953
4954         device_printf(sc->sc_dev, "dumping device error log\n");
4955         base = sc->sc_uc.uc_error_event_table;
4956         if (base < 0x800000) {
4957                 device_printf(sc->sc_dev,
4958                     "Invalid error log pointer 0x%08x\n", base);
4959                 return;
4960         }
4961
4962         if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
4963                 device_printf(sc->sc_dev, "reading errlog failed\n");
4964                 return;
4965         }
4966
4967         if (!table.valid) {
4968                 device_printf(sc->sc_dev, "errlog not found, skipping\n");
4969                 return;
4970         }
4971
4972         if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
4973                 device_printf(sc->sc_dev, "Start Error Log Dump:\n");
4974                 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
4975                     sc->sc_flags, table.valid);
4976         }
4977
4978         device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
4979             iwm_desc_lookup(table.error_id));
4980         device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
4981             table.trm_hw_status0);
4982         device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
4983             table.trm_hw_status1);
4984         device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
4985         device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
4986         device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
4987         device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
4988         device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
4989         device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
4990         device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
4991         device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
4992         device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
4993         device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
4994         device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
4995         device_printf(sc->sc_dev, "%08X | uCode revision type\n",
4996             table.fw_rev_type);
4997         device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
4998         device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
4999         device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5000         device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5001         device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5002         device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5003         device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5004         device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5005         device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5006         device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5007         device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5008         device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5009         device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5010         device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5011         device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5012         device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5013         device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5014         device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5015         device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5016
5017         if (sc->sc_uc.uc_umac_error_event_table)
5018                 iwm_nic_umac_error(sc);
5019 }
5020 #endif
5021
5022 #define SYNC_RESP_STRUCT(_var_, _pkt_)                                  \
5023 do {                                                                    \
5024         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
5025         _var_ = (void *)((_pkt_)+1);                                    \
5026 } while (/*CONSTCOND*/0)
5027
5028 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)                              \
5029 do {                                                                    \
5030         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
5031         _ptr_ = (void *)((_pkt_)+1);                                    \
5032 } while (/*CONSTCOND*/0)
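
/*
 * Editor's note: both macros sync the RX buffer for CPU access and then
 * point the given lvalue at the response payload, which starts immediately
 * after the struct iwm_rx_packet header ((_pkt_) + 1).
 */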
5033
5034 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT)
5035
5036 /*
5037  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5038  * Basic structure from if_iwn
5039  */
5040 static void
5041 iwm_notif_intr(struct iwm_softc *sc)
5042 {
5043         struct ieee80211com *ic = &sc->sc_ic;
5044         uint16_t hw;
5045
5046         bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5047             BUS_DMASYNC_POSTREAD);
5048
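        /*
         * Editor's note: closed_rb_num is the firmware's "last closed
         * receive buffer" index (a 12-bit value, hence the 0xfff mask);
         * the loop below drains every descriptor from rxq.cur up to it.
         */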
5049         hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5050
5051         /*
5052          * Process responses
5053          */
5054         while (sc->rxq.cur != hw) {
5055                 struct iwm_rx_ring *ring = &sc->rxq;
5056                 struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
5057                 struct iwm_rx_packet *pkt;
5058                 struct iwm_cmd_response *cresp;
5059                 int qid, idx, code;
5060
5061                 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
5062                     BUS_DMASYNC_POSTREAD);
5063                 pkt = mtod(data->m, struct iwm_rx_packet *);
5064
5065                 qid = pkt->hdr.qid & ~0x80;
5066                 idx = pkt->hdr.idx;
5067
5068                 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5069                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5070                     "rx packet qid=%d idx=%d type=%x %d %d\n",
5071                     pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, sc->rxq.cur, hw);
5072
5073                 /*
5074                  * randomly get these from the firmware, no idea why.
5075                  * they at least seem harmless, so just ignore them for now
5076                  */
5077                 if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
5078                     || pkt->len_n_flags == htole32(0x55550000))) {
5079                         ADVANCE_RXQ(sc);
5080                         continue;
5081                 }
5082
5083                 switch (code) {
5084                 case IWM_REPLY_RX_PHY_CMD:
5085                         iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
5086                         break;
5087
5088                 case IWM_REPLY_RX_MPDU_CMD:
5089                         iwm_mvm_rx_rx_mpdu(sc, pkt, data);
5090                         break;
5091
5092                 case IWM_TX_CMD:
5093                         iwm_mvm_rx_tx_cmd(sc, pkt, data);
5094                         break;
5095
5096                 case IWM_MISSED_BEACONS_NOTIFICATION: {
5097                         struct iwm_missed_beacons_notif *resp;
5098                         int missed;
5099
5100                         /* XXX look at mac_id to determine interface ID */
5101                         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5102
5103                         SYNC_RESP_STRUCT(resp, pkt);
5104                         missed = le32toh(resp->consec_missed_beacons);
5105
5106                         IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5107                             "%s: MISSED_BEACON: mac_id=%d, "
5108                             "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5109                             "num_rx=%d\n",
5110                             __func__,
5111                             le32toh(resp->mac_id),
5112                             le32toh(resp->consec_missed_beacons_since_last_rx),
5113                             le32toh(resp->consec_missed_beacons),
5114                             le32toh(resp->num_expected_beacons),
5115                             le32toh(resp->num_recvd_beacons));
5116
5117                         /* Be paranoid */
5118                         if (vap == NULL)
5119                                 break;
5120
5121                         /* XXX no net80211 locking? */
5122                         if (vap->iv_state == IEEE80211_S_RUN &&
5123                             (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5124                                 if (missed > vap->iv_bmissthreshold) {
5125                                         /* XXX bad locking; turn into task */
5126                                         IWM_UNLOCK(sc);
5127                                         ieee80211_beacon_miss(ic);
5128                                         IWM_LOCK(sc);
5129                                 }
5130                         }
5131
5132                         break; }
5133
5134                 case IWM_MFUART_LOAD_NOTIFICATION:
5135                         break;
5136
5137                 case IWM_MVM_ALIVE: {
5138                         struct iwm_mvm_alive_resp_v1 *resp1;
5139                         struct iwm_mvm_alive_resp_v2 *resp2;
5140                         struct iwm_mvm_alive_resp_v3 *resp3;
5141
5142                         if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
5143                                 SYNC_RESP_STRUCT(resp1, pkt);
5144                                 sc->sc_uc.uc_error_event_table
5145                                     = le32toh(resp1->error_event_table_ptr);
5146                                 sc->sc_uc.uc_log_event_table
5147                                     = le32toh(resp1->log_event_table_ptr);
5148                                 sc->sched_base = le32toh(resp1->scd_base_ptr);
5149                                 if (resp1->status == IWM_ALIVE_STATUS_OK)
5150                                         sc->sc_uc.uc_ok = 1;
5151                                 else
5152                                         sc->sc_uc.uc_ok = 0;
5153                         }
5154
5155                         if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
5156                                 SYNC_RESP_STRUCT(resp2, pkt);
5157                                 sc->sc_uc.uc_error_event_table
5158                                     = le32toh(resp2->error_event_table_ptr);
5159                                 sc->sc_uc.uc_log_event_table
5160                                     = le32toh(resp2->log_event_table_ptr);
5161                                 sc->sched_base = le32toh(resp2->scd_base_ptr);
5162                                 sc->sc_uc.uc_umac_error_event_table
5163                                     = le32toh(resp2->error_info_addr);
5164                                 if (resp2->status == IWM_ALIVE_STATUS_OK)
5165                                         sc->sc_uc.uc_ok = 1;
5166                                 else
5167                                         sc->sc_uc.uc_ok = 0;
5168                         }
5169
5170                         if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
5171                                 SYNC_RESP_STRUCT(resp3, pkt);
5172                                 sc->sc_uc.uc_error_event_table
5173                                     = le32toh(resp3->error_event_table_ptr);
5174                                 sc->sc_uc.uc_log_event_table
5175                                     = le32toh(resp3->log_event_table_ptr);
5176                                 sc->sched_base = le32toh(resp3->scd_base_ptr);
5177                                 sc->sc_uc.uc_umac_error_event_table
5178                                     = le32toh(resp3->error_info_addr);
5179                                 if (resp3->status == IWM_ALIVE_STATUS_OK)
5180                                         sc->sc_uc.uc_ok = 1;
5181                                 else
5182                                         sc->sc_uc.uc_ok = 0;
5183                         }
5184
5185                         sc->sc_uc.uc_intr = 1;
5186                         wakeup(&sc->sc_uc);
5187                         break; }
5188
5189                 case IWM_CALIB_RES_NOTIF_PHY_DB: {
5190                         struct iwm_calib_res_notif_phy_db *phy_db_notif;
5191                         SYNC_RESP_STRUCT(phy_db_notif, pkt);
5192
5193                         iwm_phy_db_set_section(sc, phy_db_notif);
5194
5195                         break; }
5196
5197                 case IWM_STATISTICS_NOTIFICATION: {
5198                         struct iwm_notif_statistics *stats;
5199                         SYNC_RESP_STRUCT(stats, pkt);
5200                         memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5201                         sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
5202                         break; }
5203
5204                 case IWM_NVM_ACCESS_CMD:
5205                 case IWM_MCC_UPDATE_CMD:
5206                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
5207                                 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
5208                                     BUS_DMASYNC_POSTREAD);
5209                                 memcpy(sc->sc_cmd_resp,
5210                                     pkt, sizeof(sc->sc_cmd_resp));
5211                         }
5212                         break;
5213
5214                 case IWM_MCC_CHUB_UPDATE_CMD: {
5215                         struct iwm_mcc_chub_notif *notif;
5216                         SYNC_RESP_STRUCT(notif, pkt);
5217
5218                         sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5219                         sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5220                         sc->sc_fw_mcc[2] = '\0';
5221                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
5222                             "fw source %d sent CC '%s'\n",
5223                             notif->source_id, sc->sc_fw_mcc);
5224                         break; }
5225
5226                 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5227                         break;
5228
5229                 case IWM_PHY_CONFIGURATION_CMD:
5230                 case IWM_TX_ANT_CONFIGURATION_CMD:
5231                 case IWM_ADD_STA:
5232                 case IWM_MAC_CONTEXT_CMD:
5233                 case IWM_REPLY_SF_CFG_CMD:
5234                 case IWM_POWER_TABLE_CMD:
5235                 case IWM_PHY_CONTEXT_CMD:
5236                 case IWM_BINDING_CONTEXT_CMD:
5237                 case IWM_TIME_EVENT_CMD:
5238                 case IWM_SCAN_REQUEST_CMD:
5239                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5240                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5241                 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5242                 case IWM_REPLY_BEACON_FILTERING_CMD:
5243                 case IWM_MAC_PM_POWER_TABLE:
5244                 case IWM_TIME_QUOTA_CMD:
5245                 case IWM_REMOVE_STA:
5246                 case IWM_TXPATH_FLUSH:
5247                 case IWM_LQ_CMD:
5248                 case IWM_BT_CONFIG:
5249                 case IWM_REPLY_THERMAL_MNG_BACKOFF:
5250                         SYNC_RESP_STRUCT(cresp, pkt);
5251                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
5252                                 memcpy(sc->sc_cmd_resp,
5253                                     pkt, sizeof(*pkt)+sizeof(*cresp));
5254                         }
5255                         break;
5256
5257                 /* ignore */
5258                 case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
5259                         break;
5260
5261                 case IWM_INIT_COMPLETE_NOTIF:
5262                         sc->sc_init_complete = 1;
5263                         wakeup(&sc->sc_init_complete);
5264                         break;
5265
5266                 case IWM_SCAN_OFFLOAD_COMPLETE: {
5267                         struct iwm_periodic_scan_complete *notif;
5268                         SYNC_RESP_STRUCT(notif, pkt);
5269                         break;
5270                 }
5271
5272                 case IWM_SCAN_ITERATION_COMPLETE: {
5273                         struct iwm_lmac_scan_complete_notif *notif;
5274                         SYNC_RESP_STRUCT(notif, pkt);
5275                         ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5276                         break;
5277                 }
5278
5279                 case IWM_SCAN_COMPLETE_UMAC: {
5280                         struct iwm_umac_scan_complete *notif;
5281                         SYNC_RESP_STRUCT(notif, pkt);
5282
5283                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
5284                             "UMAC scan complete, status=0x%x\n",
5285                             notif->status);
5286 #if 0   /* XXX This would be a duplicate scan end call */
5287                         taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
5288 #endif
5289                         break;
5290                 }
5291
5292                 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5293                         struct iwm_umac_scan_iter_complete_notif *notif;
5294                         SYNC_RESP_STRUCT(notif, pkt);
5295
5296                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5297                             "complete, status=0x%x, %d channels scanned\n",
5298                             notif->status, notif->scanned_channels);
5299                         ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5300                         break;
5301                 }
5302
5303                 case IWM_REPLY_ERROR: {
5304                         struct iwm_error_resp *resp;
5305                         SYNC_RESP_STRUCT(resp, pkt);
5306
5307                         device_printf(sc->sc_dev,
5308                             "firmware error 0x%x, cmd 0x%x\n",
5309                             le32toh(resp->error_type),
5310                             resp->cmd_id);
5311                         break;
5312                 }
5313
5314                 case IWM_TIME_EVENT_NOTIFICATION: {
5315                         struct iwm_time_event_notif *notif;
5316                         SYNC_RESP_STRUCT(notif, pkt);
5317
5318                         IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5319                             "TE notif status = 0x%x action = 0x%x\n",
5320                             notif->status, notif->action);
5321                         break;
5322                 }
5323
5324                 case IWM_MCAST_FILTER_CMD:
5325                         break;
5326
5327                 case IWM_SCD_QUEUE_CFG: {
5328                         struct iwm_scd_txq_cfg_rsp *rsp;
5329                         SYNC_RESP_STRUCT(rsp, pkt);
5330
5331                         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5332                             "queue cfg token=0x%x sta_id=%d "
5333                             "tid=%d scd_queue=%d\n",
5334                             rsp->token, rsp->sta_id, rsp->tid,
5335                             rsp->scd_queue);
5336                         break;
5337                 }
5338
5339                 default:
5340                         device_printf(sc->sc_dev,
5341                             "frame %d/%d %x UNHANDLED (this should "
5342                             "not happen)\n", qid, idx,
5343                             pkt->len_n_flags);
5344                         break;
5345                 }
5346
5347                 /*
5348                  * Why test bit 0x80?  The Linux driver:
5349                  *
5350                  * There is one exception:  uCode sets bit 15 when it
5351                  * originates the response/notification, i.e. when the
5352                  * response/notification is not a direct response to a
5353                  * command sent by the driver.  For example, uCode issues
5354                  * IWM_REPLY_RX when it sends a received frame to the driver;
5355                  * it is not a direct response to any driver command.
5356                  *
5357                  * Ok, so since when is 7 == 15?  Well, the Linux driver
5358                  * uses a slightly different format for pkt->hdr, and "qid"
5359                  * is actually the upper byte of a two-byte field.
5360                  */
5361                 if (!(pkt->hdr.qid & (1 << 7))) {
5362                         iwm_cmd_done(sc, pkt);
5363                 }
5364
5365                 ADVANCE_RXQ(sc);
5366         }
5367
5368         IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
5369             IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
5370
5371         /*
5372          * Tell the firmware what we have processed.
5373          * Seems like the hardware gets upset unless we align
5374          * the write by 8??
5375          */
5376         hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5377         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
5378 }
5379
5380 static void
5381 iwm_intr(void *arg)
5382 {
5383         struct iwm_softc *sc = arg;
5384         int handled = 0;
5385         int r1, r2, rv = 0;
5386         int isperiodic = 0;
5387
5388         IWM_LOCK(sc);
5389         IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5390
5391         if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5392                 uint32_t *ict = sc->ict_dma.vaddr;
5393                 int tmp;
5394
5395                 tmp = htole32(ict[sc->ict_cur]);
5396                 if (!tmp)
5397                         goto out_ena;
5398
5399                 /*
5400                  * ok, there was something.  keep plowing until we have all.
5401                  */
5402                 r1 = r2 = 0;
5403                 while (tmp) {
5404                         r1 |= tmp;
5405                         ict[sc->ict_cur] = 0;
5406                         sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5407                         tmp = htole32(ict[sc->ict_cur]);
5408                 }
5409
5410                 /* this is where the fun begins.  don't ask */
5411                 if (r1 == 0xffffffff)
5412                         r1 = 0;
5413
5414                 /* i am not expected to understand this */
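                /*
                 * Editor's best guess, based on the Linux iwlwifi ICT path:
                 * if bits 18 or 19 are set, the RX bit (bit 15) may have
                 * been cleared by a hardware bug and is forced back on;
                 * the final line then expands the 16-bit ICT value into the
                 * IWM_CSR_INT layout, keeping the low byte in bits 0-7 and
                 * shifting the high byte up to bits 24-31.
                 */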
5415                 if (r1 & 0xc0000)
5416                         r1 |= 0x8000;
5417                 r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5418         } else {
5419                 r1 = IWM_READ(sc, IWM_CSR_INT);
5420                 /* "hardware gone" (where, fishing?) */
5421                 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5422                         goto out;
5423                 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5424         }
5425         if (r1 == 0 && r2 == 0) {
5426                 goto out_ena;
5427         }
5428
5429         IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5430
5431         /* ignored */
5432         handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
5433
5434         if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5435                 int i;
5436                 struct ieee80211com *ic = &sc->sc_ic;
5437                 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5438
5439 #ifdef IWM_DEBUG
5440                 iwm_nic_error(sc);
5441 #endif
5442                 /* Dump driver status (TX and RX rings) while we're here. */
5443                 device_printf(sc->sc_dev, "driver status:\n");
5444                 for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5445                         struct iwm_tx_ring *ring = &sc->txq[i];
5446                         device_printf(sc->sc_dev,
5447                             "  tx ring %2d: qid=%-2d cur=%-3d "
5448                             "queued=%-3d\n",
5449                             i, ring->qid, ring->cur, ring->queued);
5450                 }
5451                 device_printf(sc->sc_dev,
5452                     "  rx ring: cur=%d\n", sc->rxq.cur);
5453                 device_printf(sc->sc_dev,
5454                     "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5455
5456                 /* Don't stop the device; just do a VAP restart */
5457                 IWM_UNLOCK(sc);
5458
5459                 if (vap == NULL) {
5460                         printf("%s: null vap\n", __func__);
5461                         return;
5462                 }
5463
5464                 device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5465                     "restarting\n", __func__, vap->iv_state);
5466
5467                 /* XXX TODO: turn this into a callout/taskqueue */
5468                 ieee80211_restart_all(ic);
5469                 return;
5470         }
5471
5472         if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5473                 handled |= IWM_CSR_INT_BIT_HW_ERR;
5474                 device_printf(sc->sc_dev, "hardware error, stopping device\n");
5475                 iwm_stop(sc);
5476                 rv = 1;
5477                 goto out;
5478         }
5479
5480         /* firmware chunk loaded */
5481         if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5482                 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5483                 handled |= IWM_CSR_INT_BIT_FH_TX;
5484                 sc->sc_fw_chunk_done = 1;
5485                 wakeup(&sc->sc_fw);
5486         }
5487
5488         if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5489                 handled |= IWM_CSR_INT_BIT_RF_KILL;
5490                 if (iwm_check_rfkill(sc)) {
5491                         device_printf(sc->sc_dev,
5492                             "%s: rfkill switch, disabling interface\n",
5493                             __func__);
5494                         iwm_stop(sc);
5495                 }
5496         }
5497
5498         /*
5499          * The Linux driver uses periodic interrupts to avoid races.
5500          * We cargo-cult like it's going out of fashion.
5501          */
5502         if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5503                 handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5504                 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5505                 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5506                         IWM_WRITE_1(sc,
5507                             IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5508                 isperiodic = 1;
5509         }
5510
5511         if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5512                 handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5513                 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5514
5515                 iwm_notif_intr(sc);
5516
5517                 /* enable periodic interrupt, see above */
5518                 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5519                         IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5520                             IWM_CSR_INT_PERIODIC_ENA);
5521         }
5522
5523         if (__predict_false(r1 & ~handled))
5524                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5525                     "%s: unhandled interrupts: %x\n", __func__, r1);
5526         rv = 1;
5527
5528  out_ena:
5529         iwm_restore_interrupts(sc);
5530  out:
5531         IWM_UNLOCK(sc);
5532         return;
5533 }
5534
5535 /*
5536  * Autoconf glue-sniffing
5537  */
5538 #define PCI_VENDOR_INTEL                0x8086
5539 #define PCI_PRODUCT_INTEL_WL_3160_1     0x08b3
5540 #define PCI_PRODUCT_INTEL_WL_3160_2     0x08b4
5541 #define PCI_PRODUCT_INTEL_WL_3165_1     0x3165
5542 #define PCI_PRODUCT_INTEL_WL_3165_2     0x3166
5543 #define PCI_PRODUCT_INTEL_WL_7260_1     0x08b1
5544 #define PCI_PRODUCT_INTEL_WL_7260_2     0x08b2
5545 #define PCI_PRODUCT_INTEL_WL_7265_1     0x095a
5546 #define PCI_PRODUCT_INTEL_WL_7265_2     0x095b
5547 #define PCI_PRODUCT_INTEL_WL_8260_1     0x24f3
5548 #define PCI_PRODUCT_INTEL_WL_8260_2     0x24f4
5549
5550 static const struct iwm_devices {
5551         uint16_t        device;
5552         const char      *name;
5553 } iwm_devices[] = {
5554         { PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
5555         { PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
5556         { PCI_PRODUCT_INTEL_WL_3165_1, "Intel Dual Band Wireless AC 3165" },
5557         { PCI_PRODUCT_INTEL_WL_3165_2, "Intel Dual Band Wireless AC 3165" },
5558         { PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
5559         { PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
5560         { PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
5561         { PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
5562         { PCI_PRODUCT_INTEL_WL_8260_1, "Intel Dual Band Wireless AC 8260" },
5563         { PCI_PRODUCT_INTEL_WL_8260_2, "Intel Dual Band Wireless AC 8260" },
5564 };
5565
5566 static int
5567 iwm_probe(device_t dev)
5568 {
5569         int i;
5570
5571         for (i = 0; i < nitems(iwm_devices); i++) {
5572                 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5573                     pci_get_device(dev) == iwm_devices[i].device) {
5574                         device_set_desc(dev, iwm_devices[i].name);
5575                         return (BUS_PROBE_DEFAULT);
5576                 }
5577         }
5578
5579         return (ENXIO);
5580 }
5581
5582 static int
5583 iwm_dev_check(device_t dev)
5584 {
5585         struct iwm_softc *sc;
5586
5587         sc = device_get_softc(dev);
5588
5589         sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
5590         switch (pci_get_device(dev)) {
5591         case PCI_PRODUCT_INTEL_WL_3160_1:
5592         case PCI_PRODUCT_INTEL_WL_3160_2:
5593                 sc->sc_fwname = "iwm3160fw";
5594                 sc->host_interrupt_operation_mode = 1;
5595                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5596                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5597                 return (0);
5598         case PCI_PRODUCT_INTEL_WL_3165_1:
5599         case PCI_PRODUCT_INTEL_WL_3165_2:
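                     /* The 3165 uses the same firmware image as the 7265. */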
5600                 sc->sc_fwname = "iwm7265fw";
5601                 sc->host_interrupt_operation_mode = 0;
5602                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5603                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5604                 return (0);
5605         case PCI_PRODUCT_INTEL_WL_7260_1:
5606         case PCI_PRODUCT_INTEL_WL_7260_2:
5607                 sc->sc_fwname = "iwm7260fw";
5608                 sc->host_interrupt_operation_mode = 1;
5609                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5610                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5611                 return (0);
5612         case PCI_PRODUCT_INTEL_WL_7265_1:
5613         case PCI_PRODUCT_INTEL_WL_7265_2:
5614                 sc->sc_fwname = "iwm7265fw";
5615                 sc->host_interrupt_operation_mode = 0;
5616                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5617                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5618                 return (0);
5619         case PCI_PRODUCT_INTEL_WL_8260_1:
5620         case PCI_PRODUCT_INTEL_WL_8260_2:
5621                 sc->sc_fwname = "iwm8000Cfw";
5622                 sc->host_interrupt_operation_mode = 0;
5623                 sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
5624                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
5625                 return (0);
5626         default:
5627                 device_printf(dev, "unknown adapter type\n");
5628                 return (ENXIO);
5629         }
5630 }
5631
5632 static int
5633 iwm_pci_attach(device_t dev)
5634 {
5635         struct iwm_softc *sc;
5636         int count, error, rid;
5637         uint16_t reg;
5638
5639         sc = device_get_softc(dev);
5640
5641         /* Clear device-specific "PCI retry timeout" register (41h). */
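             /*
              * Per the Linux iwlwifi driver, this keeps PCI Tx retries from
              * interfering with C3 CPU state; the same workaround appears in
              * other Intel wireless drivers.
              */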
5642         reg = pci_read_config(dev, 0x40, sizeof(reg));
5643         pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
5644
5645         /* Enable bus-mastering and hardware bug workaround. */
5646         pci_enable_busmaster(dev);
5647         reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5648         /* if !MSI */
5649         if (reg & PCIM_STATUS_INTxSTATE) {
5650                 reg &= ~PCIM_STATUS_INTxSTATE;
5651         }
5652         pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5653
5654         rid = PCIR_BAR(0);
5655         sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5656             RF_ACTIVE);
5657         if (sc->sc_mem == NULL) {
5658                 device_printf(sc->sc_dev, "can't map mem space\n");
5659                 return (ENXIO);
5660         }
5661         sc->sc_st = rman_get_bustag(sc->sc_mem);
5662         sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5663
5664         /* Install interrupt handler. */
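             /*
              * Prefer a single MSI vector (rid 1); if MSI allocation fails,
              * fall back to the shared legacy INTx line (rid 0).
              */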
5665         count = 1;
5666         rid = 0;
5667         if (pci_alloc_msi(dev, &count) == 0)
5668                 rid = 1;
5669         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5670             (rid != 0 ? 0 : RF_SHAREABLE));
5671         if (sc->sc_irq == NULL) {
5672                 device_printf(dev, "can't map interrupt\n");
5673                 return (ENXIO);
5674         }
5675         error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5676             NULL, iwm_intr, sc, &sc->sc_ih);
5677         if (error != 0) {
5678                 device_printf(dev, "can't establish interrupt\n");
5679                 return (ENXIO);
5680         }
5681         sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5682
5683         return (0);
5684 }
5685
5686 static void
5687 iwm_pci_detach(device_t dev)
5688 {
5689         struct iwm_softc *sc = device_get_softc(dev);
5690
5691         if (sc->sc_irq != NULL) {
5692                 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5693                 bus_release_resource(dev, SYS_RES_IRQ,
5694                     rman_get_rid(sc->sc_irq), sc->sc_irq);
5695                 pci_release_msi(dev);
5696         }
5697         if (sc->sc_mem != NULL)
5698                 bus_release_resource(dev, SYS_RES_MEMORY,
5699                     rman_get_rid(sc->sc_mem), sc->sc_mem);
5700 }
5701
5704 static int
5705 iwm_attach(device_t dev)
5706 {
5707         struct iwm_softc *sc = device_get_softc(dev);
5708         struct ieee80211com *ic = &sc->sc_ic;
5709         int error;
5710         int txq_i, i;
5711
5712         sc->sc_dev = dev;
5713         IWM_LOCK_INIT(sc);
5714         mbufq_init(&sc->sc_snd, ifqmaxlen);
5715         callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5716         callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
5717         TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5718
5719         /* PCI attach */
5720         error = iwm_pci_attach(dev);
5721         if (error != 0)
5722                 goto fail;
5723
5724         sc->sc_wantresp = -1;
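             /* No synchronous host-command response is awaited yet. */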
5725
5726         /* Check device type */
5727         error = iwm_dev_check(dev);
5728         if (error != 0)
5729                 goto fail;
5730
5731         /*
5732          * We now start fiddling with the hardware
5733          */
5734         /*
5735          * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
5736          * changed, and now the revision step also includes bit 0-1 (no more
5737          * "dash" value). To keep hw_rev backwards compatible - we'll store it
5738          * in the old format.
5739          */
5740         if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
5741                 sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
5742                                 (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
5743
5744         if (iwm_prepare_card_hw(sc) != 0) {
5745                 device_printf(dev, "could not initialize hardware\n");
5746                 goto fail;
5747         }
5748
5749         if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
5750                 int ret;
5751                 uint32_t hw_step;
5752
5753                 /*
5754                  * In order to recognize C step the driver should read the
5755                  * chip version id located at the AUX bus MISC address.
5756                  */
5757                 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
5758                             IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
5759                 DELAY(2);
5760
5761                 ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
5762                                    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5763                                    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5764                                    25000);
5765                 if (ret < 0) {
5766                         device_printf(sc->sc_dev,
5767                             "Failed to wake up the nic\n");
5768                         goto fail;
5769                 }
5770
5771                 if (iwm_nic_lock(sc)) {
5772                         hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
5773                         hw_step |= IWM_ENABLE_WFPM;
5774                         iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
5775                         hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
5776                         hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
5777                         if (hw_step == 0x3)
5778                                 sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
5779                                                 (IWM_SILICON_C_STEP << 2);
5780                         iwm_nic_unlock(sc);
5781                 } else {
5782                         device_printf(sc->sc_dev, "Failed to lock the nic\n");
5783                         goto fail;
5784                 }
5785         }
5786
5787         /* Allocate DMA memory for firmware transfers. */
5788         if ((error = iwm_alloc_fwmem(sc)) != 0) {
5789                 device_printf(dev, "could not allocate memory for firmware\n");
5790                 goto fail;
5791         }
5792
5793         /* Allocate "Keep Warm" page. */
5794         if ((error = iwm_alloc_kw(sc)) != 0) {
5795                 device_printf(dev, "could not allocate keep warm page\n");
5796                 goto fail;
5797         }
5798
5799         /* We use ICT interrupts */
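             /*
              * ICT ("interrupt cause table"): a DMA table the device writes
              * interrupt causes into, so the handler can read them from host
              * memory instead of polling CSR registers on every interrupt.
              */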
5800         if ((error = iwm_alloc_ict(sc)) != 0) {
5801                 device_printf(dev, "could not allocate ICT table\n");
5802                 goto fail;
5803         }
5804
5805         /* Allocate TX scheduler "rings". */
5806         if ((error = iwm_alloc_sched(sc)) != 0) {
5807                 device_printf(dev, "could not allocate TX scheduler rings\n");
5808                 goto fail;
5809         }
5810
5811         /* Allocate TX rings */
5812         for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
5813                 if ((error = iwm_alloc_tx_ring(sc,
5814                     &sc->txq[txq_i], txq_i)) != 0) {
5815                         device_printf(dev,
5816                             "could not allocate TX ring %d\n",
5817                             txq_i);
5818                         goto fail;
5819                 }
5820         }
5821
5822         /* Allocate RX ring. */
5823         if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
5824                 device_printf(dev, "could not allocate RX ring\n");
5825                 goto fail;
5826         }
5827
5828         /* Clear pending interrupts. */
5829         IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
5830
5831         ic->ic_softc = sc;
5832         ic->ic_name = device_get_nameunit(sc->sc_dev);
5833         ic->ic_phytype = IEEE80211_T_OFDM;      /* not only, but not used */
5834         ic->ic_opmode = IEEE80211_M_STA;        /* default to BSS mode */
5835
5836         /* Set device capabilities. */
5837         ic->ic_caps =
5838             IEEE80211_C_STA |
5839             IEEE80211_C_WPA |           /* WPA/RSN */
5840             IEEE80211_C_WME |
5841             IEEE80211_C_SHSLOT |        /* short slot time supported */
5842             IEEE80211_C_SHPREAMBLE      /* short preamble supported */
5843 //          IEEE80211_C_BGSCAN          /* capable of bg scanning */
5844             ;
5845         for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
5846                 sc->sc_phyctxt[i].id = i;
5847                 sc->sc_phyctxt[i].color = 0;
5848                 sc->sc_phyctxt[i].ref = 0;
5849                 sc->sc_phyctxt[i].channel = NULL;
5850         }
5851
5852         /* Default noise floor */
5853         sc->sc_noise = -96;
5854
5855         /* Max RSSI */
5856         sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
5857
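             /*
              * Defer the rest of attach (firmware load, NVM read, net80211
              * attach) to iwm_preinit(), run via config_intrhook once
              * interrupts are available.
              */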
5858         sc->sc_preinit_hook.ich_func = iwm_preinit;
5859         sc->sc_preinit_hook.ich_arg = sc;
5860         if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
5861                 device_printf(dev, "config_intrhook_establish failed\n");
5862                 goto fail;
5863         }
5864
5865 #ifdef IWM_DEBUG
5866         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
5867             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
5868             CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
5869 #endif
5870
5871         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5872             "<-%s\n", __func__);
5873
5874         return 0;
5875
5876         /* Free allocated memory if something failed during attachment. */
5877 fail:
5878         iwm_detach_local(sc, 0);
5879
5880         return ENXIO;
5881 }
5882
5883 static int
5884 iwm_is_valid_ether_addr(uint8_t *addr)
5885 {
5886         char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
5887
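             /* Reject group (multicast/broadcast) addresses and all-zeros. */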
5888         if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
5889                 return (FALSE);
5890
5891         return (TRUE);
5892 }
5893
5894 static int
5895 iwm_update_edca(struct ieee80211com *ic)
5896 {
5897         struct iwm_softc *sc = ic->ic_softc;
5898
5899         device_printf(sc->sc_dev, "%s: called\n", __func__);
5900         return (0);
5901 }
5902
5903 static void
5904 iwm_preinit(void *arg)
5905 {
5906         struct iwm_softc *sc = arg;
5907         device_t dev = sc->sc_dev;
5908         struct ieee80211com *ic = &sc->sc_ic;
5909         int error;
5910
5911         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5912             "->%s\n", __func__);
5913
5914         IWM_LOCK(sc);
5915         if ((error = iwm_start_hw(sc)) != 0) {
5916                 device_printf(dev, "could not initialize hardware\n");
5917                 IWM_UNLOCK(sc);
5918                 goto fail;
5919         }
5920
5921         error = iwm_run_init_mvm_ucode(sc, 1);
5922         iwm_stop_device(sc);
5923         if (error) {
5924                 IWM_UNLOCK(sc);
5925                 goto fail;
5926         }
5927         device_printf(dev,
5928             "hw rev 0x%x, fw ver %s, address %s\n",
5929             sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
5930             sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
5931
5932         /* not all hardware can do 5GHz band */
5933         if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
5934                 memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
5935                     sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
5936         IWM_UNLOCK(sc);
5937
5938         iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
5939             ic->ic_channels);
5940
5941         /*
5942          * At this point we've committed - if we fail to do setup,
5943          * we now also have to tear down the net80211 state.
5944          */
5945         ieee80211_ifattach(ic);
5946         ic->ic_vap_create = iwm_vap_create;
5947         ic->ic_vap_delete = iwm_vap_delete;
5948         ic->ic_raw_xmit = iwm_raw_xmit;
5949         ic->ic_node_alloc = iwm_node_alloc;
5950         ic->ic_scan_start = iwm_scan_start;
5951         ic->ic_scan_end = iwm_scan_end;
5952         ic->ic_update_mcast = iwm_update_mcast;
5953         ic->ic_getradiocaps = iwm_init_channel_map;
5954         ic->ic_set_channel = iwm_set_channel;
5955         ic->ic_scan_curchan = iwm_scan_curchan;
5956         ic->ic_scan_mindwell = iwm_scan_mindwell;
5957         ic->ic_wme.wme_update = iwm_update_edca;
5958         ic->ic_parent = iwm_parent;
5959         ic->ic_transmit = iwm_transmit;
5960         iwm_radiotap_attach(sc);
5961         if (bootverbose)
5962                 ieee80211_announce(ic);
5963
5964         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5965             "<-%s\n", __func__);
5966         config_intrhook_disestablish(&sc->sc_preinit_hook);
5967
5968         return;
5969 fail:
5970         config_intrhook_disestablish(&sc->sc_preinit_hook);
5971         iwm_detach_local(sc, 0);
5972 }
5973
5974 /*
5975  * Attach the interface to 802.11 radiotap.
5976  */
5977 static void
5978 iwm_radiotap_attach(struct iwm_softc *sc)
5979 {
5980         struct ieee80211com *ic = &sc->sc_ic;
5981
5982         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5983             "->%s begin\n", __func__);
5984         ieee80211_radiotap_attach(ic,
5985             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
5986                 IWM_TX_RADIOTAP_PRESENT,
5987             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
5988                 IWM_RX_RADIOTAP_PRESENT);
5989         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5990             "->%s end\n", __func__);
5991 }
5992
5993 static struct ieee80211vap *
5994 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
5995     enum ieee80211_opmode opmode, int flags,
5996     const uint8_t bssid[IEEE80211_ADDR_LEN],
5997     const uint8_t mac[IEEE80211_ADDR_LEN])
5998 {
5999         struct iwm_vap *ivp;
6000         struct ieee80211vap *vap;
6001
6002         if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6003                 return NULL;
6004         ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6005         vap = &ivp->iv_vap;
6006         ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6007         vap->iv_bmissthreshold = 10;            /* override default */
6008         /* Override with driver methods. */
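             /* Save net80211's handler so iwm_newstate() can chain to it. */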
6009         ivp->iv_newstate = vap->iv_newstate;
6010         vap->iv_newstate = iwm_newstate;
6011
6012         ieee80211_ratectl_init(vap);
6013         /* Complete setup. */
6014         ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6015             mac);
6016         ic->ic_opmode = opmode;
6017
6018         return vap;
6019 }
6020
6021 static void
6022 iwm_vap_delete(struct ieee80211vap *vap)
6023 {
6024         struct iwm_vap *ivp = IWM_VAP(vap);
6025
6026         ieee80211_ratectl_deinit(vap);
6027         ieee80211_vap_detach(vap);
6028         free(ivp, M_80211_VAP);
6029 }
6030
6031 static void
6032 iwm_scan_start(struct ieee80211com *ic)
6033 {
6034         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6035         struct iwm_softc *sc = ic->ic_softc;
6036         int error;
6037
6038         IWM_LOCK(sc);
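             /*
              * Firmware that advertises the UMAC scan capability gets the
              * newer UMAC scan command; otherwise fall back to LMAC scan.
              */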
6039         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6040                 error = iwm_mvm_umac_scan(sc);
6041         else
6042                 error = iwm_mvm_lmac_scan(sc);
6043         if (error != 0) {
6044                 device_printf(sc->sc_dev, "could not initiate scan\n");
6045                 IWM_UNLOCK(sc);
6046                 ieee80211_cancel_scan(vap);
6047         } else {
6048                 iwm_led_blink_start(sc);
6049                 IWM_UNLOCK(sc);
6050         }
6051 }
6052
6053 static void
6054 iwm_scan_end(struct ieee80211com *ic)
6055 {
6056         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6057         struct iwm_softc *sc = ic->ic_softc;
6058
6059         IWM_LOCK(sc);
6060         iwm_led_blink_stop(sc);
6061         if (vap->iv_state == IEEE80211_S_RUN)
6062                 iwm_mvm_led_enable(sc);
6063         IWM_UNLOCK(sc);
6064 }
6065
6066 static void
6067 iwm_update_mcast(struct ieee80211com *ic)
6068 {
6069 }
6070
6071 static void
6072 iwm_set_channel(struct ieee80211com *ic)
6073 {
6074 }
6075
6076 static void
6077 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6078 {
6079 }
6080
6081 static void
6082 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6083 {
6084         return;
6085 }
6086
6087 void
6088 iwm_init_task(void *arg1)
6089 {
6090         struct iwm_softc *sc = arg1;
6091
6092         IWM_LOCK(sc);
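             /*
              * IWM_FLAG_BUSY serializes init/stop: wait for any other thread
              * to finish, then claim the flag for the duration.
              */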
6093         while (sc->sc_flags & IWM_FLAG_BUSY)
6094                 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6095         sc->sc_flags |= IWM_FLAG_BUSY;
6096         iwm_stop(sc);
6097         if (sc->sc_ic.ic_nrunning > 0)
6098                 iwm_init(sc);
6099         sc->sc_flags &= ~IWM_FLAG_BUSY;
6100         wakeup(&sc->sc_flags);
6101         IWM_UNLOCK(sc);
6102 }
6103
6104 static int
6105 iwm_resume(device_t dev)
6106 {
6107         struct iwm_softc *sc = device_get_softc(dev);
6108         int do_reinit = 0;
6109         uint16_t reg;
6110
6111         /* Clear device-specific "PCI retry timeout" register (41h). */
6112         reg = pci_read_config(dev, 0x40, sizeof(reg));
6113         pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
6114         iwm_init_task(device_get_softc(dev));
6115
6116         IWM_LOCK(sc);
6117         if (sc->sc_flags & IWM_FLAG_SCANNING) {
6118                 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6119                 do_reinit = 1;
6120         }
6121         IWM_UNLOCK(sc);
6122
6123         if (do_reinit)
6124                 ieee80211_resume_all(&sc->sc_ic);
6125
6126         return 0;
6127 }
6128
6129 static int
6130 iwm_suspend(device_t dev)
6131 {
6132         int do_stop = 0;
6133         struct iwm_softc *sc = device_get_softc(dev);
6134
6135         do_stop = (sc->sc_ic.ic_nrunning > 0);
6136
6137         ieee80211_suspend_all(&sc->sc_ic);
6138
6139         if (do_stop) {
6140                 IWM_LOCK(sc);
6141                 iwm_stop(sc);
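                     /*
                      * IWM_FLAG_SCANNING doubles as a "re-init on resume"
                      * marker here; iwm_resume() checks it and calls
                      * ieee80211_resume_all().
                      */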
6142                 sc->sc_flags |= IWM_FLAG_SCANNING;
6143                 IWM_UNLOCK(sc);
6144         }
6145
6146         return (0);
6147 }
6148
6149 static int
6150 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6151 {
6152         struct iwm_fw_info *fw = &sc->sc_fw;
6153         device_t dev = sc->sc_dev;
6154         int i;
6155
6156         ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6157
6158         callout_drain(&sc->sc_led_blink_to);
6159         callout_drain(&sc->sc_watchdog_to);
6160         iwm_stop_device(sc);
6161         if (do_net80211) {
6162                 ieee80211_ifdetach(&sc->sc_ic);
6163         }
6164
6165         iwm_phy_db_free(sc);
6166
6167         /* Free descriptor rings */
6168         iwm_free_rx_ring(sc, &sc->rxq);
6169         for (i = 0; i < nitems(sc->txq); i++)
6170                 iwm_free_tx_ring(sc, &sc->txq[i]);
6171
6172         /* Free firmware */
6173         if (fw->fw_fp != NULL)
6174                 iwm_fw_info_free(fw);
6175
6176         /* Free scheduler */
6177         iwm_free_sched(sc);
6178         if (sc->ict_dma.vaddr != NULL)
6179                 iwm_free_ict(sc);
6180         if (sc->kw_dma.vaddr != NULL)
6181                 iwm_free_kw(sc);
6182         if (sc->fw_dma.vaddr != NULL)
6183                 iwm_free_fwmem(sc);
6184
6185         /* Finished with the hardware - detach things */
6186         iwm_pci_detach(dev);
6187
6188         mbufq_drain(&sc->sc_snd);
6189         IWM_LOCK_DESTROY(sc);
6190
6191         return (0);
6192 }
6193
6194 static int
6195 iwm_detach(device_t dev)
6196 {
6197         struct iwm_softc *sc = device_get_softc(dev);
6198
6199         return (iwm_detach_local(sc, 1));
6200 }
6201
6202 static device_method_t iwm_pci_methods[] = {
6203         /* Device interface */
6204         DEVMETHOD(device_probe,         iwm_probe),
6205         DEVMETHOD(device_attach,        iwm_attach),
6206         DEVMETHOD(device_detach,        iwm_detach),
6207         DEVMETHOD(device_suspend,       iwm_suspend),
6208         DEVMETHOD(device_resume,        iwm_resume),
6209
6210         DEVMETHOD_END
6211 };
6212
6213 static driver_t iwm_pci_driver = {
6214         "iwm",
6215         iwm_pci_methods,
6216         sizeof (struct iwm_softc)
6217 };
6218
6219 static devclass_t iwm_devclass;
6220
6221 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6222 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6223 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6224 MODULE_DEPEND(iwm, wlan, 1, 1, 1);