1 /*      $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $     */
2
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 #include <sys/cdefs.h>
106 __FBSDID("$FreeBSD$");
107
108 #include "opt_wlan.h"
109
110 #include <sys/param.h>
111 #include <sys/bus.h>
112 #include <sys/conf.h>
113 #include <sys/endian.h>
114 #include <sys/firmware.h>
115 #include <sys/kernel.h>
116 #include <sys/malloc.h>
117 #include <sys/mbuf.h>
118 #include <sys/mutex.h>
119 #include <sys/module.h>
120 #include <sys/proc.h>
121 #include <sys/rman.h>
122 #include <sys/socket.h>
123 #include <sys/sockio.h>
124 #include <sys/sysctl.h>
125 #include <sys/linker.h>
126
127 #include <machine/bus.h>
128 #include <machine/endian.h>
129 #include <machine/resource.h>
130
131 #include <dev/pci/pcivar.h>
132 #include <dev/pci/pcireg.h>
133
134 #include <net/bpf.h>
135
136 #include <net/if.h>
137 #include <net/if_var.h>
138 #include <net/if_arp.h>
139 #include <net/if_dl.h>
140 #include <net/if_media.h>
141 #include <net/if_types.h>
142
143 #include <netinet/in.h>
144 #include <netinet/in_systm.h>
145 #include <netinet/if_ether.h>
146 #include <netinet/ip.h>
147
148 #include <net80211/ieee80211_var.h>
149 #include <net80211/ieee80211_regdomain.h>
150 #include <net80211/ieee80211_ratectl.h>
151 #include <net80211/ieee80211_radiotap.h>
152
153 #include <dev/iwm/if_iwmreg.h>
154 #include <dev/iwm/if_iwmvar.h>
155 #include <dev/iwm/if_iwm_debug.h>
156 #include <dev/iwm/if_iwm_util.h>
157 #include <dev/iwm/if_iwm_binding.h>
158 #include <dev/iwm/if_iwm_phy_db.h>
159 #include <dev/iwm/if_iwm_mac_ctxt.h>
160 #include <dev/iwm/if_iwm_phy_ctxt.h>
161 #include <dev/iwm/if_iwm_time_event.h>
162 #include <dev/iwm/if_iwm_power.h>
163 #include <dev/iwm/if_iwm_scan.h>
164
165 #include <dev/iwm/if_iwm_pcie_trans.h>
166 #include <dev/iwm/if_iwm_led.h>
167
168 const uint8_t iwm_nvm_channels[] = {
169         /* 2.4 GHz */
170         1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
171         /* 5 GHz */
172         36, 40, 44, 48, 52, 56, 60, 64,
173         100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
174         149, 153, 157, 161, 165
175 };
176 _Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
177     "IWM_NUM_CHANNELS is too small");
178
179 const uint8_t iwm_nvm_channels_8000[] = {
180         /* 2.4 GHz */
181         1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
182         /* 5 GHz */
183         36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
184         96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
185         149, 153, 157, 161, 165, 169, 173, 177, 181
186 };
187 _Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
188     "IWM_NUM_CHANNELS_8000 is too small");
189
190 #define IWM_NUM_2GHZ_CHANNELS   14
191 #define IWM_N_HW_ADDR_MASK      0xF
192
193 /*
194  * XXX For now, there's simply a fixed set of rate table entries
195  * that are populated.
196  */
197 const struct iwm_rate {
198         uint8_t rate;   /* rate in units of 500 kbit/s (2 = 1 Mbit/s) */
199         uint8_t plcp;   /* PLCP rate code programmed into the hardware */
200 } iwm_rates[] = {
201         {   2,  IWM_RATE_1M_PLCP  },
202         {   4,  IWM_RATE_2M_PLCP  },
203         {  11,  IWM_RATE_5M_PLCP  },
204         {  22,  IWM_RATE_11M_PLCP },
205         {  12,  IWM_RATE_6M_PLCP  },
206         {  18,  IWM_RATE_9M_PLCP  },
207         {  24,  IWM_RATE_12M_PLCP },
208         {  36,  IWM_RATE_18M_PLCP },
209         {  48,  IWM_RATE_24M_PLCP },
210         {  72,  IWM_RATE_36M_PLCP },
211         {  96,  IWM_RATE_48M_PLCP },
212         { 108,  IWM_RATE_54M_PLCP },
213 };
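/* Indices 0-3 of iwm_rates are the CCK (11b) rates; 4 and up are OFDM. */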
214 #define IWM_RIDX_CCK    0
215 #define IWM_RIDX_OFDM   4
216 #define IWM_RIDX_MAX    (nitems(iwm_rates)-1)
217 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
218 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
219
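/*
 * A raw NVM section image, as read from the device's non-volatile memory
 * by iwm_nvm_read_section() and consumed by iwm_parse_nvm_sections().
 */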
220 struct iwm_nvm_section {
221         uint16_t length;
222         uint8_t *data;
223 };
224
225 static int      iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
226 static int      iwm_firmware_store_section(struct iwm_softc *,
227                                            enum iwm_ucode_type,
228                                            const uint8_t *, size_t);
229 static int      iwm_set_default_calib(struct iwm_softc *, const void *);
230 static void     iwm_fw_info_free(struct iwm_fw_info *);
231 static int      iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
232 static void     iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
233 static int      iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
234                                      bus_size_t, bus_size_t);
235 static void     iwm_dma_contig_free(struct iwm_dma_info *);
236 static int      iwm_alloc_fwmem(struct iwm_softc *);
237 static int      iwm_alloc_sched(struct iwm_softc *);
238 static int      iwm_alloc_kw(struct iwm_softc *);
239 static int      iwm_alloc_ict(struct iwm_softc *);
240 static int      iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
241 static void     iwm_disable_rx_dma(struct iwm_softc *);
242 static void     iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
243 static void     iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
244 static int      iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
245                                   int);
246 static void     iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
247 static void     iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
248 static void     iwm_enable_interrupts(struct iwm_softc *);
249 static void     iwm_restore_interrupts(struct iwm_softc *);
250 static void     iwm_disable_interrupts(struct iwm_softc *);
251 static void     iwm_ict_reset(struct iwm_softc *);
252 static int      iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
253 static void     iwm_stop_device(struct iwm_softc *);
254 static void     iwm_mvm_nic_config(struct iwm_softc *);
255 static int      iwm_nic_rx_init(struct iwm_softc *);
256 static int      iwm_nic_tx_init(struct iwm_softc *);
257 static int      iwm_nic_init(struct iwm_softc *);
258 static int      iwm_enable_txq(struct iwm_softc *, int, int, int);
259 static int      iwm_post_alive(struct iwm_softc *);
260 static int      iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
261                                    uint16_t, uint8_t *, uint16_t *);
262 static int      iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
263                                      uint16_t *, size_t);
264 static uint32_t iwm_eeprom_channel_flags(uint16_t);
265 static void     iwm_add_channel_band(struct iwm_softc *,
266                     struct ieee80211_channel[], int, int *, int, size_t,
267                     const uint8_t[]);
268 static void     iwm_init_channel_map(struct ieee80211com *, int, int *,
269                     struct ieee80211_channel[]);
270 static int      iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
271                                    const uint16_t *, const uint16_t *,
272                                    const uint16_t *, const uint16_t *,
273                                    const uint16_t *);
274 static void     iwm_set_hw_address_8000(struct iwm_softc *,
275                                         struct iwm_nvm_data *,
276                                         const uint16_t *, const uint16_t *);
277 static int      iwm_get_sku(const struct iwm_softc *, const uint16_t *,
278                             const uint16_t *);
279 static int      iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
280 static int      iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
281                                   const uint16_t *);
282 static int      iwm_get_n_hw_addrs(const struct iwm_softc *,
283                                    const uint16_t *);
284 static void     iwm_set_radio_cfg(const struct iwm_softc *,
285                                   struct iwm_nvm_data *, uint32_t);
286 static int      iwm_parse_nvm_sections(struct iwm_softc *,
287                                        struct iwm_nvm_section *);
288 static int      iwm_nvm_init(struct iwm_softc *);
289 static int      iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
290                                        const uint8_t *, uint32_t);
291 static int      iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
292                                         const uint8_t *, uint32_t);
293 static int      iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
294 static int      iwm_load_cpu_sections_8000(struct iwm_softc *,
295                                            struct iwm_fw_sects *, int , int *);
296 static int      iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
297 static int      iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
298 static int      iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
299 static int      iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
300 static int      iwm_send_phy_cfg_cmd(struct iwm_softc *);
301 static int      iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
302                                               enum iwm_ucode_type);
303 static int      iwm_run_init_mvm_ucode(struct iwm_softc *, int);
304 static int      iwm_rx_addbuf(struct iwm_softc *, int, int);
305 static int      iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
306 static int      iwm_mvm_get_signal_strength(struct iwm_softc *,
307                                             struct iwm_rx_phy_info *);
308 static void     iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
309                                       struct iwm_rx_packet *,
310                                       struct iwm_rx_data *);
311 static int      iwm_get_noise(struct iwm_softc *sc,
312                     const struct iwm_mvm_statistics_rx_non_phy *);
313 static void     iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
314                                    struct iwm_rx_data *);
315 static int      iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
316                                          struct iwm_rx_packet *,
317                                          struct iwm_node *);
318 static void     iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
319                                   struct iwm_rx_data *);
320 static void     iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
321 #if 0
322 static void     iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
323                                  uint16_t);
324 #endif
325 static const struct iwm_rate *
326         iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
327                         struct mbuf *, struct iwm_tx_cmd *);
328 static int      iwm_tx(struct iwm_softc *, struct mbuf *,
329                        struct ieee80211_node *, int);
330 static int      iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
331                              const struct ieee80211_bpf_params *);
332 static int      iwm_mvm_flush_tx_path(struct iwm_softc *sc,
333                                       uint32_t tfd_msk, uint32_t flags);
334 static int      iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
335                                                 struct iwm_mvm_add_sta_cmd_v7 *,
336                                                 int *);
337 static int      iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
338                                        int);
339 static int      iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
340 static int      iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
341 static int      iwm_mvm_add_int_sta_common(struct iwm_softc *,
342                                            struct iwm_int_sta *,
343                                            const uint8_t *, uint16_t, uint16_t);
344 static int      iwm_mvm_add_aux_sta(struct iwm_softc *);
345 static int      iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
346 static int      iwm_auth(struct ieee80211vap *, struct iwm_softc *);
347 static int      iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
348 static int      iwm_release(struct iwm_softc *, struct iwm_node *);
349 static struct ieee80211_node *
350                 iwm_node_alloc(struct ieee80211vap *,
351                                const uint8_t[IEEE80211_ADDR_LEN]);
352 static void     iwm_setrates(struct iwm_softc *, struct iwm_node *);
353 static int      iwm_media_change(struct ifnet *);
354 static int      iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
355 static void     iwm_endscan_cb(void *, int);
356 static void     iwm_mvm_fill_sf_command(struct iwm_softc *,
357                                         struct iwm_sf_cfg_cmd *,
358                                         struct ieee80211_node *);
359 static int      iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
360 static int      iwm_send_bt_init_conf(struct iwm_softc *);
361 static int      iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
362 static void     iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
363 static int      iwm_init_hw(struct iwm_softc *);
364 static void     iwm_init(struct iwm_softc *);
365 static void     iwm_start(struct iwm_softc *);
366 static void     iwm_stop(struct iwm_softc *);
367 static void     iwm_watchdog(void *);
368 static void     iwm_parent(struct ieee80211com *);
369 #ifdef IWM_DEBUG
370 static const char *
371                 iwm_desc_lookup(uint32_t);
372 static void     iwm_nic_error(struct iwm_softc *);
373 static void     iwm_nic_umac_error(struct iwm_softc *);
374 #endif
375 static void     iwm_notif_intr(struct iwm_softc *);
376 static void     iwm_intr(void *);
377 static int      iwm_attach(device_t);
378 static int      iwm_is_valid_ether_addr(uint8_t *);
379 static void     iwm_preinit(void *);
380 static int      iwm_detach_local(struct iwm_softc *sc, int);
381 static void     iwm_init_task(void *);
382 static void     iwm_radiotap_attach(struct iwm_softc *);
383 static struct ieee80211vap *
384                 iwm_vap_create(struct ieee80211com *,
385                                const char [IFNAMSIZ], int,
386                                enum ieee80211_opmode, int,
387                                const uint8_t [IEEE80211_ADDR_LEN],
388                                const uint8_t [IEEE80211_ADDR_LEN]);
389 static void     iwm_vap_delete(struct ieee80211vap *);
390 static void     iwm_scan_start(struct ieee80211com *);
391 static void     iwm_scan_end(struct ieee80211com *);
392 static void     iwm_update_mcast(struct ieee80211com *);
393 static void     iwm_set_channel(struct ieee80211com *);
394 static void     iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
395 static void     iwm_scan_mindwell(struct ieee80211_scan_state *);
396 static int      iwm_detach(device_t);
397
398 /*
399  * Firmware parser.
400  */
401
402 static int
403 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
404 {
405         const struct iwm_fw_cscheme_list *l = (const void *)data;
406
407         if (dlen < sizeof(*l) ||
408             dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
409                 return EINVAL;
410
411         /* we don't actually store anything for now, always use s/w crypto */
412
413         return 0;
414 }
415
416 static int
417 iwm_firmware_store_section(struct iwm_softc *sc,
418     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
419 {
420         struct iwm_fw_sects *fws;
421         struct iwm_fw_onesect *fwone;
422
423         if (type >= IWM_UCODE_TYPE_MAX)
424                 return EINVAL;
425         if (dlen < sizeof(uint32_t))
426                 return EINVAL;
427
428         fws = &sc->sc_fw.fw_sects[type];
429         if (fws->fw_count >= IWM_UCODE_SECT_MAX)
430                 return EINVAL;
431
432         fwone = &fws->fw_sect[fws->fw_count];
433
434         /* first 32bit are device load offset */
435         memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
436
437         /* rest is data */
438         fwone->fws_data = data + sizeof(uint32_t);
439         fwone->fws_len = dlen - sizeof(uint32_t);
440
441         fws->fw_count++;
442
443         return 0;
444 }
445
446 #define IWM_DEFAULT_SCAN_CHANNELS 40 /* unless overridden by IWM_UCODE_TLV_N_SCAN_CHANNELS */
447
448 /* iwlwifi: iwl-drv.c */
449 struct iwm_tlv_calib_data {
450         uint32_t ucode_type;
451         struct iwm_tlv_calib_ctrl calib;
452 } __packed;
453
454 static int
455 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
456 {
457         const struct iwm_tlv_calib_data *def_calib = data;
458         uint32_t ucode_type = le32toh(def_calib->ucode_type);
459
460         if (ucode_type >= IWM_UCODE_TYPE_MAX) {
461                 device_printf(sc->sc_dev,
462                     "Wrong ucode_type %u for default "
463                     "calibration.\n", ucode_type);
464                 return EINVAL;
465         }
466
467         sc->sc_default_calib[ucode_type].flow_trigger =
468             def_calib->calib.flow_trigger;
469         sc->sc_default_calib[ucode_type].event_trigger =
470             def_calib->calib.event_trigger;
471
472         return 0;
473 }
474
475 static void
476 iwm_fw_info_free(struct iwm_fw_info *fw)
477 {
478         firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
479         fw->fw_fp = NULL;
480         /* don't touch fw->fw_status */
481         memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
482 }
483
484 static int
485 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
486 {
487         struct iwm_fw_info *fw = &sc->sc_fw;
488         const struct iwm_tlv_ucode_header *uhdr;
489         struct iwm_ucode_tlv tlv;
490         enum iwm_ucode_tlv_type tlv_type;
491         const struct firmware *fwp;
492         const uint8_t *data;
493         int error = 0;
494         size_t len;
495
496         if (fw->fw_status == IWM_FW_STATUS_DONE &&
497             ucode_type != IWM_UCODE_TYPE_INIT)
498                 return 0;
499
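        /*
         * Serialize firmware reads: wait for any load already in progress,
         * then mark this one as in progress.
         */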
500         while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
501                 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
502         fw->fw_status = IWM_FW_STATUS_INPROGRESS;
503
504         if (fw->fw_fp != NULL)
505                 iwm_fw_info_free(fw);
506
507         /*
508          * Load firmware into driver memory.
509          * fw_fp will be set.
510          */
511         IWM_UNLOCK(sc);
512         fwp = firmware_get(sc->sc_fwname);
513         IWM_LOCK(sc);
514         if (fwp == NULL) {
515                 device_printf(sc->sc_dev,
516                     "could not read firmware %s\n", sc->sc_fwname);
517                 error = ENOENT;
518                 goto out;
519         }
520         fw->fw_fp = fwp;
521
522         /* (Re-)Initialize default values. */
523         sc->sc_capaflags = 0;
524         sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
525         memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
526         memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
527
528         /*
529          * Parse firmware contents
530          */
531
532         uhdr = (const void *)fw->fw_fp->data;
533         if (*(const uint32_t *)fw->fw_fp->data != 0
534             || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
535                 device_printf(sc->sc_dev, "invalid firmware %s\n",
536                     sc->sc_fwname);
537                 error = EINVAL;
538                 goto out;
539         }
540
541         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
542             IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
543             IWM_UCODE_MINOR(le32toh(uhdr->ver)),
544             IWM_UCODE_API(le32toh(uhdr->ver)));
545         data = uhdr->data;
546         len = fw->fw_fp->datasize - sizeof(*uhdr);
547
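        /*
         * The remainder of the image is a stream of TLV records: each record
         * starts with a struct iwm_ucode_tlv header (little-endian type and
         * length), followed by the payload padded to a 4-byte boundary.
         */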
548         while (len >= sizeof(tlv)) {
549                 size_t tlv_len;
550                 const void *tlv_data;
551
552                 memcpy(&tlv, data, sizeof(tlv));
553                 tlv_len = le32toh(tlv.length);
554                 tlv_type = le32toh(tlv.type);
555
556                 len -= sizeof(tlv);
557                 data += sizeof(tlv);
558                 tlv_data = data;
559
560                 if (len < tlv_len) {
561                         device_printf(sc->sc_dev,
562                             "firmware too short: %zu bytes\n",
563                             len);
564                         error = EINVAL;
565                         goto parse_out;
566                 }
567
568                 switch ((int)tlv_type) {
569                 case IWM_UCODE_TLV_PROBE_MAX_LEN:
570                         if (tlv_len < sizeof(uint32_t)) {
571                                 device_printf(sc->sc_dev,
572                                     "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
573                                     __func__,
574                                     (int) tlv_len);
575                                 error = EINVAL;
576                                 goto parse_out;
577                         }
578                         sc->sc_capa_max_probe_len
579                             = le32toh(*(const uint32_t *)tlv_data);
580                         /* limit it to something sensible */
581                         if (sc->sc_capa_max_probe_len >
582                             IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
583                                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
584                                     "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
585                                     "ridiculous\n", __func__);
586                                 error = EINVAL;
587                                 goto parse_out;
588                         }
589                         break;
590                 case IWM_UCODE_TLV_PAN:
591                         if (tlv_len) {
592                                 device_printf(sc->sc_dev,
593                                     "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
594                                     __func__,
595                                     (int) tlv_len);
596                                 error = EINVAL;
597                                 goto parse_out;
598                         }
599                         sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
600                         break;
601                 case IWM_UCODE_TLV_FLAGS:
602                         if (tlv_len < sizeof(uint32_t)) {
603                                 device_printf(sc->sc_dev,
604                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
605                                     __func__,
606                                     (int) tlv_len);
607                                 error = EINVAL;
608                                 goto parse_out;
609                         }
610                         /*
611                          * Apparently there can be many flags, but the Linux driver
612                          * parses only the first one, and so do we.
613                          *
614                          * XXX: why does this override IWM_UCODE_TLV_PAN?
615                          * Intentional or a bug?  Observations from
616                          * current firmware file:
617                          *  1) TLV_PAN is parsed first
618                          *  2) TLV_FLAGS contains TLV_FLAGS_PAN
619                          * ==> this resets TLV_PAN to itself... hnnnk
620                          */
621                         sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
622                         break;
623                 case IWM_UCODE_TLV_CSCHEME:
624                         if ((error = iwm_store_cscheme(sc,
625                             tlv_data, tlv_len)) != 0) {
626                                 device_printf(sc->sc_dev,
627                                     "%s: iwm_store_cscheme(): returned %d\n",
628                                     __func__,
629                                     error);
630                                 goto parse_out;
631                         }
632                         break;
633                 case IWM_UCODE_TLV_NUM_OF_CPU: {
634                         uint32_t num_cpu;
635                         if (tlv_len != sizeof(uint32_t)) {
636                                 device_printf(sc->sc_dev,
637                                     "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
638                                     __func__,
639                                     (int) tlv_len);
640                                 error = EINVAL;
641                                 goto parse_out;
642                         }
643                         num_cpu = le32toh(*(const uint32_t *)tlv_data);
644                         if (num_cpu < 1 || num_cpu > 2) {
645                                 device_printf(sc->sc_dev,
646                                     "%s: Driver supports only 1 or 2 CPUs\n",
647                                     __func__);
648                                 error = EINVAL;
649                                 goto parse_out;
650                         }
651                         break;
652                 }
653                 case IWM_UCODE_TLV_SEC_RT:
654                         if ((error = iwm_firmware_store_section(sc,
655                             IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0) {
656                                 device_printf(sc->sc_dev,
657                                     "%s: IWM_UCODE_TYPE_REGULAR: iwm_firmware_store_section() failed; %d\n",
658                                     __func__,
659                                     error);
660                                 goto parse_out;
661                         }
662                         break;
663                 case IWM_UCODE_TLV_SEC_INIT:
664                         if ((error = iwm_firmware_store_section(sc,
665                             IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0) {
666                                 device_printf(sc->sc_dev,
667                                     "%s: IWM_UCODE_TYPE_INIT: iwm_firmware_store_section() failed; %d\n",
668                                     __func__,
669                                     error);
670                                 goto parse_out;
671                         }
672                         break;
673                 case IWM_UCODE_TLV_SEC_WOWLAN:
674                         if ((error = iwm_firmware_store_section(sc,
675                             IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0) {
676                                 device_printf(sc->sc_dev,
677                                     "%s: IWM_UCODE_TYPE_WOW: iwm_firmware_store_section() failed; %d\n",
678                                     __func__,
679                                     error);
680                                 goto parse_out;
681                         }
682                         break;
683                 case IWM_UCODE_TLV_DEF_CALIB:
684                         if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
685                                 device_printf(sc->sc_dev,
686                                     "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%d) != sizeof(iwm_tlv_calib_data) (%d)\n",
687                                     __func__,
688                                     (int) tlv_len,
689                                     (int) sizeof(struct iwm_tlv_calib_data));
690                                 error = EINVAL;
691                                 goto parse_out;
692                         }
693                         if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
694                                 device_printf(sc->sc_dev,
695                                     "%s: iwm_set_default_calib() failed: %d\n",
696                                     __func__,
697                                     error);
698                                 goto parse_out;
699                         }
700                         break;
701                 case IWM_UCODE_TLV_PHY_SKU:
702                         if (tlv_len != sizeof(uint32_t)) {
703                                 error = EINVAL;
704                                 device_printf(sc->sc_dev,
705                                     "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) != sizeof(uint32_t)\n",
706                                     __func__,
707                                     (int) tlv_len);
708                                 goto parse_out;
709                         }
710                         sc->sc_fw_phy_config =
711                             le32toh(*(const uint32_t *)tlv_data);
712                         break;
713
714                 case IWM_UCODE_TLV_API_CHANGES_SET: {
715                         const struct iwm_ucode_api *api;
716                         if (tlv_len != sizeof(*api)) {
717                                 error = EINVAL;
718                                 goto parse_out;
719                         }
720                         api = (const struct iwm_ucode_api *)tlv_data;
721                         /* Flags may exceed 32 bits in future firmware. */
722                         if (le32toh(api->api_index) > 0) {
723                                 device_printf(sc->sc_dev,
724                                     "unsupported API index %d\n",
725                                     le32toh(api->api_index));
726                                 goto parse_out;
727                         }
728                         sc->sc_ucode_api = le32toh(api->api_flags);
729                         break;
730                 }
731
732                 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
733                         const struct iwm_ucode_capa *capa;
734                         int idx, i;
735                         if (tlv_len != sizeof(*capa)) {
736                                 error = EINVAL;
737                                 goto parse_out;
738                         }
739                         capa = (const struct iwm_ucode_capa *)tlv_data;
740                         idx = le32toh(capa->api_index);
741                         if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
742                                 device_printf(sc->sc_dev,
743                                     "unsupported API index %d\n", idx);
744                                 goto parse_out;
745                         }
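                        /*
                         * Each capability TLV carries one 32-bit word of
                         * flags; api_index selects which word, so bit i here
                         * corresponds to capability number idx * 32 + i.
                         */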
746                         for (i = 0; i < 32; i++) {
747                                 if ((le32toh(capa->api_capa) & (1U << i)) == 0)
748                                         continue;
749                                 setbit(sc->sc_enabled_capa, i + (32 * idx));
750                         }
751                         break;
752                 }
753
754                 case 48: /* undocumented TLV */
755                 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
756                 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
757                         /* ignore, not used by current driver */
758                         break;
759
760                 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
761                         if ((error = iwm_firmware_store_section(sc,
762                             IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
763                             tlv_len)) != 0)
764                                 goto parse_out;
765                         break;
766
767                 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
768                         if (tlv_len != sizeof(uint32_t)) {
769                                 error = EINVAL;
770                                 goto parse_out;
771                         }
772                         sc->sc_capa_n_scan_channels =
773                           le32toh(*(const uint32_t *)tlv_data);
774                         break;
775
776                 case IWM_UCODE_TLV_FW_VERSION:
777                         if (tlv_len != sizeof(uint32_t) * 3) {
778                                 error = EINVAL;
779                                 goto parse_out;
780                         }
781                         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
782                             "%d.%d.%d",
783                             le32toh(((const uint32_t *)tlv_data)[0]),
784                             le32toh(((const uint32_t *)tlv_data)[1]),
785                             le32toh(((const uint32_t *)tlv_data)[2]));
786                         break;
787
788                 default:
789                         device_printf(sc->sc_dev,
790                             "%s: unknown firmware section %d, abort\n",
791                             __func__, tlv_type);
792                         error = EINVAL;
793                         goto parse_out;
794                 }
795
796                 len -= roundup(tlv_len, 4);
797                 data += roundup(tlv_len, 4);
798         }
799
800         KASSERT(error == 0, ("unhandled error"));
801
802  parse_out:
803         if (error) {
804                 device_printf(sc->sc_dev, "firmware parse error %d, "
805                     "section type %d\n", error, tlv_type);
806         }
807
808         if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
809                 device_printf(sc->sc_dev,
810                     "device uses unsupported power ops\n");
811                 error = ENOTSUP;
812         }
813
814  out:
815         if (error) {
816                 fw->fw_status = IWM_FW_STATUS_NONE;
817                 if (fw->fw_fp != NULL)
818                         iwm_fw_info_free(fw);
819         } else
820                 fw->fw_status = IWM_FW_STATUS_DONE;
821         wakeup(&sc->sc_fw);
822
823         return error;
824 }
825
826 /*
827  * DMA resource routines
828  */
829
830 static void
831 iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
832 {
833         if (error != 0)
834                 return;
835         KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
836         *(bus_addr_t *)arg = segs[0].ds_addr;
837 }
838
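/*
 * Allocate a physically contiguous, 32-bit addressable DMA area of the
 * given size and alignment: create a single-segment tag, allocate and zero
 * the memory, and load the map, storing the bus address in dma->paddr via
 * the iwm_dma_map_addr() callback.
 */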
839 static int
840 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
841     bus_size_t size, bus_size_t alignment)
842 {
843         int error;
844
845         dma->tag = NULL;
846         dma->map = NULL;
847         dma->size = size;
848         dma->vaddr = NULL;
849
850         error = bus_dma_tag_create(tag, alignment,
851             0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
852             1, size, 0, NULL, NULL, &dma->tag);
853         if (error != 0)
854                 goto fail;
855
856         error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
857             BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
858         if (error != 0)
859                 goto fail;
860
861         error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
862             iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
863         if (error != 0) {
864                 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
865                 dma->vaddr = NULL;
866                 goto fail;
867         }
868
869         bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
870
871         return 0;
872
873 fail:
874         iwm_dma_contig_free(dma);
875
876         return error;
877 }
878
879 static void
880 iwm_dma_contig_free(struct iwm_dma_info *dma)
881 {
882         if (dma->vaddr != NULL) {
883                 bus_dmamap_sync(dma->tag, dma->map,
884                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
885                 bus_dmamap_unload(dma->tag, dma->map);
886                 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
887                 dma->vaddr = NULL;
888         }
889         if (dma->tag != NULL) {
890                 bus_dma_tag_destroy(dma->tag);
891                 dma->tag = NULL;
892         }
893 }
894
895 /* fwmem is used to load firmware onto the card */
896 static int
897 iwm_alloc_fwmem(struct iwm_softc *sc)
898 {
899         /* Must be aligned on a 16-byte boundary. */
900         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
901             sc->sc_fwdmasegsz, 16);
902 }
903
904 /* tx scheduler byte-count tables, one per tx ring */
905 static int
906 iwm_alloc_sched(struct iwm_softc *sc)
907 {
908         /* TX scheduler rings must be aligned on a 1KB boundary. */
909         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
910             nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
911 }
912
913 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
914 static int
915 iwm_alloc_kw(struct iwm_softc *sc)
916 {
917         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
918 }
919
920 /* interrupt cause table */
921 static int
922 iwm_alloc_ict(struct iwm_softc *sc)
923 {
924         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
925             IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
926 }
927
928 static int
929 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
930 {
931         bus_size_t size;
932         int i, error;
933
934         ring->cur = 0;
935
936         /* Allocate RX descriptors (256-byte aligned). */
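        /* One 32-bit buffer descriptor per ring entry; iwm_rx_addbuf() fills
         * them in as receive buffers are attached. */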
937         size = IWM_RX_RING_COUNT * sizeof(uint32_t);
938         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
939         if (error != 0) {
940                 device_printf(sc->sc_dev,
941                     "could not allocate RX ring DMA memory\n");
942                 goto fail;
943         }
944         ring->desc = ring->desc_dma.vaddr;
945
946         /* Allocate RX status area (16-byte aligned). */
947         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
948             sizeof(*ring->stat), 16);
949         if (error != 0) {
950                 device_printf(sc->sc_dev,
951                     "could not allocate RX status DMA memory\n");
952                 goto fail;
953         }
954         ring->stat = ring->stat_dma.vaddr;
955
956         /* Create RX buffer DMA tag. */
957         error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
958             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
959             IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
960         if (error != 0) {
961                 device_printf(sc->sc_dev,
962                     "%s: could not create RX buf DMA tag, error %d\n",
963                     __func__, error);
964                 goto fail;
965         }
966
967         /* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
968         error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
969         if (error != 0) {
970                 device_printf(sc->sc_dev,
971                     "%s: could not create RX buf DMA map, error %d\n",
972                     __func__, error);
973                 goto fail;
974         }
975         /*
976          * Allocate and map RX buffers.
977          */
978         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
979                 struct iwm_rx_data *data = &ring->data[i];
980                 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
981                 if (error != 0) {
982                         device_printf(sc->sc_dev,
983                             "%s: could not create RX buf DMA map, error %d\n",
984                             __func__, error);
985                         goto fail;
986                 }
987                 data->m = NULL;
988
989                 if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
990                         goto fail;
991                 }
992         }
993         return 0;
994
995 fail:   iwm_free_rx_ring(sc, ring);
996         return error;
997 }
998
999 static void
1000 iwm_disable_rx_dma(struct iwm_softc *sc)
1001 {
1002         /* XXX conditional nic locks are stupid */
1003         /* XXX print out if we can't lock the NIC? */
1004         if (iwm_nic_lock(sc)) {
1005                 /* XXX handle if RX stop doesn't finish? */
1006                 (void) iwm_pcie_rx_stop(sc);
1007                 iwm_nic_unlock(sc);
1008         }
1009 }
1010
1011 static void
1012 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1013 {
1014         /* Reset the ring state */
1015         ring->cur = 0;
1016
1017         /*
1018          * The hw rx ring index in shared memory must also be cleared,
1019          * otherwise the discrepancy can cause reprocessing chaos.
1020          */
1021         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1022 }
1023
1024 static void
1025 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1026 {
1027         int i;
1028
1029         iwm_dma_contig_free(&ring->desc_dma);
1030         iwm_dma_contig_free(&ring->stat_dma);
1031
1032         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1033                 struct iwm_rx_data *data = &ring->data[i];
1034
1035                 if (data->m != NULL) {
1036                         bus_dmamap_sync(ring->data_dmat, data->map,
1037                             BUS_DMASYNC_POSTREAD);
1038                         bus_dmamap_unload(ring->data_dmat, data->map);
1039                         m_freem(data->m);
1040                         data->m = NULL;
1041                 }
1042                 if (data->map != NULL) {
1043                         bus_dmamap_destroy(ring->data_dmat, data->map);
1044                         data->map = NULL;
1045                 }
1046         }
1047         if (ring->spare_map != NULL) {
1048                 bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1049                 ring->spare_map = NULL;
1050         }
1051         if (ring->data_dmat != NULL) {
1052                 bus_dma_tag_destroy(ring->data_dmat);
1053                 ring->data_dmat = NULL;
1054         }
1055 }
1056
1057 static int
1058 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1059 {
1060         bus_addr_t paddr;
1061         bus_size_t size;
1062         size_t maxsize;
1063         int nsegments;
1064         int i, error;
1065
1066         ring->qid = qid;
1067         ring->queued = 0;
1068         ring->cur = 0;
1069
1070         /* Allocate TX descriptors (256-byte aligned). */
1071         size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1072         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1073         if (error != 0) {
1074                 device_printf(sc->sc_dev,
1075                     "could not allocate TX ring DMA memory\n");
1076                 goto fail;
1077         }
1078         ring->desc = ring->desc_dma.vaddr;
1079
1080         /*
1081          * We only use rings 0 through 9 (4 EDCA + cmd), so there is no need
1082          * to allocate command space for the other rings.
1083          */
1084         if (qid > IWM_MVM_CMD_QUEUE)
1085                 return 0;
1086
1087         size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1088         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1089         if (error != 0) {
1090                 device_printf(sc->sc_dev,
1091                     "could not allocate TX cmd DMA memory\n");
1092                 goto fail;
1093         }
1094         ring->cmd = ring->cmd_dma.vaddr;
1095
1096         /* FW commands may require more mapped space than packets. */
1097         if (qid == IWM_MVM_CMD_QUEUE) {
1098                 maxsize = IWM_RBUF_SIZE;
1099                 nsegments = 1;
1100         } else {
1101                 maxsize = MCLBYTES;
1102                 nsegments = IWM_MAX_SCATTER - 2;
1103         }
1104
1105         error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1106             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
1107             nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
1108         if (error != 0) {
1109                 device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
1110                 goto fail;
1111         }
1112
1113         paddr = ring->cmd_dma.paddr;
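        /*
         * Carve the command DMA area into one struct iwm_device_cmd slot per
         * ring entry; scratch_paddr points at the scratch field of the TX
         * command embedded in that slot.
         */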
1114         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1115                 struct iwm_tx_data *data = &ring->data[i];
1116
1117                 data->cmd_paddr = paddr;
1118                 data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1119                     + offsetof(struct iwm_tx_cmd, scratch);
1120                 paddr += sizeof(struct iwm_device_cmd);
1121
1122                 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1123                 if (error != 0) {
1124                         device_printf(sc->sc_dev,
1125                             "could not create TX buf DMA map\n");
1126                         goto fail;
1127                 }
1128         }
1129         KASSERT(paddr == ring->cmd_dma.paddr + size,
1130             ("invalid physical address"));
1131         return 0;
1132
1133 fail:   iwm_free_tx_ring(sc, ring);
1134         return error;
1135 }
1136
1137 static void
1138 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1139 {
1140         int i;
1141
1142         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1143                 struct iwm_tx_data *data = &ring->data[i];
1144
1145                 if (data->m != NULL) {
1146                         bus_dmamap_sync(ring->data_dmat, data->map,
1147                             BUS_DMASYNC_POSTWRITE);
1148                         bus_dmamap_unload(ring->data_dmat, data->map);
1149                         m_freem(data->m);
1150                         data->m = NULL;
1151                 }
1152         }
1153         /* Clear TX descriptors. */
1154         memset(ring->desc, 0, ring->desc_dma.size);
1155         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1156             BUS_DMASYNC_PREWRITE);
1157         sc->qfullmsk &= ~(1 << ring->qid);
1158         ring->queued = 0;
1159         ring->cur = 0;
1160 }
1161
1162 static void
1163 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1164 {
1165         int i;
1166
1167         iwm_dma_contig_free(&ring->desc_dma);
1168         iwm_dma_contig_free(&ring->cmd_dma);
1169
1170         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1171                 struct iwm_tx_data *data = &ring->data[i];
1172
1173                 if (data->m != NULL) {
1174                         bus_dmamap_sync(ring->data_dmat, data->map,
1175                             BUS_DMASYNC_POSTWRITE);
1176                         bus_dmamap_unload(ring->data_dmat, data->map);
1177                         m_freem(data->m);
1178                         data->m = NULL;
1179                 }
1180                 if (data->map != NULL) {
1181                         bus_dmamap_destroy(ring->data_dmat, data->map);
1182                         data->map = NULL;
1183                 }
1184         }
1185         if (ring->data_dmat != NULL) {
1186                 bus_dma_tag_destroy(ring->data_dmat);
1187                 ring->data_dmat = NULL;
1188         }
1189 }
1190
1191 /*
1192  * High-level hardware frobbing routines
1193  */
1194
1195 static void
1196 iwm_enable_interrupts(struct iwm_softc *sc)
1197 {
1198         sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1199         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1200 }
1201
1202 static void
1203 iwm_restore_interrupts(struct iwm_softc *sc)
1204 {
1205         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1206 }
1207
1208 static void
1209 iwm_disable_interrupts(struct iwm_softc *sc)
1210 {
1211         /* disable interrupts */
1212         IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1213
1214         /* acknowledge all interrupts */
1215         IWM_WRITE(sc, IWM_CSR_INT, ~0);
1216         IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1217 }
1218
1219 static void
1220 iwm_ict_reset(struct iwm_softc *sc)
1221 {
1222         iwm_disable_interrupts(sc);
1223
1224         /* Reset ICT table. */
1225         memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1226         sc->ict_cur = 0;
1227
1228         /* Set physical address of ICT table (4KB aligned). */
1229         IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1230             IWM_CSR_DRAM_INT_TBL_ENABLE
1231             | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1232             | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1233             | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1234
1235         /* Switch to ICT interrupt mode in driver. */
1236         sc->sc_flags |= IWM_FLAG_USE_ICT;
1237
1238         /* Re-enable interrupts. */
1239         IWM_WRITE(sc, IWM_CSR_INT, ~0);
1240         iwm_enable_interrupts(sc);
1241 }
1242
1243 /* iwlwifi pcie/trans.c */
1244
1245 /*
1246  * Since this .. hard-resets things, it's time to actually
1247  * mark the first vap (if any) as having no mac context.
1248  * It's annoying, but since the driver is potentially being
1249  * stop/start'ed whilst active (thanks openbsd port!) we
1250  * have to correctly track this.
1251  */
1252 static void
1253 iwm_stop_device(struct iwm_softc *sc)
1254 {
1255         struct ieee80211com *ic = &sc->sc_ic;
1256         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1257         int chnl, qid;
1258         uint32_t mask = 0;
1259
1260         /* tell the device to stop sending interrupts */
1261         iwm_disable_interrupts(sc);
1262
1263         /*
1264          * FreeBSD-local: mark the first vap as not-uploaded,
1265          * so the next transition through auth/assoc
1266          * will correctly populate the MAC context.
1267          */
1268         if (vap) {
1269                 struct iwm_vap *iv = IWM_VAP(vap);
1270                 iv->is_uploaded = 0;
1271         }
1272
1273         /* device going down, Stop using ICT table */
1274         sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1275
1276         /* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
1277
1278         iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1279
1280         if (iwm_nic_lock(sc)) {
1281                 /* Stop each Tx DMA channel */
1282                 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1283                         IWM_WRITE(sc,
1284                             IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1285                         mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1286                 }
1287
1288                 /* Wait for DMA channels to be idle */
1289                 if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1290                     5000)) {
1291                         device_printf(sc->sc_dev,
1292                             "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1293                             IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1294                 }
1295                 iwm_nic_unlock(sc);
1296         }
1297         iwm_disable_rx_dma(sc);
1298
1299         /* Stop RX ring. */
1300         iwm_reset_rx_ring(sc, &sc->rxq);
1301
1302         /* Reset all TX rings. */
1303         for (qid = 0; qid < nitems(sc->txq); qid++)
1304                 iwm_reset_tx_ring(sc, &sc->txq[qid]);
1305
1306         /*
1307          * Power-down device's busmaster DMA clocks
1308          */
1309         iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1310         DELAY(5);
1311
1312         /* Make sure (redundant) we've released our request to stay awake */
1313         IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1314             IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1315
1316         /* Stop the device, and put it in low power state */
1317         iwm_apm_stop(sc);
1318
1319         /* Upon stop, the APM issues an interrupt if HW RF kill is set.
1320          * Clear the interrupt again here
1321          */
1322         iwm_disable_interrupts(sc);
1323         /* stop and reset the on-board processor */
1324         IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1325
1326         /*
1327          * Even if we stop the HW, we still want the RF kill
1328          * interrupt
1329          */
1330         iwm_enable_rfkill_int(sc);
1331         iwm_check_rfkill(sc);
1332 }
1333
1334 /* iwlwifi: mvm/ops.c */
1335 static void
1336 iwm_mvm_nic_config(struct iwm_softc *sc)
1337 {
1338         uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1339         uint32_t reg_val = 0;
1340
1341         radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1342             IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1343         radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1344             IWM_FW_PHY_CFG_RADIO_STEP_POS;
1345         radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1346             IWM_FW_PHY_CFG_RADIO_DASH_POS;
1347
1348         /* SKU control */
1349         reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1350             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1351         reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1352             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1353
1354         /* radio configuration */
1355         reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1356         reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1357         reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1358
1359         IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1360
1361         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1362             "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1363             radio_cfg_step, radio_cfg_dash);
1364
1365         /*
1366          * W/A : NIC is stuck in a reset state after Early PCIe power off
1367          * (PCIe power is lost before PERST# is asserted), causing ME FW
1368          * to lose ownership and become unable to obtain it back.
1369          */
1370         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1371                 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1372                     IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1373                     ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1374         }
1375 }
1376
1377 static int
1378 iwm_nic_rx_init(struct iwm_softc *sc)
1379 {
1380         if (!iwm_nic_lock(sc))
1381                 return EBUSY;
1382
1383         /*
1384          * Initialize RX ring.  This is from the iwn driver.
1385          */
1386         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1387
1388         /* stop DMA */
1389         iwm_disable_rx_dma(sc);
1390         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1391         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1392         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1393         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1394
1395         /* Set physical address of RX ring (256-byte aligned). */
1396         IWM_WRITE(sc,
1397             IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1398
1399         /* Set physical address of RX status (16-byte aligned). */
1400         IWM_WRITE(sc,
1401             IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1402
1403         /* Enable RX. */
1404         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1405             IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
1406             IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |  /* HW bug */
1407             IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
1408             IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK        |
1409             (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1410             IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
1411             IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1412
1413         IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1414
1415         /* W/A for interrupt coalescing bug in 7260 and 3160 */
1416         if (sc->host_interrupt_operation_mode)
1417                 IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1418
1419         /*
1420          * Thus sayeth el jefe (iwlwifi) via a comment:
1421          *
1422          * This value should initially be 0 (before preparing any
1423          * RBs), should be 8 after preparing the first 8 RBs (for example)
1424          */
1425         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1426
1427         iwm_nic_unlock(sc);
1428
1429         return 0;
1430 }
1431
1432 static int
1433 iwm_nic_tx_init(struct iwm_softc *sc)
1434 {
1435         int qid;
1436
1437         if (!iwm_nic_lock(sc))
1438                 return EBUSY;
1439
1440         /* Deactivate TX scheduler. */
1441         iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1442
1443         /* Set physical address of "keep warm" page (16-byte aligned). */
1444         IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1445
1446         /* Initialize TX rings. */
1447         for (qid = 0; qid < nitems(sc->txq); qid++) {
1448                 struct iwm_tx_ring *txq = &sc->txq[qid];
1449
1450                 /* Set physical address of TX ring (256-byte aligned). */
1451                 IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1452                     txq->desc_dma.paddr >> 8);
1453                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1454                     "%s: loading ring %d descriptors (%p) at %lx\n",
1455                     __func__,
1456                     qid, txq->desc,
1457                     (unsigned long) (txq->desc_dma.paddr >> 8));
1458         }
1459
1460         iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1461
1462         iwm_nic_unlock(sc);
1463
1464         return 0;
1465 }
1466
1467 static int
1468 iwm_nic_init(struct iwm_softc *sc)
1469 {
1470         int error;
1471
1472         iwm_apm_init(sc);
1473         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1474                 iwm_set_pwr(sc);
1475
1476         iwm_mvm_nic_config(sc);
1477
1478         if ((error = iwm_nic_rx_init(sc)) != 0)
1479                 return error;
1480
1481         /*
1482          * Ditto for TX, from iwn
1483          */
1484         if ((error = iwm_nic_tx_init(sc)) != 0)
1485                 return error;
1486
1487         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1488             "%s: shadow registers enabled\n", __func__);
1489         IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1490
1491         return 0;
1492 }
1493
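/*
 * TX FIFO to use for each WME access category (voice, video,
 * best-effort, background) when enabling the per-AC TX queues.
 */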
1494 const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
1495         IWM_MVM_TX_FIFO_VO,
1496         IWM_MVM_TX_FIFO_VI,
1497         IWM_MVM_TX_FIFO_BE,
1498         IWM_MVM_TX_FIFO_BK,
1499 };
1500
1501 static int
1502 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1503 {
1504         if (!iwm_nic_lock(sc)) {
1505                 device_printf(sc->sc_dev,
1506                     "%s: cannot enable txq %d\n",
1507                     __func__,
1508                     qid);
1509                 return EBUSY;
1510         }
1511
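        /* Reset the hardware write pointer for this queue to index 0. */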
1512         IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1513
1514         if (qid == IWM_MVM_CMD_QUEUE) {
1515                 /* deactivate before configuration */
1516                 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1517                     (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1518                     | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1519
1520                 iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1521
1522                 iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1523
1524                 iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1525                 /* Set scheduler window size and frame limit. */
1526                 iwm_write_mem32(sc,
1527                     sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1528                     sizeof(uint32_t),
1529                     ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1530                     IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1531                     ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1532                     IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1533
1534                 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1535                     (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1536                     (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1537                     (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1538                     IWM_SCD_QUEUE_STTS_REG_MSK);
1539         } else {
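                /*
                 * For regular (non-command) queues the scheduler is
                 * configured by the firmware: send an SCD_QUEUE_CFG
                 * command instead of programming the registers directly.
                 */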
1540                 struct iwm_scd_txq_cfg_cmd cmd;
1541                 int error;
1542
1543                 iwm_nic_unlock(sc);
1544
1545                 memset(&cmd, 0, sizeof(cmd));
1546                 cmd.scd_queue = qid;
1547                 cmd.enable = 1;
1548                 cmd.sta_id = sta_id;
1549                 cmd.tx_fifo = fifo;
1550                 cmd.aggregate = 0;
1551                 cmd.window = IWM_FRAME_LIMIT;
1552
1553                 error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1554                     sizeof(cmd), &cmd);
1555                 if (error) {
1556                         device_printf(sc->sc_dev,
1557                             "cannot enable txq %d\n", qid);
1558                         return error;
1559                 }
1560
1561                 if (!iwm_nic_lock(sc))
1562                         return EBUSY;
1563         }
1564
1565         iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1566             iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1567
1568         iwm_nic_unlock(sc);
1569
1570         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1571             __func__, qid, fifo);
1572
1573         return 0;
1574 }
1575
1576 static int
1577 iwm_post_alive(struct iwm_softc *sc)
1578 {
1579         int nwords;
1580         int error, chnl;
1581         uint32_t base;
1582
1583         if (!iwm_nic_lock(sc))
1584                 return EBUSY;
1585
1586         base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1587         if (sc->sched_base != base) {
1588                 device_printf(sc->sc_dev,
1589                     "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1590                     __func__, sc->sched_base, base);
1591         }
1592
1593         iwm_ict_reset(sc);
1594
1595         /* Clear TX scheduler state in SRAM. */
1596         nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1597             IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
1598             / sizeof(uint32_t);
1599         error = iwm_write_mem(sc,
1600             sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1601             NULL, nwords);
1602         if (error)
1603                 goto out;
1604
1605         /* Set physical address of TX scheduler rings (1KB aligned). */
1606         iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1607
1608         iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1609
1610         iwm_nic_unlock(sc);
1611
1612         /* enable command channel */
1613         error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
1614         if (error)
1615                 return error;
1616
1617         if (!iwm_nic_lock(sc))
1618                 return EBUSY;
1619
1620         iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1621
1622         /* Enable DMA channels. */
1623         for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1624                 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1625                     IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1626                     IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1627         }
1628
1629         IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1630             IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1631
1632         /* Enable L1-Active */
1633         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
1634                 iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1635                     IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1636         }
1637
1638  out:
1639         iwm_nic_unlock(sc);
1640         return error;
1641 }
1642
1643 /*
1644  * NVM read access and content parsing.  We do not support
1645  * external NVM or writing NVM.
1646  * iwlwifi/mvm/nvm.c
1647  */
1648
1649 /* list of NVM sections we are allowed/need to read */
1650 const int nvm_to_read[] = {
1651         IWM_NVM_SECTION_TYPE_HW,
1652         IWM_NVM_SECTION_TYPE_SW,
1653         IWM_NVM_SECTION_TYPE_REGULATORY,
1654         IWM_NVM_SECTION_TYPE_CALIBRATION,
1655         IWM_NVM_SECTION_TYPE_PRODUCTION,
1656         IWM_NVM_SECTION_TYPE_HW_8000,
1657         IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
1658         IWM_NVM_SECTION_TYPE_PHY_SKU,
1659 };
1660
1661 /* Default NVM chunk size to read */
1662 #define IWM_NVM_DEFAULT_CHUNK_SIZE      (2*1024)
1663 #define IWM_MAX_NVM_SECTION_SIZE        8192
1664
1665 #define IWM_NVM_WRITE_OPCODE 1
1666 #define IWM_NVM_READ_OPCODE 0
1667
1668 /* load nvm chunk response */
1669 enum {
1670         IWM_READ_NVM_CHUNK_SUCCEED = 0,
1671         IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1672 };
1673
1674 static int
1675 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1676         uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1677 {
1678
1679         struct iwm_nvm_access_cmd nvm_access_cmd = {
1680                 .offset = htole16(offset),
1681                 .length = htole16(length),
1682                 .type = htole16(section),
1683                 .op_code = IWM_NVM_READ_OPCODE,
1684         };
1685         struct iwm_nvm_access_resp *nvm_resp;
1686         struct iwm_rx_packet *pkt;
1687         struct iwm_host_cmd cmd = {
1688                 .id = IWM_NVM_ACCESS_CMD,
1689                 .flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1690                 .data = { &nvm_access_cmd, },
1691         };
1692         int ret, bytes_read, offset_read;
1693         uint8_t *resp_data;
1694
1695         cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1696
1697         ret = iwm_send_cmd(sc, &cmd);
1698         if (ret) {
1699                 device_printf(sc->sc_dev,
1700                     "Could not send NVM_ACCESS command (error=%d)\n", ret);
1701                 return ret;
1702         }
1703
1704         pkt = cmd.resp_pkt;
1705         if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
1706                 device_printf(sc->sc_dev,
1707                     "Bad return from IWM_NVM_ACCESS_CMD (0x%08X)\n",
1708                     pkt->hdr.flags);
1709                 ret = EIO;
1710                 goto exit;
1711         }
1712
1713         /* Extract NVM response */
1714         nvm_resp = (void *)pkt->data;
1715
1716         ret = le16toh(nvm_resp->status);
1717         bytes_read = le16toh(nvm_resp->length);
1718         offset_read = le16toh(nvm_resp->offset);
1719         resp_data = nvm_resp->data;
1720         if (ret) {
1721                 if ((offset != 0) &&
1722                     (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1723                         /*
1724                          * Meaning of NOT_VALID_ADDRESS: the driver tried to
1725                          * read a chunk from an address that is a multiple of
1726                          * 2K and got an error because that address is empty.
1727                          * Meaning of (offset != 0): the driver has already
1728                          * read valid data from another chunk, so this case
1729                          * is not an error.
1730                          */
1731                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1732                                     "NVM access command failed on offset 0x%x since that section size is a multiple of 2K\n",
1733                                     offset);
1734                         *len = 0;
1735                         ret = 0;
1736                 } else {
1737                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1738                                     "NVM access command failed with status %d\n", ret);
1739                         ret = EIO;
1740                 }
1741                 goto exit;
1742         }
1743
1744         if (offset_read != offset) {
1745                 device_printf(sc->sc_dev,
1746                     "NVM ACCESS response with invalid offset %d\n",
1747                     offset_read);
1748                 ret = EINVAL;
1749                 goto exit;
1750         }
1751
1752         if (bytes_read > length) {
1753                 device_printf(sc->sc_dev,
1754                     "NVM ACCESS response with too much data "
1755                     "(%d bytes requested, %d bytes received)\n",
1756                     length, bytes_read);
1757                 ret = EINVAL;
1758                 goto exit;
1759         }
1760
1761         memcpy(data + offset, resp_data, bytes_read);
1762         *len = bytes_read;
1763
1764  exit:
1765         iwm_free_resp(sc, &cmd);
1766         return ret;
1767 }
1768
1769 /*
1770  * Reads an NVM section completely.
1771  * NICs prior to the 7000 family don't have a real NVM, but just read
1772  * section 0, which is the EEPROM.  Because EEPROM reads are not bounded
1773  * by the uCode, we need to manually check in this case that we don't
1774  * overflow and try to read more than the EEPROM size.
1775  * For 7000 family NICs, we supply the maximal size we can read, and
1776  * the uCode fills the response with as much data as it can,
1777  * without overflowing, so no check is needed.
1778  */
1779 static int
1780 iwm_nvm_read_section(struct iwm_softc *sc,
1781         uint16_t section, uint8_t *data, uint16_t *len, size_t max_len)
1782 {
1783         uint16_t chunklen, seglen;
1784         int error = 0;
1785
1786         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1787             "reading NVM section %d\n", section);
1788
1789         chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
1790         *len = 0;
1791
1792         /* Read NVM chunks until exhausted (reading less than requested) */
1793         while (seglen == chunklen && *len < max_len) {
1794                 error = iwm_nvm_read_chunk(sc,
1795                     section, *len, chunklen, data, &seglen);
1796                 if (error) {
1797                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1798                             "Cannot read from NVM section "
1799                             "%d at offset %d\n", section, *len);
1800                         return error;
1801                 }
1802                 *len += seglen;
1803         }
1804
1805         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1806             "NVM section %d read completed (%d bytes, error=%d)\n",
1807             section, *len, error);
1808         return error;
1809 }
1810
1811 /*
1812  * BEGIN IWM_NVM_PARSE
1813  */
1814
1815 /* iwlwifi/iwl-nvm-parse.c */
1816
1817 /* NVM offsets (in words) definitions */
1818 enum iwm_nvm_offsets {
1819         /* NVM HW-Section offset (in words) definitions */
1820         IWM_HW_ADDR = 0x15,
1821
1822 /* NVM SW-Section offset (in words) definitions */
1823         IWM_NVM_SW_SECTION = 0x1C0,
1824         IWM_NVM_VERSION = 0,
1825         IWM_RADIO_CFG = 1,
1826         IWM_SKU = 2,
1827         IWM_N_HW_ADDRS = 3,
1828         IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1829
1830 /* NVM calibration section offset (in words) definitions */
1831         IWM_NVM_CALIB_SECTION = 0x2B8,
1832         IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1833 };
1834
1835 enum iwm_8000_nvm_offsets {
1836         /* NVM HW-Section offset (in words) definitions */
1837         IWM_HW_ADDR0_WFPM_8000 = 0x12,
1838         IWM_HW_ADDR1_WFPM_8000 = 0x16,
1839         IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1840         IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1841         IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1842
1843         /* NVM SW-Section offset (in words) definitions */
1844         IWM_NVM_SW_SECTION_8000 = 0x1C0,
1845         IWM_NVM_VERSION_8000 = 0,
1846         IWM_RADIO_CFG_8000 = 0,
1847         IWM_SKU_8000 = 2,
1848         IWM_N_HW_ADDRS_8000 = 3,
1849
1850         /* NVM REGULATORY -Section offset (in words) definitions */
1851         IWM_NVM_CHANNELS_8000 = 0,
1852         IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1853         IWM_NVM_LAR_OFFSET_8000 = 0x507,
1854         IWM_NVM_LAR_ENABLED_8000 = 0x7,
1855
1856         /* NVM calibration section offset (in words) definitions */
1857         IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1858         IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1859 };
1860
1861 /* SKU Capabilities (actual values from NVM definition) */
1862 enum nvm_sku_bits {
1863         IWM_NVM_SKU_CAP_BAND_24GHZ      = (1 << 0),
1864         IWM_NVM_SKU_CAP_BAND_52GHZ      = (1 << 1),
1865         IWM_NVM_SKU_CAP_11N_ENABLE      = (1 << 2),
1866         IWM_NVM_SKU_CAP_11AC_ENABLE     = (1 << 3),
1867 };
1868
1869 /* radio config bits (actual values from NVM definition) */
1870 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1871 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1872 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1873 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1874 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1875 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1876
1877 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)       (x & 0xF)
1878 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x)         ((x >> 4) & 0xF)
1879 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x)         ((x >> 8) & 0xF)
1880 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)         ((x >> 12) & 0xFFF)
1881 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)       ((x >> 24) & 0xF)
1882 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)       ((x >> 28) & 0xF)
1883
1884 #define DEFAULT_MAX_TX_POWER 16
1885
1886 /**
1887  * enum iwm_nvm_channel_flags - channel flags in NVM
1888  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1889  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1890  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1891  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1892  * XXX cannot find this (DFS) flag in iwl-nvm-parse.c
1893  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1894  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1895  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1896  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1897  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1898  */
1899 enum iwm_nvm_channel_flags {
1900         IWM_NVM_CHANNEL_VALID = (1 << 0),
1901         IWM_NVM_CHANNEL_IBSS = (1 << 1),
1902         IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1903         IWM_NVM_CHANNEL_RADAR = (1 << 4),
1904         IWM_NVM_CHANNEL_DFS = (1 << 7),
1905         IWM_NVM_CHANNEL_WIDE = (1 << 8),
1906         IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1907         IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1908         IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1909 };
1910
1911 /*
1912  * Translate EEPROM flags to net80211.
1913  */
1914 static uint32_t
1915 iwm_eeprom_channel_flags(uint16_t ch_flags)
1916 {
1917         uint32_t nflags;
1918
1919         nflags = 0;
1920         if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1921                 nflags |= IEEE80211_CHAN_PASSIVE;
1922         if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1923                 nflags |= IEEE80211_CHAN_NOADHOC;
1924         if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1925                 nflags |= IEEE80211_CHAN_DFS;
1926                 /* Just in case. */
1927                 nflags |= IEEE80211_CHAN_NOADHOC;
1928         }
1929
1930         return (nflags);
1931 }
1932
1933 static void
1934 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1935     int maxchans, int *nchans, int ch_idx, size_t ch_num,
1936     const uint8_t bands[])
1937 {
1938         const uint16_t * const nvm_ch_flags = sc->sc_nvm.nvm_ch_flags;
1939         uint32_t nflags;
1940         uint16_t ch_flags;
1941         uint8_t ieee;
1942         int error;
1943
1944         for (; ch_idx < ch_num; ch_idx++) {
1945                 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1946                 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1947                         ieee = iwm_nvm_channels[ch_idx];
1948                 else
1949                         ieee = iwm_nvm_channels_8000[ch_idx];
1950
1951                 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1952                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1953                             "Ch. %d Flags %x [%sGHz] - No traffic\n",
1954                             ieee, ch_flags,
1955                             (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1956                             "5.2" : "2.4");
1957                         continue;
1958                 }
1959
1960                 nflags = iwm_eeprom_channel_flags(ch_flags);
1961                 error = ieee80211_add_channel(chans, maxchans, nchans,
1962                     ieee, 0, 0, nflags, bands);
1963                 if (error != 0)
1964                         break;
1965
1966                 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1967                     "Ch. %d Flags %x [%sGHz] - Added\n",
1968                     ieee, ch_flags,
1969                     (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1970                     "5.2" : "2.4");
1971         }
1972 }
1973
1974 static void
1975 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
1976     struct ieee80211_channel chans[])
1977 {
1978         struct iwm_softc *sc = ic->ic_softc;
1979         struct iwm_nvm_data *data = &sc->sc_nvm;
1980         uint8_t bands[IEEE80211_MODE_BYTES];
1981         size_t ch_num;
1982
1983         memset(bands, 0, sizeof(bands));
1984         /* 1-13: 11b/g channels. */
1985         setbit(bands, IEEE80211_MODE_11B);
1986         setbit(bands, IEEE80211_MODE_11G);
1987         iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
1988             IWM_NUM_2GHZ_CHANNELS - 1, bands);
1989
1990         /* 14: 11b channel only. */
1991         clrbit(bands, IEEE80211_MODE_11G);
1992         iwm_add_channel_band(sc, chans, maxchans, nchans,
1993             IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
1994
1995         if (data->sku_cap_band_52GHz_enable) {
1996                 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1997                         ch_num = nitems(iwm_nvm_channels);
1998                 else
1999                         ch_num = nitems(iwm_nvm_channels_8000);
2000                 memset(bands, 0, sizeof(bands));
2001                 setbit(bands, IEEE80211_MODE_11A);
2002                 iwm_add_channel_band(sc, chans, maxchans, nchans,
2003                     IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2004         }
2005 }
2006
2007 static void
2008 iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2009         const uint16_t *mac_override, const uint16_t *nvm_hw)
2010 {
2011         const uint8_t *hw_addr;
2012
2013         if (mac_override) {
2014                 static const uint8_t reserved_mac[] = {
2015                         0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2016                 };
2017
2018                 hw_addr = (const uint8_t *)(mac_override +
2019                                  IWM_MAC_ADDRESS_OVERRIDE_8000);
2020
2021                 /*
2022                  * Store the MAC address from MAO section.
2023                  * No byte swapping is required in MAO section
2024                  */
2025                 IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2026
2027                 /*
2028                  * Force the use of the OTP MAC address in case of reserved MAC
2029                  * address in the NVM, or if address is given but invalid.
2030                  */
2031                 if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2032                     !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2033                     iwm_is_valid_ether_addr(data->hw_addr) &&
2034                     !IEEE80211_IS_MULTICAST(data->hw_addr))
2035                         return;
2036
2037                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2038                     "%s: mac address from nvm override section invalid\n",
2039                     __func__);
2040         }
2041
2042         if (nvm_hw) {
2043                 /* read the mac address from WFMP registers */
2044                 uint32_t mac_addr0 =
2045                     htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2046                 uint32_t mac_addr1 =
2047                     htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2048
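                /*
                 * Each WFMP word holds part of the MAC address with its
                 * bytes reversed; swap them back while copying.
                 */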
2049                 hw_addr = (const uint8_t *)&mac_addr0;
2050                 data->hw_addr[0] = hw_addr[3];
2051                 data->hw_addr[1] = hw_addr[2];
2052                 data->hw_addr[2] = hw_addr[1];
2053                 data->hw_addr[3] = hw_addr[0];
2054
2055                 hw_addr = (const uint8_t *)&mac_addr1;
2056                 data->hw_addr[4] = hw_addr[1];
2057                 data->hw_addr[5] = hw_addr[0];
2058
2059                 return;
2060         }
2061
2062         device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2063         memset(data->hw_addr, 0, sizeof(data->hw_addr));
2064 }
2065
2066 static int
2067 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2068             const uint16_t *phy_sku)
2069 {
2070         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2071                 return le16_to_cpup(nvm_sw + IWM_SKU);
2072
2073         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2074 }
2075
2076 static int
2077 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2078 {
2079         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2080                 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2081         else
2082                 return le32_to_cpup((const uint32_t *)(nvm_sw +
2083                                                 IWM_NVM_VERSION_8000));
2084 }
2085
2086 static int
2087 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2088                   const uint16_t *phy_sku)
2089 {
2090         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2091                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2092
2093         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2094 }
2095
2096 static int
2097 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2098 {
2099         int n_hw_addr;
2100
2101         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2102                 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2103
2104         n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2105
2106         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2107 }
2108
2109 static void
2110 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2111                   uint32_t radio_cfg)
2112 {
2113         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
2114                 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2115                 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2116                 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2117                 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2118                 return;
2119         }
2120
2121         /* set the radio configuration for family 8000 */
2122         data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2123         data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2124         data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2125         data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2126         data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2127         data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2128 }
2129
2130 static int
2131 iwm_parse_nvm_data(struct iwm_softc *sc,
2132                    const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2133                    const uint16_t *nvm_calib, const uint16_t *mac_override,
2134                    const uint16_t *phy_sku, const uint16_t *regulatory)
2135 {
2136         struct iwm_nvm_data *data = &sc->sc_nvm;
2137         uint8_t hw_addr[IEEE80211_ADDR_LEN];
2138         uint32_t sku, radio_cfg;
2139
2140         data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2141
2142         radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2143         iwm_set_radio_cfg(sc, data, radio_cfg);
2144
2145         sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2146         data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2147         data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2148         data->sku_cap_11n_enable = 0;
2149
2150         data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2151
2152         /* Little-endian 16-bit words: bytes are stored in the order 2,1,4,3,6,5 */
2153         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2154                 IEEE80211_ADDR_COPY(hw_addr, nvm_hw + IWM_HW_ADDR);
2155                 data->hw_addr[0] = hw_addr[1];
2156                 data->hw_addr[1] = hw_addr[0];
2157                 data->hw_addr[2] = hw_addr[3];
2158                 data->hw_addr[3] = hw_addr[2];
2159                 data->hw_addr[4] = hw_addr[5];
2160                 data->hw_addr[5] = hw_addr[4];
2161         } else {
2162                 iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
2163         }
2164
2165         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2166                 memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2167                     IWM_NUM_CHANNELS * sizeof(uint16_t));
2168         } else {
2169                 memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2170                     IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2171         }
2172
2173         return 0;
2174 }
2175
2176 /*
2177  * END NVM PARSE
2178  */
2179
2180 static int
2181 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2182 {
2183         const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2184
2185         /* Checking for required sections */
2186         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2187                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2188                     !sections[IWM_NVM_SECTION_TYPE_HW].data) {
2189                         device_printf(sc->sc_dev,
2190                             "Can't parse empty OTP/NVM sections\n");
2191                         return ENOENT;
2192                 }
2193
2194                 hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
2195         } else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
2196                 /* SW and REGULATORY sections are mandatory */
2197                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2198                     !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2199                         device_printf(sc->sc_dev,
2200                             "Can't parse empty OTP/NVM sections\n");
2201                         return ENOENT;
2202                 }
2203                 /* MAC_OVERRIDE or at least HW section must exist */
2204                 if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
2205                     !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2206                         device_printf(sc->sc_dev,
2207                             "Can't parse mac_address, empty sections\n");
2208                         return ENOENT;
2209                 }
2210
2211                 /* PHY_SKU section is mandatory in B0 */
2212                 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2213                         device_printf(sc->sc_dev,
2214                             "Can't parse phy_sku in B0, empty sections\n");
2215                         return ENOENT;
2216                 }
2217
2218                 hw = (const uint16_t *)
2219                     sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
2220         } else {
2221                 panic("unknown device family %d\n", sc->sc_device_family);
2222         }
2223
2224         sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2225         calib = (const uint16_t *)
2226             sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2227         regulatory = (const uint16_t *)
2228             sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2229         mac_override = (const uint16_t *)
2230             sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2231         phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2232
2233         return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2234             phy_sku, regulatory);
2235 }
2236
2237 static int
2238 iwm_nvm_init(struct iwm_softc *sc)
2239 {
2240         struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
2241         int i, section, error;
2242         uint16_t len;
2243         uint8_t *buf;
2244         const size_t bufsz = IWM_MAX_NVM_SECTION_SIZE;
2245
2246         memset(nvm_sections, 0, sizeof(nvm_sections));
2247
2248         buf = malloc(bufsz, M_DEVBUF, M_NOWAIT);
2249         if (buf == NULL)
2250                 return ENOMEM;
2251
2252         for (i = 0; i < nitems(nvm_to_read); i++) {
2253                 section = nvm_to_read[i];
2254                 KASSERT(section < nitems(nvm_sections),
2255                     ("too many sections"));
2256
2257                 error = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
2258                 if (error) {
2259                         error = 0;
2260                         continue;
2261                 }
2262                 nvm_sections[section].data = malloc(len, M_DEVBUF, M_NOWAIT);
2263                 if (nvm_sections[section].data == NULL) {
2264                         error = ENOMEM;
2265                         break;
2266                 }
2267                 memcpy(nvm_sections[section].data, buf, len);
2268                 nvm_sections[section].length = len;
2269         }
2270         free(buf, M_DEVBUF);
2271         if (error == 0)
2272                 error = iwm_parse_nvm_sections(sc, nvm_sections);
2273
2274         for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
2275                 if (nvm_sections[i].data != NULL)
2276                         free(nvm_sections[i].data, M_DEVBUF);
2277         }
2278
2279         return error;
2280 }
2281
2282 /*
2283  * Firmware loading gunk.  This is kind of a weird hybrid between the
2284  * iwn driver and the Linux iwlwifi driver.
2285  */
2286
2287 static int
2288 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
2289         const uint8_t *section, uint32_t byte_cnt)
2290 {
2291         int error = EINVAL;
2292         uint32_t chunk_sz, offset;
2293
2294         chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
2295
2296         for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
2297                 uint32_t addr, len;
2298                 const uint8_t *data;
2299
2300                 addr = dst_addr + offset;
2301                 len = MIN(chunk_sz, byte_cnt - offset);
2302                 data = section + offset;
2303
2304                 error = iwm_firmware_load_chunk(sc, addr, data, len);
2305                 if (error)
2306                         break;
2307         }
2308
2309         return error;
2310 }
2311
2312 static int
2313 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2314         const uint8_t *chunk, uint32_t byte_cnt)
2315 {
2316         struct iwm_dma_info *dma = &sc->fw_dma;
2317         int error;
2318
2319         /* Copy firmware chunk into pre-allocated DMA-safe memory. */
2320         memcpy(dma->vaddr, chunk, byte_cnt);
2321         bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2322
2323         if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2324             dst_addr <= IWM_FW_MEM_EXTENDED_END) {
2325                 iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2326                     IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2327         }
2328
2329         sc->sc_fw_chunk_done = 0;
2330
2331         if (!iwm_nic_lock(sc))
2332                 return EBUSY;
2333
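        /*
         * Program the service DMA channel: pause it, point it at the
         * chunk staged in fw_dma and at the destination SRAM address,
         * then re-enable it so the transfer starts.  The interrupt
         * handler sets sc_fw_chunk_done when the transfer completes.
         */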
2334         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2335             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2336         IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2337             dst_addr);
2338         IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2339             dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2340         IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2341             (iwm_get_dma_hi_addr(dma->paddr)
2342               << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2343         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2344             1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2345             1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2346             IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2347         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2348             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2349             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2350             IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2351
2352         iwm_nic_unlock(sc);
2353
2354         /* wait 1s for this segment to load */
2355         while (!sc->sc_fw_chunk_done)
2356                 if ((error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz)) != 0)
2357                         break;
2358
2359         if (!sc->sc_fw_chunk_done) {
2360                 device_printf(sc->sc_dev,
2361                     "fw chunk addr 0x%x len %d failed to load\n",
2362                     dst_addr, byte_cnt);
2363         }
2364
2365         if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2366             dst_addr <= IWM_FW_MEM_EXTENDED_END && iwm_nic_lock(sc)) {
2367                 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2368                     IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2369                 iwm_nic_unlock(sc);
2370         }
2371
2372         return error;
2373 }
2374
2375 int
2376 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
2377     int cpu, int *first_ucode_section)
2378 {
2379         int shift_param;
2380         int i, error = 0, sec_num = 0x1;
2381         uint32_t val, last_read_idx = 0;
2382         const void *data;
2383         uint32_t dlen;
2384         uint32_t offset;
2385
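        /*
         * Load status for CPU1 sections is reported in the low 16 bits
         * of IWM_FH_UCODE_LOAD_STATUS; CPU2 sections use the high 16 bits.
         */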
2386         if (cpu == 1) {
2387                 shift_param = 0;
2388                 *first_ucode_section = 0;
2389         } else {
2390                 shift_param = 16;
2391                 (*first_ucode_section)++;
2392         }
2393
2394         for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
2395                 last_read_idx = i;
2396                 data = fws->fw_sect[i].fws_data;
2397                 dlen = fws->fw_sect[i].fws_len;
2398                 offset = fws->fw_sect[i].fws_devoff;
2399
2400                 /*
2401                  * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the
2402                  * CPU1 sections from the CPU2 sections.
2403                  * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2
2404                  * non-paged sections from the CPU2 paging sections.
2405                  */
2406                 if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2407                     offset == IWM_PAGING_SEPARATOR_SECTION)
2408                         break;
2409
2410                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2411                     "LOAD FIRMWARE chunk %d offset 0x%x len %d for cpu %d\n",
2412                     i, offset, dlen, cpu);
2413
2414                 if (dlen > sc->sc_fwdmasegsz) {
2415                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2416                             "chunk %d too large (%d bytes)\n", i, dlen);
2417                         error = EFBIG;
2418                 } else {
2419                         error = iwm_firmware_load_sect(sc, offset, data, dlen);
2420                 }
2421                 if (error) {
2422                         device_printf(sc->sc_dev,
2423                             "could not load firmware chunk %d (error %d)\n",
2424                             i, error);
2425                         return error;
2426                 }
2427
2428                 /* Notify the ucode of the loaded section number and status */
2429                 if (iwm_nic_lock(sc)) {
2430                         val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2431                         val = val | (sec_num << shift_param);
2432                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2433                         sec_num = (sec_num << 1) | 0x1;
2434                         iwm_nic_unlock(sc);
2435
2436                         /*
2437                          * The firmware won't load correctly without this delay.
2438                          */
2439                         DELAY(8000);
2440                 }
2441         }
2442
2443         *first_ucode_section = last_read_idx;
2444
2445         if (iwm_nic_lock(sc)) {
2446                 if (cpu == 1)
2447                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2448                 else
2449                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2450                 iwm_nic_unlock(sc);
2451         }
2452
2453         return 0;
2454 }
2455
2456 int
2457 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2458 {
2459         struct iwm_fw_sects *fws;
2460         int error = 0;
2461         int first_ucode_section;
2462
2463         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "loading ucode type %d\n",
2464             ucode_type);
2465
2466         fws = &sc->sc_fw.fw_sects[ucode_type];
2467
2468         /* configure the ucode to be ready to get the secured image */
2469         /* release CPU reset */
2470         iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
2471
2472         /* load to FW the binary Secured sections of CPU1 */
2473         error = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
2474         if (error)
2475                 return error;
2476
2477         /* load to FW the binary sections of CPU2 */
2478         return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
2479 }
2480
2481 static int
2482 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2483 {
2484         struct iwm_fw_sects *fws;
2485         int error, i;
2486         const void *data;
2487         uint32_t dlen;
2488         uint32_t offset;
2489
2490         sc->sc_uc.uc_intr = 0;
2491
2492         fws = &sc->sc_fw.fw_sects[ucode_type];
2493         for (i = 0; i < fws->fw_count; i++) {
2494                 data = fws->fw_sect[i].fws_data;
2495                 dlen = fws->fw_sect[i].fws_len;
2496                 offset = fws->fw_sect[i].fws_devoff;
2497                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2498                     "LOAD FIRMWARE type %d offset %u len %d\n",
2499                     ucode_type, offset, dlen);
2500                 if (dlen > sc->sc_fwdmasegsz) {
2501                         IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2502                             "chunk %d too large (%d bytes)\n", i, dlen);
2503                         error = EFBIG;
2504                 } else {
2505                         error = iwm_firmware_load_sect(sc, offset, data, dlen);
2506                 }
2507                 if (error) {
2508                         device_printf(sc->sc_dev,
2509                             "could not load firmware chunk %u of %u "
2510                             "(error=%d)\n", i, fws->fw_count, error);
2511                         return error;
2512                 }
2513         }
2514
2515         IWM_WRITE(sc, IWM_CSR_RESET, 0);
2516
2517         return 0;
2518 }
2519
2520 static int
2521 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2522 {
2523         int error, w;
2524
2525         if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
2526                 error = iwm_load_firmware_8000(sc, ucode_type);
2527         else
2528                 error = iwm_load_firmware_7000(sc, ucode_type);
2529         if (error)
2530                 return error;
2531
2532         /* wait for the firmware to load */
2533         for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
2534                 error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
2535         }
2536         if (error || !sc->sc_uc.uc_ok) {
2537                 device_printf(sc->sc_dev, "could not load firmware\n");
2538                 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
2539                         device_printf(sc->sc_dev, "cpu1 status: 0x%x\n",
2540                             iwm_read_prph(sc, IWM_SB_CPU_1_STATUS));
2541                         device_printf(sc->sc_dev, "cpu2 status: 0x%x\n",
2542                             iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
2543                 }
2544         }
2545
2546         /*
2547          * Give the firmware some time to initialize.
2548          * Accessing it too early causes errors.
2549          */
2550         msleep(&w, &sc->sc_mtx, 0, "iwmfwinit", hz);
2551
2552         return error;
2553 }
2554
2555 /* iwlwifi: pcie/trans.c */
2556 static int
2557 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2558 {
2559         int error;
2560
2561         IWM_WRITE(sc, IWM_CSR_INT, ~0);
2562
2563         if ((error = iwm_nic_init(sc)) != 0) {
2564                 device_printf(sc->sc_dev, "unable to init nic\n");
2565                 return error;
2566         }
2567
2568         /* make sure rfkill handshake bits are cleared */
2569         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2570         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2571             IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2572
2573         /* clear (again), then enable host interrupts */
2574         IWM_WRITE(sc, IWM_CSR_INT, ~0);
2575         iwm_enable_interrupts(sc);
2576
2577         /* really make sure rfkill handshake bits are cleared */
2578         /* maybe we should write a few times more?  just to make sure */
2579         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2580         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2581
2582         /* Load the given image to the HW */
2583         return iwm_load_firmware(sc, ucode_type);
2584 }
2585
2586 static int
2587 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2588 {
2589         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2590                 .valid = htole32(valid_tx_ant),
2591         };
2592
2593         return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2594             IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2595 }
2596
2597 /* iwlwifi: mvm/fw.c */
2598 static int
2599 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2600 {
2601         struct iwm_phy_cfg_cmd phy_cfg_cmd;
2602         enum iwm_ucode_type ucode_type = sc->sc_uc_current;
2603
2604         /* Set parameters */
2605         phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
2606         phy_cfg_cmd.calib_control.event_trigger =
2607             sc->sc_default_calib[ucode_type].event_trigger;
2608         phy_cfg_cmd.calib_control.flow_trigger =
2609             sc->sc_default_calib[ucode_type].flow_trigger;
2610
2611         IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2612             "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2613         return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2614             sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2615 }
2616
2617 static int
2618 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2619         enum iwm_ucode_type ucode_type)
2620 {
2621         enum iwm_ucode_type old_type = sc->sc_uc_current;
2622         int error;
2623
2624         if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2625                 device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
2626                         error);
2627                 return error;
2628         }
2629
2630         sc->sc_uc_current = ucode_type;
2631         error = iwm_start_fw(sc, ucode_type);
2632         if (error) {
2633                 device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2634                 sc->sc_uc_current = old_type;
2635                 return error;
2636         }
2637
2638         error = iwm_post_alive(sc);
2639         if (error) {
2640                 device_printf(sc->sc_dev, "iwm_fw_alive: failed %d\n", error);
2641         }
2642         return error;
2643 }
2644
2645 /*
2646  * mvm misc bits
2647  */
2648
2649 /*
2650  * follows iwlwifi/fw.c
2651  */
2652 static int
2653 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2654 {
2655         int error;
2656
2657         /* do not operate with rfkill switch turned on */
2658         if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2659                 device_printf(sc->sc_dev,
2660                     "radio is disabled by hardware switch\n");
2661                 return EPERM;
2662         }
2663
2664         sc->sc_init_complete = 0;
2665         if ((error = iwm_mvm_load_ucode_wait_alive(sc,
2666             IWM_UCODE_TYPE_INIT)) != 0) {
2667                 device_printf(sc->sc_dev, "failed to load init firmware\n");
2668                 return error;
2669         }
2670
2671         if (justnvm) {
2672                 if ((error = iwm_nvm_init(sc)) != 0) {
2673                         device_printf(sc->sc_dev, "failed to read nvm\n");
2674                         return error;
2675                 }
2676                 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->sc_nvm.hw_addr);
2677
2678                 return 0;
2679         }
2680
2681         if ((error = iwm_send_bt_init_conf(sc)) != 0) {
2682                 device_printf(sc->sc_dev,
2683                     "failed to send bt coex configuration: %d\n", error);
2684                 return error;
2685         }
2686
2687         /* Init Smart FIFO. */
2688         error = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
2689         if (error != 0)
2690                 return error;
2691
2692         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2693             "%s: phy_txant=0x%08x, nvm_valid_tx_ant=0x%02x, valid=0x%02x\n",
2694             __func__,
2695             ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
2696               >> IWM_FW_PHY_CFG_TX_CHAIN_POS),
2697             sc->sc_nvm.valid_tx_ant,
2698             iwm_fw_valid_tx_ant(sc));
2699
2700
2701         /* Send TX valid antennas before triggering calibrations */
2702         if ((error = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc))) != 0) {
2703                 device_printf(sc->sc_dev,
2704                     "failed to send antennas before calibration: %d\n", error);
2705                 return error;
2706         }
2707
2708         /*
2709          * Send the phy configuration command to the init uCode
2710          * to start the 16.0 uCode init image internal calibrations.
2711          */
2712         if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
2713                 device_printf(sc->sc_dev,
2714                     "%s: failed to run internal calibration: %d\n",
2715                     __func__, error);
2716                 return error;
2717         }
2718
2719         /*
2720          * Nothing to do but wait for the init complete notification
2721          * from the firmware
2722          */
2723         while (!sc->sc_init_complete) {
2724                 error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
2725                                  0, "iwminit", 2*hz);
2726                 if (error) {
2727                         device_printf(sc->sc_dev, "init complete failed: %d\n",
2728                                 sc->sc_init_complete);
2729                         break;
2730                 }
2731         }
2732
2733         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "init %scomplete\n",
2734             sc->sc_init_complete ? "" : "not ");
2735
2736         return error;
2737 }
2738
2739 /*
2740  * receive side
2741  */
2742
2743 /* (re)stock rx ring, called at init-time and at runtime */
2744 static int
2745 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
2746 {
2747         struct iwm_rx_ring *ring = &sc->rxq;
2748         struct iwm_rx_data *data = &ring->data[idx];
2749         struct mbuf *m;
2750         bus_dmamap_t dmamap = NULL;
2751         bus_dma_segment_t seg;
2752         int nsegs, error;
2753
2754         m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
2755         if (m == NULL)
2756                 return ENOBUFS;
2757
2758         m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2759         error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
2760             &seg, &nsegs, BUS_DMA_NOWAIT);
2761         if (error != 0) {
2762                 device_printf(sc->sc_dev,
2763                     "%s: can't map mbuf, error %d\n", __func__, error);
2764                 goto fail;
2765         }
2766
2767         if (data->m != NULL)
2768                 bus_dmamap_unload(ring->data_dmat, data->map);
2769
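             /*
              * Note: the mbuf was loaded into the spare map above, so a failed
              * load leaves the previously mapped buffer untouched; only after
              * a successful load is the old map unloaded and swapped below.
              */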
2770         /* Swap ring->spare_map with data->map */
2771         dmamap = data->map;
2772         data->map = ring->spare_map;
2773         ring->spare_map = dmamap;
2774
2775         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
2776         data->m = m;
2777
2778         /* Update RX descriptor. */
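             /*
              * The ring entry stores the buffer's DMA address in 256-byte
              * units (address >> 8), hence the alignment assertion below.
              */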
2779         KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
2780         ring->desc[idx] = htole32(seg.ds_addr >> 8);
2781         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2782             BUS_DMASYNC_PREWRITE);
2783
2784         return 0;
2785 fail:
2786         m_freem(m);
2787         return error;
2788 }
2789
2790 /* iwlwifi: mvm/rx.c */
2791 #define IWM_RSSI_OFFSET 50
2792 static int
2793 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2794 {
2795         int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
2796         uint32_t agc_a, agc_b;
2797         uint32_t val;
2798
2799         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
2800         agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
2801         agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
2802
2803         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
2804         rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
2805         rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
2806
2807         /*
2808          * dBm = rssi dB - agc dB - constant.
2809          * Higher AGC (higher radio gain) means lower signal.
2810          */
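             /*
              * Illustrative numbers only: rssi_a = 30 with agc_a = 20 would
              * give rssi_a_dbm = 30 - 50 - 20 = -40 dBm.
              */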
2811         rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
2812         rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
2813         max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
2814
2815         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2816             "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
2817             rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
2818
2819         return max_rssi_dbm;
2820 }
2821
2822 /* iwlwifi: mvm/rx.c */
2823 /*
2824  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
2825  * values are reported by the fw as positive values - need to negate
2826  * to obtain their dBm.  Account for missing antennas by replacing 0
2827  * values with -256 dBm: practically 0 power and a non-feasible 8-bit value.
2828  */
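     /*
      * Example with made-up values: energy_a = 45, energy_b = 0 and
      * energy_c = 0 map to -45, -256 and -256 dBm, so -45 dBm is returned.
      */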
2829 static int
2830 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2831 {
2832         int energy_a, energy_b, energy_c, max_energy;
2833         uint32_t val;
2834
2835         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
2836         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
2837             IWM_RX_INFO_ENERGY_ANT_A_POS;
2838         energy_a = energy_a ? -energy_a : -256;
2839         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
2840             IWM_RX_INFO_ENERGY_ANT_B_POS;
2841         energy_b = energy_b ? -energy_b : -256;
2842         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
2843             IWM_RX_INFO_ENERGY_ANT_C_POS;
2844         energy_c = energy_c ? -energy_c : -256;
2845         max_energy = MAX(energy_a, energy_b);
2846         max_energy = MAX(max_energy, energy_c);
2847
2848         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2849             "energy In A %d B %d C %d , and max %d\n",
2850             energy_a, energy_b, energy_c, max_energy);
2851
2852         return max_energy;
2853 }
2854
2855 static void
2856 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
2857         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2858 {
2859         struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
2860
2861         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
2862         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2863
2864         memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
2865 }
2866
2867 /*
2868  * Retrieve the average noise (in dBm) among receivers.
2869  */
2870 static int
2871 iwm_get_noise(struct iwm_softc *sc,
2872     const struct iwm_mvm_statistics_rx_non_phy *stats)
2873 {
2874         int i, total, nbant, noise;
2875
2876         total = nbant = noise = 0;
2877         for (i = 0; i < 3; i++) {
2878                 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
2879                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
2880                     __func__,
2881                     i,
2882                     noise);
2883
2884                 if (noise) {
2885                         total += noise;
2886                         nbant++;
2887                 }
2888         }
2889
2890         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
2891             __func__, nbant, total);
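             /*
              * For the disabled computation below, e.g. silence RSSI samples
              * of 30, 28 and 0 would give (58 / 2) - 107 = -78 dBm.
              */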
2892 #if 0
2893         /* There should be at least one antenna but check anyway. */
2894         return (nbant == 0) ? -127 : (total / nbant) - 107;
2895 #else
2896         /* For now, just hard-code it to -96 to be safe */
2897         return (-96);
2898 #endif
2899 }
2900
2901 /*
2902  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
2903  *
2904  * Handles the actual data of the Rx packet from the fw
2905  */
2906 static void
2907 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
2908         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2909 {
2910         struct ieee80211com *ic = &sc->sc_ic;
2911         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2912         struct ieee80211_frame *wh;
2913         struct ieee80211_node *ni;
2914         struct ieee80211_rx_stats rxs;
2915         struct mbuf *m;
2916         struct iwm_rx_phy_info *phy_info;
2917         struct iwm_rx_mpdu_res_start *rx_res;
2918         uint32_t len;
2919         uint32_t rx_pkt_status;
2920         int rssi;
2921
2922         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2923
2924         phy_info = &sc->sc_last_phy_info;
2925         rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
2926         wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
2927         len = le16toh(rx_res->byte_count);
2928         rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
2929
2930         m = data->m;
2931         m->m_data = pkt->data + sizeof(*rx_res);
2932         m->m_pkthdr.len = m->m_len = len;
2933
2934         if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
2935                 device_printf(sc->sc_dev,
2936                     "dsp size out of range [0,20]: %d\n",
2937                     phy_info->cfg_phy_cnt);
2938                 goto fail;
2939         }
2940
2941         if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
2942             !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
2943                 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2944                     "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
2945                 goto fail;
2946         }
2947
2948         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
2949                 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
2950         } else {
2951                 rssi = iwm_mvm_calc_rssi(sc, phy_info);
2952         }
2953
2954                 /* Note: RSSI is absolute (i.e. a negative dBm value) */
2955         if (rssi < IWM_MIN_DBM)
2956                 rssi = IWM_MIN_DBM;
2957         else if (rssi > IWM_MAX_DBM)
2958                 rssi = IWM_MAX_DBM;
2959
2960         /* Map it to relative value */
2961         rssi = rssi - sc->sc_noise;
2962
2963         /* replenish ring for the buffer we're going to feed to the sharks */
2964         if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
2965                 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
2966                     __func__);
2967                 goto fail;
2968         }
2969
2970         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2971             "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
2972
2973         ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
2974
2975         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2976             "%s: phy_info: channel=%d, flags=0x%08x\n",
2977             __func__,
2978             le16toh(phy_info->channel),
2979             le16toh(phy_info->phy_flags));
2980
2981         /*
2982          * Populate an RX state struct with the provided information.
2983          */
2984         bzero(&rxs, sizeof(rxs));
2985         rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
2986         rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
2987         rxs.c_ieee = le16toh(phy_info->channel);
2988         if (phy_info->phy_flags & htole16(IWM_RX_RES_PHY_FLAGS_BAND_24)) {
2989                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
2990         } else {
2991                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
2992         }
2993
2994         /* rssi is in 1/2db units */
2995         rxs.c_rssi = rssi * 2;
2996         rxs.c_nf = sc->sc_noise;
2997         if (ieee80211_add_rx_params(m, &rxs) == 0)
2998                 goto fail;
2999
3000         if (ieee80211_radiotap_active_vap(vap)) {
3001                 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3002
3003                 tap->wr_flags = 0;
3004                 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3005                         tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3006                 tap->wr_chan_freq = htole16(rxs.c_freq);
3007                 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3008                 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3009                 tap->wr_dbm_antsignal = (int8_t)rssi;
3010                 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3011                 tap->wr_tsft = phy_info->system_timestamp;
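                     /*
                      * phy_info->rate holds the PLCP rate code: CCK codes are
                      * the bit rate in 100 kb/s units, OFDM codes the 4-bit
                      * PLCP SIGNAL value; wr_rate is in 500 kb/s units.
                      */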
3012                 switch (phy_info->rate) {
3013                 /* CCK rates. */
3014                 case  10: tap->wr_rate =   2; break;
3015                 case  20: tap->wr_rate =   4; break;
3016                 case  55: tap->wr_rate =  11; break;
3017                 case 110: tap->wr_rate =  22; break;
3018                 /* OFDM rates. */
3019                 case 0xd: tap->wr_rate =  12; break;
3020                 case 0xf: tap->wr_rate =  18; break;
3021                 case 0x5: tap->wr_rate =  24; break;
3022                 case 0x7: tap->wr_rate =  36; break;
3023                 case 0x9: tap->wr_rate =  48; break;
3024                 case 0xb: tap->wr_rate =  72; break;
3025                 case 0x1: tap->wr_rate =  96; break;
3026                 case 0x3: tap->wr_rate = 108; break;
3027                 /* Unknown rate: should not happen. */
3028                 default:  tap->wr_rate =   0;
3029                 }
3030         }
3031
3032         IWM_UNLOCK(sc);
3033         if (ni != NULL) {
3034                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3035                 ieee80211_input_mimo(ni, m);
3036                 ieee80211_free_node(ni);
3037         } else {
3038                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3039                 ieee80211_input_mimo_all(ic, m);
3040         }
3041         IWM_LOCK(sc);
3042
3043         return;
3044
3045 fail:   counter_u64_add(ic->ic_ierrors, 1);
3046 }
3047
3048 static int
3049 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3050         struct iwm_node *in)
3051 {
3052         struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3053         struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
3054         struct ieee80211_node *ni = &in->in_ni;
3055         int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3056
3057         KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3058
3059         /* Update rate control statistics. */
3060         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3061             __func__,
3062             (int) le16toh(tx_resp->status.status),
3063             (int) le16toh(tx_resp->status.sequence),
3064             tx_resp->frame_count,
3065             tx_resp->bt_kill_count,
3066             tx_resp->failure_rts,
3067             tx_resp->failure_frame,
3068             le32toh(tx_resp->initial_rate),
3069             (int) le16toh(tx_resp->wireless_media_time));
3070
3071         txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
3072                      IEEE80211_RATECTL_STATUS_LONG_RETRY;
3073         txs->short_retries = tx_resp->failure_rts;
3074         txs->long_retries = tx_resp->failure_frame;
3075         if (status != IWM_TX_STATUS_SUCCESS &&
3076             status != IWM_TX_STATUS_DIRECT_DONE) {
3077                 switch (status) {
3078                 case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
3079                         txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
3080                         break;
3081                 case IWM_TX_STATUS_FAIL_LONG_LIMIT:
3082                         txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
3083                         break;
3084                 case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
3085                         txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
3086                         break;
3087                 default:
3088                         txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
3089                         break;
3090                 }
3091         } else {
3092                 txs->status = IEEE80211_RATECTL_TX_SUCCESS;
3093         }
3094         ieee80211_ratectl_tx_complete(ni, txs);
3095
3096         return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
3097 }
3098
3099 static void
3100 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
3101         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3102 {
3103         struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3104         int idx = cmd_hdr->idx;
3105         int qid = cmd_hdr->qid;
3106         struct iwm_tx_ring *ring = &sc->txq[qid];
3107         struct iwm_tx_data *txd = &ring->data[idx];
3108         struct iwm_node *in = txd->in;
3109         struct mbuf *m = txd->m;
3110         int status;
3111
3112         KASSERT(txd->done == 0, ("txd not done"));
3113         KASSERT(txd->in != NULL, ("txd without node"));
3114         KASSERT(txd->m != NULL, ("txd without mbuf"));
3115
3116         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3117
3118         sc->sc_tx_timer = 0;
3119
3120         status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3121
3122         /* Unmap and free mbuf. */
3123         bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3124         bus_dmamap_unload(ring->data_dmat, txd->map);
3125
3126         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3127             "free txd %p, in %p\n", txd, txd->in);
3128         txd->done = 1;
3129         txd->m = NULL;
3130         txd->in = NULL;
3131
3132         ieee80211_tx_complete(&in->in_ni, m, status);
3133
3134         if (--ring->queued < IWM_TX_RING_LOMARK) {
3135                 sc->qfullmsk &= ~(1 << ring->qid);
3136                 if (sc->qfullmsk == 0) {
3137                         /*
3138                          * Well, we're in interrupt context, but then again
3139                          * I guess net80211 does all sorts of stunts in
3140                          * interrupt context, so maybe this is no biggie.
3141                          */
3142                         iwm_start(sc);
3143                 }
3144         }
3145 }
3146
3147 /*
3148  * transmit side
3149  */
3150
3151 /*
3152  * Process a "command done" firmware notification.  This is where we wakeup
3153  * processes waiting for a synchronous command completion.
3154  * from if_iwn
3155  */
3156 static void
3157 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3158 {
3159         struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3160         struct iwm_tx_data *data;
3161
3162         if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3163                 return; /* Not a command ack. */
3164         }
3165
3166         /* XXX wide commands? */
3167         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3168             "cmd notification type 0x%x qid %d idx %d\n",
3169             pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3170
3171         data = &ring->data[pkt->hdr.idx];
3172
3173         /* If the command was mapped in an mbuf, free it. */
3174         if (data->m != NULL) {
3175                 bus_dmamap_sync(ring->data_dmat, data->map,
3176                     BUS_DMASYNC_POSTWRITE);
3177                 bus_dmamap_unload(ring->data_dmat, data->map);
3178                 m_freem(data->m);
3179                 data->m = NULL;
3180         }
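             /*
              * Wake any thread sleeping on this descriptor in the synchronous
              * command send path.
              */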
3181         wakeup(&ring->desc[pkt->hdr.idx]);
3182 }
3183
3184 #if 0
3185 /*
3186  * necessary only for block ack mode
3187  */
3188 void
3189 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3190         uint16_t len)
3191 {
3192         struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3193         uint16_t w_val;
3194
3195         scd_bc_tbl = sc->sched_dma.vaddr;
3196
3197         len += 8; /* magic numbers came naturally from paris */
3198         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
3199                 len = roundup(len, 4) / 4;
3200
3201         w_val = htole16(sta_id << 12 | len);
3202
3203         /* Update TX scheduler. */
3204         scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3205         bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3206             BUS_DMASYNC_PREWRITE);
3207
3208         /* Entry is mirrored past the ring end, apparently so the scheduler can read across the wrap. */
3209         if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3210                 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3211                 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3212                     BUS_DMASYNC_PREWRITE);
3213         }
3214 }
3215 #endif
3216
3217 /*
3218  * Take an 802.11 (non-n) rate and find the relevant rate
3219  * table entry.  Return the index into in_ridx[].
3220  *
3221  * The caller then uses that index back into in_ridx
3222  * to figure out the rate index programmed /into/
3223  * the firmware for this given node.
3224  */
3225 static int
3226 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3227     uint8_t rate)
3228 {
3229         int i;
3230         uint8_t r;
3231
3232         for (i = 0; i < nitems(in->in_ridx); i++) {
3233                 r = iwm_rates[in->in_ridx[i]].rate;
3234                 if (rate == r)
3235                         return (i);
3236         }
3237
3238         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3239             "%s: couldn't find an entry for rate=%d\n",
3240             __func__,
3241             rate);
3242
3243         /* XXX Return the first */
3244         /* XXX TODO: have it return the /lowest/ */
3245         return (0);
3246 }
3247
3248 static int
3249 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3250 {
3251         int i;
3252
3253         for (i = 0; i < nitems(iwm_rates); i++) {
3254                 if (iwm_rates[i].rate == rate)
3255                         return (i);
3256         }
3257         /* XXX error? */
3258         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3259             "%s: couldn't find an entry for rate=%d\n",
3260             __func__,
3261             rate);
3262         return (0);
3263 }
3264
3265 /*
3266  * Fill in the rate related information for a transmit command.
3267  */
3268 static const struct iwm_rate *
3269 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3270         struct mbuf *m, struct iwm_tx_cmd *tx)
3271 {
3272         struct ieee80211_node *ni = &in->in_ni;
3273         struct ieee80211_frame *wh;
3274         const struct ieee80211_txparam *tp = ni->ni_txparms;
3275         const struct iwm_rate *rinfo;
3276         int type;
3277         int ridx, rate_flags;
3278
3279         wh = mtod(m, struct ieee80211_frame *);
3280         type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3281
3282         tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3283         tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3284
3285         if (type == IEEE80211_FC0_TYPE_MGT) {
3286                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3287                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3288                     "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3289         } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3290                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3291                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3292                     "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3293         } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3294                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3295                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3296                     "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3297         } else if (m->m_flags & M_EAPOL) {
3298                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3299                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3300                     "%s: EAPOL\n", __func__);
3301         } else if (type == IEEE80211_FC0_TYPE_DATA) {
3302                 int i;
3303
3304                 /* for data frames, use RS table */
3305                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3306                 /* XXX pass pktlen */
3307                 (void) ieee80211_ratectl_rate(ni, NULL, 0);
3308                 i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3309                 ridx = in->in_ridx[i];
3310
3311                 /* This is the index into the programmed table */
3312                 tx->initial_rate_index = i;
3313                 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3314
3315                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3316                     "%s: start with i=%d, txrate %d\n",
3317                     __func__, i, iwm_rates[ridx].rate);
3318         } else {
3319                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3320                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DEFAULT (%d)\n",
3321                     __func__, tp->mgmtrate);
3322         }
3323
3324         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3325             "%s: frame type=%d txrate %d\n",
3326                 __func__, type, iwm_rates[ridx].rate);
3327
3328         rinfo = &iwm_rates[ridx];
3329
3330         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3331             __func__, ridx,
3332             rinfo->rate,
3333             !! (IWM_RIDX_IS_CCK(ridx))
3334             );
3335
3336         /* XXX TODO: hard-coded TX antenna? */
3337         rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3338         if (IWM_RIDX_IS_CCK(ridx))
3339                 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3340         tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3341
3342         return rinfo;
3343 }
3344
3345 #define TB0_SIZE 16
3346 static int
3347 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3348 {
3349         struct ieee80211com *ic = &sc->sc_ic;
3350         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3351         struct iwm_node *in = IWM_NODE(ni);
3352         struct iwm_tx_ring *ring;
3353         struct iwm_tx_data *data;
3354         struct iwm_tfd *desc;
3355         struct iwm_device_cmd *cmd;
3356         struct iwm_tx_cmd *tx;
3357         struct ieee80211_frame *wh;
3358         struct ieee80211_key *k = NULL;
3359         struct mbuf *m1;
3360         const struct iwm_rate *rinfo;
3361         uint32_t flags;
3362         u_int hdrlen;
3363         bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3364         int nsegs;
3365         uint8_t tid, type;
3366         int i, totlen, error, pad;
3367
3368         wh = mtod(m, struct ieee80211_frame *);
3369         hdrlen = ieee80211_anyhdrsize(wh);
3370         type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3371         tid = 0;
3372         ring = &sc->txq[ac];
3373         desc = &ring->desc[ring->cur];
3374         memset(desc, 0, sizeof(*desc));
3375         data = &ring->data[ring->cur];
3376
3377         /* Fill out iwm_tx_cmd to send to the firmware */
3378         cmd = &ring->cmd[ring->cur];
3379         cmd->hdr.code = IWM_TX_CMD;
3380         cmd->hdr.flags = 0;
3381         cmd->hdr.qid = ring->qid;
3382         cmd->hdr.idx = ring->cur;
3383
3384         tx = (void *)cmd->data;
3385         memset(tx, 0, sizeof(*tx));
3386
3387         rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
3388
3389         /* Encrypt the frame if need be. */
3390         if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3391                 /* Retrieve key for TX && do software encryption. */
3392                 k = ieee80211_crypto_encap(ni, m);
3393                 if (k == NULL) {
3394                         m_freem(m);
3395                         return (ENOBUFS);
3396                 }
3397                 /* 802.11 header may have moved. */
3398                 wh = mtod(m, struct ieee80211_frame *);
3399         }
3400
3401         if (ieee80211_radiotap_active_vap(vap)) {
3402                 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3403
3404                 tap->wt_flags = 0;
3405                 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3406                 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3407                 tap->wt_rate = rinfo->rate;
3408                 if (k != NULL)
3409                         tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3410                 ieee80211_radiotap_tx(vap, m);
3411         }
3412
3413
3414         totlen = m->m_pkthdr.len;
3415
3416         flags = 0;
3417         if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3418                 flags |= IWM_TX_CMD_FLG_ACK;
3419         }
3420
3421         if (type == IEEE80211_FC0_TYPE_DATA
3422             && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3423             && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3424                 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3425         }
3426
3427         if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3428             type != IEEE80211_FC0_TYPE_DATA)
3429                 tx->sta_id = sc->sc_aux_sta.sta_id;
3430         else
3431                 tx->sta_id = IWM_STATION_ID;
3432
3433         if (type == IEEE80211_FC0_TYPE_MGT) {
3434                 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3435
3436                 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3437                     subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3438                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3439                 } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3440                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3441                 } else {
3442                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3443                 }
3444         } else {
3445                 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3446         }
3447
3448         if (hdrlen & 3) {
3449                 /* First segment length must be a multiple of 4. */
3450                 flags |= IWM_TX_CMD_FLG_MH_PAD;
3451                 pad = 4 - (hdrlen & 3);
3452         } else
3453                 pad = 0;
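             /* E.g. a 26-byte QoS data header yields pad = 2; a 24-byte header needs none. */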
3454
3455         tx->driver_txop = 0;
3456         tx->next_frame_len = 0;
3457
3458         tx->len = htole16(totlen);
3459         tx->tid_tspec = tid;
3460         tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3461
3462         /* Set physical address of "scratch area". */
3463         tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3464         tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3465
3466         /* Copy 802.11 header in TX command. */
3467         memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3468
3469         flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3470
3471         tx->sec_ctl = 0;
3472         tx->tx_flags |= htole32(flags);
3473
3474         /* Trim 802.11 header. */
3475         m_adj(m, hdrlen);
3476         error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3477             segs, &nsegs, BUS_DMA_NOWAIT);
3478         if (error != 0) {
3479                 if (error != EFBIG) {
3480                         device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3481                             error);
3482                         m_freem(m);
3483                         return error;
3484                 }
3485                 /* Too many DMA segments, linearize mbuf. */
3486                 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3487                 if (m1 == NULL) {
3488                         device_printf(sc->sc_dev,
3489                             "%s: could not defrag mbuf\n", __func__);
3490                         m_freem(m);
3491                         return (ENOBUFS);
3492                 }
3493                 m = m1;
3494
3495                 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3496                     segs, &nsegs, BUS_DMA_NOWAIT);
3497                 if (error != 0) {
3498                         device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3499                             error);
3500                         m_freem(m);
3501                         return error;
3502                 }
3503         }
3504         data->m = m;
3505         data->in = in;
3506         data->done = 0;
3507
3508         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3509             "sending txd %p, in %p\n", data, data->in);
3510         KASSERT(data->in != NULL, ("node is NULL"));
3511
3512         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3513             "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3514             ring->qid, ring->cur, totlen, nsegs,
3515             le32toh(tx->tx_flags),
3516             le32toh(tx->rate_n_flags),
3517             tx->initial_rate_index
3518             );
3519
3520         /* Fill TX descriptor. */
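             /*
              * TB0 covers the first TB0_SIZE bytes of the command, TB1 the
              * rest of the TX command plus the copied (and padded) 802.11
              * header; the remaining TBs map the payload DMA segments.
              */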
3521         desc->num_tbs = 2 + nsegs;
3522
3523         desc->tbs[0].lo = htole32(data->cmd_paddr);
3524         desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3525             (TB0_SIZE << 4);
3526         desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3527         desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3528             ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3529               + hdrlen + pad - TB0_SIZE) << 4);
3530
3531         /* Other DMA segments are for data payload. */
3532         for (i = 0; i < nsegs; i++) {
3533                 seg = &segs[i];
3534                 desc->tbs[i+2].lo = htole32(seg->ds_addr);
3535                 desc->tbs[i+2].hi_n_len = \
3536                     htole16(iwm_get_dma_hi_addr(seg->ds_addr))
3537                     | ((seg->ds_len) << 4);
3538         }
3539
3540         bus_dmamap_sync(ring->data_dmat, data->map,
3541             BUS_DMASYNC_PREWRITE);
3542         bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3543             BUS_DMASYNC_PREWRITE);
3544         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3545             BUS_DMASYNC_PREWRITE);
3546
3547 #if 0
3548         iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3549 #endif
3550
3551         /* Kick TX ring. */
3552         ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3553         IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3554
3555         /* Mark TX ring as full if we reach a certain threshold. */
3556         if (++ring->queued > IWM_TX_RING_HIMARK) {
3557                 sc->qfullmsk |= 1 << ring->qid;
3558         }
3559
3560         return 0;
3561 }
3562
3563 static int
3564 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3565     const struct ieee80211_bpf_params *params)
3566 {
3567         struct ieee80211com *ic = ni->ni_ic;
3568         struct iwm_softc *sc = ic->ic_softc;
3569         int error = 0;
3570
3571         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3572             "->%s begin\n", __func__);
3573
3574         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3575                 m_freem(m);
3576                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3577                     "<-%s not RUNNING\n", __func__);
3578                 return (ENETDOWN);
3579         }
3580
3581         IWM_LOCK(sc);
3582         /* XXX fix this: the bpf params are currently ignored; both paths use iwm_tx() */
3583         if (params == NULL) {
3584                 error = iwm_tx(sc, m, ni, 0);
3585         } else {
3586                 error = iwm_tx(sc, m, ni, 0);
3587         }
3588         sc->sc_tx_timer = 5;
3589         IWM_UNLOCK(sc);
3590
3591         return (error);
3592 }
3593
3594 /*
3595  * mvm/tx.c
3596  */
3597
3598 /*
3599  * Note that there are transports that buffer frames before they reach
3600  * the firmware. This means that after flush_tx_path is called, the
3601  * queue might not be empty. The race-free way to handle this is to:
3602  * 1) set the station as draining
3603  * 2) flush the Tx path
3604  * 3) wait for the transport queues to be empty
3605  */
3606 int
3607 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3608 {
3609         int ret;
3610         struct iwm_tx_path_flush_cmd flush_cmd = {
3611                 .queues_ctl = htole32(tfd_msk),
3612                 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3613         };
3614
3615         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3616             sizeof(flush_cmd), &flush_cmd);
3617         if (ret)
3618                 device_printf(sc->sc_dev,
3619                     "Flushing tx queue failed: %d\n", ret);
3620         return ret;
3621 }
3622
3623 /*
3624  * BEGIN mvm/sta.c
3625  */
3626
3627 static int
3628 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3629         struct iwm_mvm_add_sta_cmd_v7 *cmd, int *status)
3630 {
3631         return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
3632             cmd, status);
3633 }
3634
3635 /* send station add/update command to firmware */
3636 static int
3637 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3638 {
3639         struct iwm_mvm_add_sta_cmd_v7 add_sta_cmd;
3640         int ret;
3641         uint32_t status;
3642
3643         memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3644
3645         add_sta_cmd.sta_id = IWM_STATION_ID;
3646         add_sta_cmd.mac_id_n_color
3647             = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3648                 IWM_DEFAULT_COLOR));
3649         if (!update) {
3650                 int ac;
3651                 for (ac = 0; ac < WME_NUM_AC; ac++) {
3652                         add_sta_cmd.tfd_queue_msk |=
3653                             htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
3654                 }
3655                 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3656         }
3657         add_sta_cmd.add_modify = update ? 1 : 0;
3658         add_sta_cmd.station_flags_msk
3659             |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3660         add_sta_cmd.tid_disable_tx = htole16(0xffff);
3661         if (update)
3662                 add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
3663
3664         status = IWM_ADD_STA_SUCCESS;
3665         ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3666         if (ret)
3667                 return ret;
3668
3669         switch (status) {
3670         case IWM_ADD_STA_SUCCESS:
3671                 break;
3672         default:
3673                 ret = EIO;
3674                 device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3675                 break;
3676         }
3677
3678         return ret;
3679 }
3680
3681 static int
3682 iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
3683 {
3684         return iwm_mvm_sta_send_to_fw(sc, in, 0);
3685 }
3686
3687 static int
3688 iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
3689 {
3690         return iwm_mvm_sta_send_to_fw(sc, in, 1);
3691 }
3692
3693 static int
3694 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3695         const uint8_t *addr, uint16_t mac_id, uint16_t color)
3696 {
3697         struct iwm_mvm_add_sta_cmd_v7 cmd;
3698         int ret;
3699         uint32_t status;
3700
3701         memset(&cmd, 0, sizeof(cmd));
3702         cmd.sta_id = sta->sta_id;
3703         cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3704
3705         cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3706         cmd.tid_disable_tx = htole16(0xffff);
3707
3708         if (addr)
3709                 IEEE80211_ADDR_COPY(cmd.addr, addr);
3710
3711         ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3712         if (ret)
3713                 return ret;
3714
3715         switch (status) {
3716         case IWM_ADD_STA_SUCCESS:
3717                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3718                     "%s: Internal station added.\n", __func__);
3719                 return 0;
3720         default:
3721                 device_printf(sc->sc_dev,
3722                     "%s: Add internal station failed, status=0x%x\n",
3723                     __func__, status);
3724                 ret = EIO;
3725                 break;
3726         }
3727         return ret;
3728 }
3729
3730 static int
3731 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3732 {
3733         int ret;
3734
3735         sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
3736         sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
3737
3738         ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
3739         if (ret)
3740                 return ret;
3741
3742         ret = iwm_mvm_add_int_sta_common(sc,
3743             &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
3744
3745         if (ret)
3746                 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
3747         return ret;
3748 }
3749
3750 /*
3751  * END mvm/sta.c
3752  */
3753
3754 /*
3755  * BEGIN mvm/quota.c
3756  */
3757
3758 static int
3759 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
3760 {
3761         struct iwm_time_quota_cmd cmd;
3762         int i, idx, ret, num_active_macs, quota, quota_rem;
3763         int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3764         int n_ifs[IWM_MAX_BINDINGS] = {0, };
3765         uint16_t id;
3766
3767         memset(&cmd, 0, sizeof(cmd));
3768
3769         /* currently, PHY ID == binding ID */
3770         if (in) {
3771                 id = in->in_phyctxt->id;
3772                 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3773                 colors[id] = in->in_phyctxt->color;
3774
3775                 if (1)
3776                         n_ifs[id] = 1;
3777         }
3778
3779         /*
3780          * The FW's scheduling session consists of
3781          * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3782          * equally between all the bindings that require quota
3783          */
3784         num_active_macs = 0;
3785         for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3786                 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3787                 num_active_macs += n_ifs[i];
3788         }
3789
3790         quota = 0;
3791         quota_rem = 0;
3792         if (num_active_macs) {
3793                 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3794                 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3795         }
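             /*
              * E.g. a single active MAC gets the whole IWM_MVM_MAX_QUOTA and
              * quota_rem = 0; with three MACs each gets a third, and the
              * remainder is folded into the first binding below.
              */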
3796
3797         for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3798                 if (colors[i] < 0)
3799                         continue;
3800
3801                 cmd.quotas[idx].id_and_color =
3802                         htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3803
3804                 if (n_ifs[i] <= 0) {
3805                         cmd.quotas[idx].quota = htole32(0);
3806                         cmd.quotas[idx].max_duration = htole32(0);
3807                 } else {
3808                         cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3809                         cmd.quotas[idx].max_duration = htole32(0);
3810                 }
3811                 idx++;
3812         }
3813
3814         /* Give the remainder of the session to the first binding */
3815         cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3816
3817         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3818             sizeof(cmd), &cmd);
3819         if (ret)
3820                 device_printf(sc->sc_dev,
3821                     "%s: Failed to send quota: %d\n", __func__, ret);
3822         return ret;
3823 }
3824
3825 /*
3826  * END mvm/quota.c
3827  */
3828
3829 /*
3830  * ieee80211 routines
3831  */
3832
3833 /*
3834  * Change to AUTH state in 80211 state machine.  Roughly matches what
3835  * Linux does in bss_info_changed().
3836  */
3837 static int
3838 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3839 {
3840         struct ieee80211_node *ni;
3841         struct iwm_node *in;
3842         struct iwm_vap *iv = IWM_VAP(vap);
3843         uint32_t duration;
3844         int error;
3845
3846         /*
3847          * XXX i have a feeling that the vap node is being
3848          * freed from underneath us. Grr.
3849          */
3850         ni = ieee80211_ref_node(vap->iv_bss);
3851         in = IWM_NODE(ni);
3852         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3853             "%s: called; vap=%p, bss ni=%p\n",
3854             __func__,
3855             vap,
3856             ni);
3857
3858         in->in_assoc = 0;
3859
3860         error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
3861         if (error != 0)
3862                 return error;
3863
3864         error = iwm_allow_mcast(vap, sc);
3865         if (error) {
3866                 device_printf(sc->sc_dev,
3867                     "%s: failed to set multicast\n", __func__);
3868                 goto out;
3869         }
3870
3871         /*
3872          * This is where it deviates from what Linux does.
3873          *
3874          * Linux iwlwifi doesn't reset the nic each time, nor does it
3875          * call ctxt_add() here.  Instead, it adds it during vap creation,
3876          * and always does a mac_ctx_changed().
3877          *
3878          * The openbsd port doesn't attempt to do that - it reset things
3879          * at odd states and does the add here.
3880          *
3881          * So, until the state handling is fixed (ie, we never reset
3882          * the NIC except for a firmware failure, which should drag
3883          * the NIC back to IDLE, re-setup and re-add all the mac/phy
3884          * contexts that are required), let's do a dirty hack here.
3885          */
3886         if (iv->is_uploaded) {
3887                 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3888                         device_printf(sc->sc_dev,
3889                             "%s: failed to update MAC\n", __func__);
3890                         goto out;
3891                 }
3892                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3893                     in->in_ni.ni_chan, 1, 1)) != 0) {
3894                         device_printf(sc->sc_dev,
3895                             "%s: failed update phy ctxt\n", __func__);
3896                         goto out;
3897                 }
3898                 in->in_phyctxt = &sc->sc_phyctxt[0];
3899
3900                 if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
3901                         device_printf(sc->sc_dev,
3902                             "%s: binding update cmd\n", __func__);
3903                         goto out;
3904                 }
3905                 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3906                         device_printf(sc->sc_dev,
3907                             "%s: failed to update sta\n", __func__);
3908                         goto out;
3909                 }
3910         } else {
3911                 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
3912                         device_printf(sc->sc_dev,
3913                             "%s: failed to add MAC\n", __func__);
3914                         goto out;
3915                 }
3916                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3917                     in->in_ni.ni_chan, 1, 1)) != 0) {
3918                         device_printf(sc->sc_dev,
3919                             "%s: failed add phy ctxt!\n", __func__);
3920                         error = ETIMEDOUT;
3921                         goto out;
3922                 }
3923                 in->in_phyctxt = &sc->sc_phyctxt[0];
3924
3925                 if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
3926                         device_printf(sc->sc_dev,
3927                             "%s: binding add cmd\n", __func__);
3928                         goto out;
3929                 }
3930                 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
3931                         device_printf(sc->sc_dev,
3932                             "%s: failed to add sta\n", __func__);
3933                         goto out;
3934                 }
3935         }
3936
3937         /*
3938          * Prevent the FW from wandering off channel during association
3939          * by "protecting" the session with a time event.
3940          */
3941         /* XXX duration is in units of TU, not MS */
3942         duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
3943         iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
3944         DELAY(100);
3945
3946         error = 0;
3947 out:
3948         ieee80211_free_node(ni);
3949         return (error);
3950 }
3951
3952 static int
3953 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
3954 {
3955         struct iwm_node *in = IWM_NODE(vap->iv_bss);
3956         int error;
3957
3958         if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3959                 device_printf(sc->sc_dev,
3960                     "%s: failed to update STA\n", __func__);
3961                 return error;
3962         }
3963
3964         in->in_assoc = 1;
3965         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3966                 device_printf(sc->sc_dev,
3967                     "%s: failed to update MAC\n", __func__);
3968                 return error;
3969         }
3970
3971         return 0;
3972 }
3973
3974 static int
3975 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
3976 {
3977         uint32_t tfd_msk;
3978
3979         /*
3980          * Ok, so *technically* the proper set of calls for going
3981          * from RUN back to SCAN is:
3982          *
3983          * iwm_mvm_power_mac_disable(sc, in);
3984          * iwm_mvm_mac_ctxt_changed(sc, in);
3985          * iwm_mvm_rm_sta(sc, in);
3986          * iwm_mvm_update_quotas(sc, NULL);
3987          * iwm_mvm_mac_ctxt_changed(sc, in);
3988          * iwm_mvm_binding_remove_vif(sc, in);
3989          * iwm_mvm_mac_ctxt_remove(sc, in);
3990          *
3991          * However, that freezes the device no matter which permutations
3992          * and modifications are attempted.  Obviously, this driver is missing
3993          * something since it works in the Linux driver, but figuring out what
3994          * is missing is a little more complicated.  Now, since we're going
3995          * back to nothing anyway, we'll just do a complete device reset.
3996          * Up yours, device!
3997          */
3998         /*
3999          * Just using 0xf for the queues mask is fine as long as we only
4000          * get here from RUN state.
4001          */
4002         tfd_msk = 0xf;
4003         mbufq_drain(&sc->sc_snd);
4004         iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
4005         /*
4006          * We seem to get away with just synchronously sending the
4007          * IWM_TXPATH_FLUSH command.
4008          */
4009 //      iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
4010         iwm_stop_device(sc);
4011         iwm_init_hw(sc);
4012         if (in)
4013                 in->in_assoc = 0;
4014         return 0;
4015
4016 #if 0
4017         int error;
4018
4019         iwm_mvm_power_mac_disable(sc, in);
4020
4021         if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4022                 device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
4023                 return error;
4024         }
4025
4026         if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
4027                 device_printf(sc->sc_dev, "sta remove fail %d\n", error);
4028                 return error;
4029         }
4030         error = iwm_mvm_rm_sta(sc, in);
4031         in->in_assoc = 0;
4032         iwm_mvm_update_quotas(sc, NULL);
4033         if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4034                 device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
4035                 return error;
4036         }
4037         iwm_mvm_binding_remove_vif(sc, in);
4038
4039         iwm_mvm_mac_ctxt_remove(sc, in);
4040
4041         return error;
4042 #endif
4043 }
4044
4045 static struct ieee80211_node *
4046 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4047 {
4048         return malloc(sizeof (struct iwm_node), M_80211_NODE,
4049             M_NOWAIT | M_ZERO);
4050 }
4051
4052 static void
4053 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
4054 {
4055         struct ieee80211_node *ni = &in->in_ni;
4056         struct iwm_lq_cmd *lq = &in->in_lq;
4057         int nrates = ni->ni_rates.rs_nrates;
4058         int i, ridx, tab = 0;
4059 //      int txant = 0;
4060
4061         if (nrates > nitems(lq->rs_table)) {
4062                 device_printf(sc->sc_dev,
4063                     "%s: node supports %d rates, driver handles "
4064                     "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4065                 return;
4066         }
4067         if (nrates == 0) {
4068                 device_printf(sc->sc_dev,
4069                     "%s: node supports 0 rates, odd!\n", __func__);
4070                 return;
4071         }
4072
4073         /*
4074          * XXX .. and most of iwm_node is not initialised explicitly;
4075          * it's all just 0x0 passed to the firmware.
4076          */
4077
4078         /* first figure out which rates we should support */
4079         /* XXX TODO: this isn't 11n aware /at all/ */
4080         memset(&in->in_ridx, -1, sizeof(in->in_ridx));
4081         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4082             "%s: nrates=%d\n", __func__, nrates);
4083
4084         /*
4085          * Loop over nrates and populate in_ridx from the highest
4086          * rate to the lowest rate.  Remember, in_ridx[] has
4087          * IEEE80211_RATE_MAXSIZE entries!
4088          */
4089         for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
4090                 int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;
4091
4092                 /* Map 802.11 rate to HW rate index. */
4093                 for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
4094                         if (iwm_rates[ridx].rate == rate)
4095                                 break;
4096                 if (ridx > IWM_RIDX_MAX) {
4097                         device_printf(sc->sc_dev,
4098                             "%s: WARNING: device rate for %d not found!\n",
4099                             __func__, rate);
4100                 } else {
4101                         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4102                             "%s: rate: i: %d, rate=%d, ridx=%d\n",
4103                             __func__,
4104                             i,
4105                             rate,
4106                             ridx);
4107                         in->in_ridx[i] = ridx;
4108                 }
4109         }
4110
4111         /* then construct a lq_cmd based on those */
4112         memset(lq, 0, sizeof(*lq));
4113         lq->sta_id = IWM_STATION_ID;
4114
4115         /* For HT, always enable RTS/CTS to avoid excessive retries. */
4116         if (ni->ni_flags & IEEE80211_NODE_HT)
4117                 lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4118
4119         /*
4120          * are these used? (we don't do SISO or MIMO)
4121          * need to set them to non-zero, though, or we get an error.
4122          */
4123         lq->single_stream_ant_msk = 1;
4124         lq->dual_stream_ant_msk = 1;
4125
4126         /*
4127          * Build the actual rate selection table.
4128          * The lowest bits are the rates.  Additionally,
4129          * CCK needs bit 9 to be set.  The rest of the bits
4130          * we add to the table select the tx antenna.
4131          * Note that we add the rates highest rate first
4132          * (i.e. the opposite order of ni_rates).
4133          */
4134         /*
4135          * XXX TODO: this should be looping over the min of nrates
4136          * and LQ_MAX_RETRY_NUM.  Sigh.
4137          */
4138         for (i = 0; i < nrates; i++) {
4139                 int nextant;
4140
4141 #if 0
4142                 if (txant == 0)
4143                         txant = iwm_fw_valid_tx_ant(sc);
4144                 nextant = 1<<(ffs(txant)-1);
4145                 txant &= ~nextant;
4146 #else
4147                 nextant = iwm_fw_valid_tx_ant(sc);
4148 #endif
4149                 /*
4150                  * Map the rate id into a rate index into
4151                  * our hardware table containing the
4152                  * configuration to use for this rate.
4153                  */
4154                 ridx = in->in_ridx[i];
4155                 tab = iwm_rates[ridx].plcp;
4156                 tab |= nextant << IWM_RATE_MCS_ANT_POS;
4157                 if (IWM_RIDX_IS_CCK(ridx))
4158                         tab |= IWM_RATE_MCS_CCK_MSK;
4159                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4160                     "station rate i=%d, rate=%d, hw=%x\n",
4161                     i, iwm_rates[ridx].rate, tab);
4162                 lq->rs_table[i] = htole32(tab);
4163         }
4164         /* then fill the rest with the lowest possible rate */
4165         for (i = nrates; i < nitems(lq->rs_table); i++) {
4166                 KASSERT(tab != 0, ("invalid tab"));
4167                 lq->rs_table[i] = htole32(tab);
4168         }
4169 }
4170
4171 static int
4172 iwm_media_change(struct ifnet *ifp)
4173 {
4174         struct ieee80211vap *vap = ifp->if_softc;
4175         struct ieee80211com *ic = vap->iv_ic;
4176         struct iwm_softc *sc = ic->ic_softc;
4177         int error;
4178
4179         error = ieee80211_media_change(ifp);
4180         if (error != ENETRESET)
4181                 return error;
4182
4183         IWM_LOCK(sc);
4184         if (ic->ic_nrunning > 0) {
4185                 iwm_stop(sc);
4186                 iwm_init(sc);
4187         }
4188         IWM_UNLOCK(sc);
4189         return error;
4190 }
4191
4192
4193 static int
4194 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4195 {
4196         struct iwm_vap *ivp = IWM_VAP(vap);
4197         struct ieee80211com *ic = vap->iv_ic;
4198         struct iwm_softc *sc = ic->ic_softc;
4199         struct iwm_node *in;
4200         int error;
4201
4202         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4203             "switching state %s -> %s\n",
4204             ieee80211_state_name[vap->iv_state],
4205             ieee80211_state_name[nstate]);
4206         IEEE80211_UNLOCK(ic);
4207         IWM_LOCK(sc);
4208
4209         if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
4210                 iwm_led_blink_stop(sc);
4211
4212         /* disable beacon filtering if we're hopping out of RUN */
4213         if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
4214                 iwm_mvm_disable_beacon_filter(sc);
4215
4216                 if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4217                         in->in_assoc = 0;
4218
4219                 if (nstate == IEEE80211_S_INIT) {
4220                         IWM_UNLOCK(sc);
4221                         IEEE80211_LOCK(ic);
4222                         error = ivp->iv_newstate(vap, nstate, arg);
4223                         IEEE80211_UNLOCK(ic);
4224                         IWM_LOCK(sc);
4225                         iwm_release(sc, NULL);
4226                         IWM_UNLOCK(sc);
4227                         IEEE80211_LOCK(ic);
4228                         return error;
4229                 }
4230
4231                 /*
4232                  * It's impossible to go directly from RUN to SCAN. If we
4233                  * iwm_release() above, the card is completely reinitialized,
4234                  * so the driver must do everything necessary to bring the
4235                  * card from INIT to SCAN.
4236                  *
4237                  * Additionally, upon receiving a deauth frame from the AP,
4238                  * the OpenBSD 802.11 stack puts the driver into the IEEE80211_S_AUTH
4239                  * state. This will also fail with this driver, so bring the FSM
4240                  * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
4241                  *
4242                  * XXX TODO: fix this for FreeBSD!
4243                  */
4244                 if (nstate == IEEE80211_S_SCAN ||
4245                     nstate == IEEE80211_S_AUTH ||
4246                     nstate == IEEE80211_S_ASSOC) {
4247                         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4248                             "Force transition to INIT; MGT=%d\n", arg);
4249                         IWM_UNLOCK(sc);
4250                         IEEE80211_LOCK(ic);
4251                         /* Always pass arg as -1 since we can't Tx right now. */
4252                         /*
4253                          * XXX arg is just ignored anyway when transitioning
4254                          *     to IEEE80211_S_INIT.
4255                          */
4256                         vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
4257                         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4258                             "Going INIT->SCAN\n");
4259                         nstate = IEEE80211_S_SCAN;
4260                         IEEE80211_UNLOCK(ic);
4261                         IWM_LOCK(sc);
4262                 }
4263         }
4264
4265         switch (nstate) {
4266         case IEEE80211_S_INIT:
4267                 break;
4268
4269         case IEEE80211_S_AUTH:
4270                 if ((error = iwm_auth(vap, sc)) != 0) {
4271                         device_printf(sc->sc_dev,
4272                             "%s: could not move to auth state: %d\n",
4273                             __func__, error);
4274                         break;
4275                 }
4276                 break;
4277
4278         case IEEE80211_S_ASSOC:
4279                 if ((error = iwm_assoc(vap, sc)) != 0) {
4280                         device_printf(sc->sc_dev,
4281                             "%s: failed to associate: %d\n", __func__,
4282                             error);
4283                         break;
4284                 }
4285                 break;
4286
4287         case IEEE80211_S_RUN:
4288         {
4289                 struct iwm_host_cmd cmd = {
4290                         .id = IWM_LQ_CMD,
4291                         .len = { sizeof(in->in_lq), },
4292                         .flags = IWM_CMD_SYNC,
4293                 };
4294
4295                 /* Update the association state, now that we have it all. */
4296                 /* (e.g. the associd comes in at this point.) */
4297                 error = iwm_assoc(vap, sc);
4298                 if (error != 0) {
4299                         device_printf(sc->sc_dev,
4300                             "%s: failed to update association state: %d\n",
4301                             __func__,
4302                             error);
4303                         break;
4304                 }
4305
4306                 in = IWM_NODE(vap->iv_bss);
4307                 iwm_mvm_power_mac_update_mode(sc, in);
4308                 iwm_mvm_enable_beacon_filter(sc, in);
4309                 iwm_mvm_update_quotas(sc, in);
4310                 iwm_setrates(sc, in);
4311
4312                 cmd.data[0] = &in->in_lq;
4313                 if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
4314                         device_printf(sc->sc_dev,
4315                             "%s: IWM_LQ_CMD failed\n", __func__);
4316                 }
4317
4318                 iwm_mvm_led_enable(sc);
4319                 break;
4320         }
4321
4322         default:
4323                 break;
4324         }
4325         IWM_UNLOCK(sc);
4326         IEEE80211_LOCK(ic);
4327
4328         return (ivp->iv_newstate(vap, nstate, arg));
4329 }
4330
4331 void
4332 iwm_endscan_cb(void *arg, int pending)
4333 {
4334         struct iwm_softc *sc = arg;
4335         struct ieee80211com *ic = &sc->sc_ic;
4336
4337         IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4338             "%s: scan ended\n",
4339             __func__);
4340
4341         ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4342 }
4343
4344 /*
4345  * Aging and idle timeouts for the different possible scenarios
4346  * in the default configuration.
4347  */
4348 static const uint32_t
4349 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4350         {
4351                 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
4352                 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
4353         },
4354         {
4355                 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
4356                 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
4357         },
4358         {
4359                 htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
4360                 htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
4361         },
4362         {
4363                 htole32(IWM_SF_BA_AGING_TIMER_DEF),
4364                 htole32(IWM_SF_BA_IDLE_TIMER_DEF)
4365         },
4366         {
4367                 htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
4368                 htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
4369         },
4370 };
4371
4372 /*
4373  * Aging and idle timeouts for the different possible scenarios
4374  * in a single-BSS MAC configuration.
4375  */
4376 static const uint32_t
4377 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4378         {
4379                 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
4380                 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
4381         },
4382         {
4383                 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
4384                 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
4385         },
4386         {
4387                 htole32(IWM_SF_MCAST_AGING_TIMER),
4388                 htole32(IWM_SF_MCAST_IDLE_TIMER)
4389         },
4390         {
4391                 htole32(IWM_SF_BA_AGING_TIMER),
4392                 htole32(IWM_SF_BA_IDLE_TIMER)
4393         },
4394         {
4395                 htole32(IWM_SF_TX_RE_AGING_TIMER),
4396                 htole32(IWM_SF_TX_RE_IDLE_TIMER)
4397         },
4398 };
4399
4400 static void
4401 iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
4402     struct ieee80211_node *ni)
4403 {
4404         int i, j, watermark;
4405
4406         sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
4407
4408         /*
4409          * If we are in the association flow, check the antenna configuration
4410          * capabilities of the AP station and choose the watermark accordingly.
4411          */
4412         if (ni) {
4413                 if (ni->ni_flags & IEEE80211_NODE_HT) {
4414 #ifdef notyet
4415                         if (ni->ni_rxmcs[2] != 0)
4416                                 watermark = IWM_SF_W_MARK_MIMO3;
4417                         else if (ni->ni_rxmcs[1] != 0)
4418                                 watermark = IWM_SF_W_MARK_MIMO2;
4419                         else
4420 #endif
4421                                 watermark = IWM_SF_W_MARK_SISO;
4422                 } else {
4423                         watermark = IWM_SF_W_MARK_LEGACY;
4424                 }
4425         /* default watermark value for unassociated mode. */
4426         } else {
4427                 watermark = IWM_SF_W_MARK_MIMO2;
4428         }
4429         sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
4430
4431         for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
4432                 for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
4433                         sf_cmd->long_delay_timeouts[i][j] =
4434                                         htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
4435                 }
4436         }
4437
4438         if (ni) {
4439                 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
4440                        sizeof(iwm_sf_full_timeout));
4441         } else {
4442                 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
4443                        sizeof(iwm_sf_full_timeout_def));
4444         }
4445 }
4446
4447 static int
4448 iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4449 {
4450         struct ieee80211com *ic = &sc->sc_ic;
4451         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4452         struct iwm_sf_cfg_cmd sf_cmd = {
4453                 .state = htole32(IWM_SF_FULL_ON),
4454         };
4455         int ret = 0;
4456
4457         if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
4458                 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4459
4460         switch (new_state) {
4461         case IWM_SF_UNINIT:
4462         case IWM_SF_INIT_OFF:
4463                 iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4464                 break;
4465         case IWM_SF_FULL_ON:
4466                 iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4467                 break;
4468         default:
4469                 IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4470                     "Invalid state: %d. not sending Smart Fifo cmd\n",
4471                           new_state);
4472                 return EINVAL;
4473         }
4474
4475         ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4476                                    sizeof(sf_cmd), &sf_cmd);
4477         return ret;
4478 }
4479
4480 static int
4481 iwm_send_bt_init_conf(struct iwm_softc *sc)
4482 {
4483         struct iwm_bt_coex_cmd bt_cmd;
4484
4485         bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4486         bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4487
4488         return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4489             &bt_cmd);
4490 }
4491
4492 static int
4493 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4494 {
4495         struct iwm_mcc_update_cmd mcc_cmd;
4496         struct iwm_host_cmd hcmd = {
4497                 .id = IWM_MCC_UPDATE_CMD,
4498                 .flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4499                 .data = { &mcc_cmd },
4500         };
4501         int ret;
4502 #ifdef IWM_DEBUG
4503         struct iwm_rx_packet *pkt;
4504         struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4505         struct iwm_mcc_update_resp *mcc_resp;
4506         int n_channels;
4507         uint16_t mcc;
4508 #endif
4509         int resp_v2 = isset(sc->sc_enabled_capa,
4510             IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4511
4512         memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4513         mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4514         if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4515             isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
4516                 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4517         else
4518                 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4519
4520         if (resp_v2)
4521                 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4522         else
4523                 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4524
4525         IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4526             "send MCC update to FW with '%c%c' src = %d\n",
4527             alpha2[0], alpha2[1], mcc_cmd.source_id);
4528
4529         ret = iwm_send_cmd(sc, &hcmd);
4530         if (ret)
4531                 return ret;
4532
4533 #ifdef IWM_DEBUG
4534         pkt = hcmd.resp_pkt;
4535
4536         /* Extract MCC response */
4537         if (resp_v2) {
4538                 mcc_resp = (void *)pkt->data;
4539                 mcc = mcc_resp->mcc;
4540                 n_channels =  le32toh(mcc_resp->n_channels);
4541         } else {
4542                 mcc_resp_v1 = (void *)pkt->data;
4543                 mcc = mcc_resp_v1->mcc;
4544                 n_channels =  le32toh(mcc_resp_v1->n_channels);
4545         }
4546
4547         /* Workaround for a FW/NVM issue - it returns 0x00 for the world domain */
4548         if (mcc == 0)
4549                 mcc = 0x3030;  /* "00" - world */
4550
4551         IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4552             "regulatory domain '%c%c' (%d channels available)\n",
4553             mcc >> 8, mcc & 0xff, n_channels);
4554 #endif
4555         iwm_free_resp(sc, &hcmd);
4556
4557         return 0;
4558 }
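
#if 0
/*
 * Illustrative sketch only, not part of the driver: how an alpha2 country
 * code is packed into the 16-bit MCC field above (before the htole16()
 * byte-order conversion).  For the "ZZ" wildcard passed from iwm_init_hw()
 * this yields ('Z' << 8) | 'Z' = 0x5a5a; the firmware reports 0x3030 ("00")
 * for the world domain, which is the workaround case handled in the debug
 * path above.  The function name is made up for illustration.
 */
static uint16_t
iwm_example_pack_mcc(const char *alpha2)
{
        return ((uint16_t)(alpha2[0] << 8 | alpha2[1]));
}
#endif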
4559
4560 static void
4561 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4562 {
4563         struct iwm_host_cmd cmd = {
4564                 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4565                 .len = { sizeof(uint32_t), },
4566                 .data = { &backoff, },
4567         };
4568
4569         if (iwm_send_cmd(sc, &cmd) != 0) {
4570                 device_printf(sc->sc_dev,
4571                     "failed to change thermal tx backoff\n");
4572         }
4573 }
4574
4575 static int
4576 iwm_init_hw(struct iwm_softc *sc)
4577 {
4578         struct ieee80211com *ic = &sc->sc_ic;
4579         int error, i, ac;
4580
4581         if ((error = iwm_start_hw(sc)) != 0) {
4582                 printf("iwm_start_hw: failed %d\n", error);
4583                 return error;
4584         }
4585
4586         if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4587                 printf("iwm_run_init_mvm_ucode: failed %d\n", error);
4588                 return error;
4589         }
4590
4591         /*
4592          * We should stop and restart the HW since the INIT
4593          * image has just been loaded.
4594          */
4595         iwm_stop_device(sc);
4596         if ((error = iwm_start_hw(sc)) != 0) {
4597                 device_printf(sc->sc_dev, "could not initialize hardware\n");
4598                 return error;
4599         }
4600
4601         /* Restart, this time with the regular firmware. */
4602         error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
4603         if (error) {
4604                 device_printf(sc->sc_dev, "could not load firmware\n");
4605                 goto error;
4606         }
4607
4608         if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4609                 device_printf(sc->sc_dev, "bt init conf failed\n");
4610                 goto error;
4611         }
4612
4613         if ((error = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc))) != 0) {
4614                 device_printf(sc->sc_dev, "antenna config failed\n");
4615                 goto error;
4616         }
4617
4618         /* Send the PHY DB control command and then the PHY DB calibration. */
4619         if ((error = iwm_send_phy_db_data(sc)) != 0) {
4620                 device_printf(sc->sc_dev, "phy_db_data failed\n");
4621                 goto error;
4622         }
4623
4624         if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4625                 device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4626                 goto error;
4627         }
4628
4629         /* Add auxiliary station for scanning */
4630         if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4631                 device_printf(sc->sc_dev, "add_aux_sta failed\n");
4632                 goto error;
4633         }
4634
4635         for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4636                 /*
4637                  * The channel used here isn't relevant as it's
4638                  * going to be overwritten in the other flows.
4639                  * For now use the first channel we have.
4640                  */
4641                 if ((error = iwm_mvm_phy_ctxt_add(sc,
4642                     &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4643                         goto error;
4644         }
4645
4646         /* Initialize tx backoffs to the minimum. */
4647         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
4648                 iwm_mvm_tt_tx_backoff(sc, 0);
4649
4650         error = iwm_mvm_power_update_device(sc);
4651         if (error)
4652                 goto error;
4653
4654         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
4655                 if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4656                         goto error;
4657         }
4658
4659         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4660                 if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4661                         goto error;
4662         }
4663
4664         /* Enable Tx queues. */
4665         for (ac = 0; ac < WME_NUM_AC; ac++) {
4666                 error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4667                     iwm_mvm_ac_to_tx_fifo[ac]);
4668                 if (error)
4669                         goto error;
4670         }
4671
4672         if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4673                 device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4674                 goto error;
4675         }
4676
4677         return 0;
4678
4679  error:
4680         iwm_stop_device(sc);
4681         return error;
4682 }
4683
4684 /* Allow multicast from our BSSID. */
4685 static int
4686 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4687 {
4688         struct ieee80211_node *ni = vap->iv_bss;
4689         struct iwm_mcast_filter_cmd *cmd;
4690         size_t size;
4691         int error;
4692
4693         size = roundup(sizeof(*cmd), 4);
4694         cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4695         if (cmd == NULL)
4696                 return ENOMEM;
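        /*
         * Note: with count == 0 and pass_all == 1 this does not install a
         * real multicast filter; the firmware is asked to pass all multicast
         * frames.  (Per the iwlwifi description, filter_own filters out
         * multicast frames that we sent ourselves.)
         */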
4697         cmd->filter_own = 1;
4698         cmd->port_id = 0;
4699         cmd->count = 0;
4700         cmd->pass_all = 1;
4701         IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4702
4703         error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4704             IWM_CMD_SYNC, size, cmd);
4705         free(cmd, M_DEVBUF);
4706
4707         return (error);
4708 }
4709
4710 /*
4711  * ifnet interfaces
4712  */
4713
4714 static void
4715 iwm_init(struct iwm_softc *sc)
4716 {
4717         int error;
4718
4719         if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4720                 return;
4721         }
4722         sc->sc_generation++;
4723         sc->sc_flags &= ~IWM_FLAG_STOPPED;
4724
4725         if ((error = iwm_init_hw(sc)) != 0) {
4726                 printf("iwm_init_hw failed %d\n", error);
4727                 iwm_stop(sc);
4728                 return;
4729         }
4730
4731         /*
4732          * OK, the firmware is loaded and we are running.
4733          */
4734         sc->sc_flags |= IWM_FLAG_HW_INITED;
4735         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4736 }
4737
4738 static int
4739 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4740 {
4741         struct iwm_softc *sc;
4742         int error;
4743
4744         sc = ic->ic_softc;
4745
4746         IWM_LOCK(sc);
4747         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4748                 IWM_UNLOCK(sc);
4749                 return (ENXIO);
4750         }
4751         error = mbufq_enqueue(&sc->sc_snd, m);
4752         if (error) {
4753                 IWM_UNLOCK(sc);
4754                 return (error);
4755         }
4756         iwm_start(sc);
4757         IWM_UNLOCK(sc);
4758         return (0);
4759 }
4760
4761 /*
4762  * Dequeue packets from the send queue and hand them to iwm_tx().
4763  */
4764 static void
4765 iwm_start(struct iwm_softc *sc)
4766 {
4767         struct ieee80211_node *ni;
4768         struct mbuf *m;
4769         int ac = 0;
4770
4771         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4772         while (sc->qfullmsk == 0 &&
4773                 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4774                 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4775                 if (iwm_tx(sc, m, ni, ac) != 0) {
4776                         if_inc_counter(ni->ni_vap->iv_ifp,
4777                             IFCOUNTER_OERRORS, 1);
4778                         ieee80211_free_node(ni);
4779                         continue;
4780                 }
4781                 sc->sc_tx_timer = 15;
4782         }
4783         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4784 }
4785
4786 static void
4787 iwm_stop(struct iwm_softc *sc)
4788 {
4789
4790         sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4791         sc->sc_flags |= IWM_FLAG_STOPPED;
4792         sc->sc_generation++;
4793         iwm_led_blink_stop(sc);
4794         sc->sc_tx_timer = 0;
4795         iwm_stop_device(sc);
4796 }
4797
4798 static void
4799 iwm_watchdog(void *arg)
4800 {
4801         struct iwm_softc *sc = arg;
4802         struct ieee80211com *ic = &sc->sc_ic;
4803
4804         if (sc->sc_tx_timer > 0) {
4805                 if (--sc->sc_tx_timer == 0) {
4806                         device_printf(sc->sc_dev, "device timeout\n");
4807 #ifdef IWM_DEBUG
4808                         iwm_nic_error(sc);
4809 #endif
4810                         ieee80211_restart_all(ic);
4811                         counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4812                         return;
4813                 }
4814         }
4815         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4816 }
4817
4818 static void
4819 iwm_parent(struct ieee80211com *ic)
4820 {
4821         struct iwm_softc *sc = ic->ic_softc;
4822         int startall = 0;
4823
4824         IWM_LOCK(sc);
4825         if (ic->ic_nrunning > 0) {
4826                 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4827                         iwm_init(sc);
4828                         startall = 1;
4829                 }
4830         } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4831                 iwm_stop(sc);
4832         IWM_UNLOCK(sc);
4833         if (startall)
4834                 ieee80211_start_all(ic);
4835 }
4836
4837 /*
4838  * The interrupt side of things
4839  */
4840
4841 /*
4842  * Error dumping routines are from iwlwifi/mvm/utils.c.
4843  */
4844
4845 /*
4846  * Note: This structure is read from the device with IO accesses,
4847  * and the reading already does the endian conversion. As it is
4848  * read with uint32_t-sized accesses, any members with a different size
4849  * need to be ordered correctly though!
4850  */
4851 struct iwm_error_event_table {
4852         uint32_t valid;         /* (nonzero) valid, (0) log is empty */
4853         uint32_t error_id;              /* type of error */
4854         uint32_t trm_hw_status0;        /* TRM HW status */
4855         uint32_t trm_hw_status1;        /* TRM HW status */
4856         uint32_t blink2;                /* branch link */
4857         uint32_t ilink1;                /* interrupt link */
4858         uint32_t ilink2;                /* interrupt link */
4859         uint32_t data1;         /* error-specific data */
4860         uint32_t data2;         /* error-specific data */
4861         uint32_t data3;         /* error-specific data */
4862         uint32_t bcon_time;             /* beacon timer */
4863         uint32_t tsf_low;               /* network timestamp function timer */
4864         uint32_t tsf_hi;                /* network timestamp function timer */
4865         uint32_t gp1;           /* GP1 timer register */
4866         uint32_t gp2;           /* GP2 timer register */
4867         uint32_t fw_rev_type;   /* firmware revision type */
4868         uint32_t major;         /* uCode version major */
4869         uint32_t minor;         /* uCode version minor */
4870         uint32_t hw_ver;                /* HW Silicon version */
4871         uint32_t brd_ver;               /* HW board version */
4872         uint32_t log_pc;                /* log program counter */
4873         uint32_t frame_ptr;             /* frame pointer */
4874         uint32_t stack_ptr;             /* stack pointer */
4875         uint32_t hcmd;          /* last host command header */
4876         uint32_t isr0;          /* isr status register LMPM_NIC_ISR0:
4877                                  * rxtx_flag */
4878         uint32_t isr1;          /* isr status register LMPM_NIC_ISR1:
4879                                  * host_flag */
4880         uint32_t isr2;          /* isr status register LMPM_NIC_ISR2:
4881                                  * enc_flag */
4882         uint32_t isr3;          /* isr status register LMPM_NIC_ISR3:
4883                                  * time_flag */
4884         uint32_t isr4;          /* isr status register LMPM_NIC_ISR4:
4885                                  * wico interrupt */
4886         uint32_t last_cmd_id;   /* last HCMD id handled by the firmware */
4887         uint32_t wait_event;            /* wait event() caller address */
4888         uint32_t l2p_control;   /* L2pControlField */
4889         uint32_t l2p_duration;  /* L2pDurationField */
4890         uint32_t l2p_mhvalid;   /* L2pMhValidBits */
4891         uint32_t l2p_addr_match;        /* L2pAddrMatchStat */
4892         uint32_t lmpm_pmg_sel;  /* indicate which clocks are turned on
4893                                  * (LMPM_PMG_SEL) */
4894         uint32_t u_timestamp;   /* indicates the date and time of the
4895                                  * compilation */
4896         uint32_t flow_handler;  /* FH read/write pointers, RX credit */
4897 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
4898
4899 /*
4900  * UMAC error struct - relevant starting from family 8000 chip.
4901  * Note: This structure is read from the device with IO accesses,
4902  * and the reading already does the endian conversion. As it is
4903  * read with u32-sized accesses, any members with a different size
4904  * need to be ordered correctly though!
4905  */
4906 struct iwm_umac_error_event_table {
4907         uint32_t valid;         /* (nonzero) valid, (0) log is empty */
4908         uint32_t error_id;      /* type of error */
4909         uint32_t blink1;        /* branch link */
4910         uint32_t blink2;        /* branch link */
4911         uint32_t ilink1;        /* interrupt link */
4912         uint32_t ilink2;        /* interrupt link */
4913         uint32_t data1;         /* error-specific data */
4914         uint32_t data2;         /* error-specific data */
4915         uint32_t data3;         /* error-specific data */
4916         uint32_t umac_major;
4917         uint32_t umac_minor;
4918         uint32_t frame_pointer; /* core register 27*/
4919         uint32_t stack_pointer; /* core register 28 */
4920         uint32_t cmd_header;    /* latest host cmd sent to UMAC */
4921         uint32_t nic_isr_pref;  /* ISR status register */
4922 } __packed;
4923
4924 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
4925 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
4926
4927 #ifdef IWM_DEBUG
4928 struct {
4929         const char *name;
4930         uint8_t num;
4931 } advanced_lookup[] = {
4932         { "NMI_INTERRUPT_WDG", 0x34 },
4933         { "SYSASSERT", 0x35 },
4934         { "UCODE_VERSION_MISMATCH", 0x37 },
4935         { "BAD_COMMAND", 0x38 },
4936         { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
4937         { "FATAL_ERROR", 0x3D },
4938         { "NMI_TRM_HW_ERR", 0x46 },
4939         { "NMI_INTERRUPT_TRM", 0x4C },
4940         { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
4941         { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
4942         { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
4943         { "NMI_INTERRUPT_HOST", 0x66 },
4944         { "NMI_INTERRUPT_ACTION_PT", 0x7C },
4945         { "NMI_INTERRUPT_UNKNOWN", 0x84 },
4946         { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
4947         { "ADVANCED_SYSASSERT", 0 },
4948 };
4949
4950 static const char *
4951 iwm_desc_lookup(uint32_t num)
4952 {
4953         int i;
4954
4955         for (i = 0; i < nitems(advanced_lookup) - 1; i++)
4956                 if (advanced_lookup[i].num == num)
4957                         return advanced_lookup[i].name;
4958
4959         /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
4960         return advanced_lookup[i].name;
4961 }
4962
4963 static void
4964 iwm_nic_umac_error(struct iwm_softc *sc)
4965 {
4966         struct iwm_umac_error_event_table table;
4967         uint32_t base;
4968
4969         base = sc->sc_uc.uc_umac_error_event_table;
4970
4971         if (base < 0x800000) {
4972                 device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
4973                     base);
4974                 return;
4975         }
4976
4977         if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
4978                 device_printf(sc->sc_dev, "reading errlog failed\n");
4979                 return;
4980         }
4981
4982         if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
4983                 device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
4984                 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
4985                     sc->sc_flags, table.valid);
4986         }
4987
4988         device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
4989                 iwm_desc_lookup(table.error_id));
4990         device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
4991         device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
4992         device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
4993             table.ilink1);
4994         device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
4995             table.ilink2);
4996         device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
4997         device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
4998         device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
4999         device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5000         device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5001         device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5002             table.frame_pointer);
5003         device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5004             table.stack_pointer);
5005         device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5006         device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5007             table.nic_isr_pref);
5008 }
5009
5010 /*
5011  * Support for dumping the error log seemed like a good idea ...
5012  * but it's mostly hex junk and the only sensible thing is the
5013  * hw/ucode revision (which we know anyway).  Since it's here,
5014  * I'll just leave it in, just in case e.g. the Intel guys want to
5015  * help us decipher some "ADVANCED_SYSASSERT" later.
5016  */
5017 static void
5018 iwm_nic_error(struct iwm_softc *sc)
5019 {
5020         struct iwm_error_event_table table;
5021         uint32_t base;
5022
5023         device_printf(sc->sc_dev, "dumping device error log\n");
5024         base = sc->sc_uc.uc_error_event_table;
5025         if (base < 0x800000) {
5026                 device_printf(sc->sc_dev,
5027                     "Invalid error log pointer 0x%08x\n", base);
5028                 return;
5029         }
5030
5031         if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5032                 device_printf(sc->sc_dev, "reading errlog failed\n");
5033                 return;
5034         }
5035
5036         if (!table.valid) {
5037                 device_printf(sc->sc_dev, "errlog not found, skipping\n");
5038                 return;
5039         }
5040
5041         if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5042                 device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5043                 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5044                     sc->sc_flags, table.valid);
5045         }
5046
5047         device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5048             iwm_desc_lookup(table.error_id));
5049         device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5050             table.trm_hw_status0);
5051         device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5052             table.trm_hw_status1);
5053         device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5054         device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5055         device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5056         device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5057         device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5058         device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5059         device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5060         device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5061         device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5062         device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5063         device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5064         device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5065             table.fw_rev_type);
5066         device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5067         device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5068         device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5069         device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5070         device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5071         device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5072         device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5073         device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5074         device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5075         device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5076         device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5077         device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5078         device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5079         device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5080         device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5081         device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5082         device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5083         device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5084         device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5085
5086         if (sc->sc_uc.uc_umac_error_event_table)
5087                 iwm_nic_umac_error(sc);
5088 }
5089 #endif
5090
5091 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
5092
5093 /*
5094  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5095  * Basic structure from if_iwn.
5096  */
5097 static void
5098 iwm_notif_intr(struct iwm_softc *sc)
5099 {
5100         struct ieee80211com *ic = &sc->sc_ic;
5101         uint16_t hw;
5102
5103         bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5104             BUS_DMASYNC_POSTREAD);
5105
5106         hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5107
5108         /*
5109          * Process responses
5110          */
5111         while (sc->rxq.cur != hw) {
5112                 struct iwm_rx_ring *ring = &sc->rxq;
5113                 struct iwm_rx_data *data = &ring->data[ring->cur];
5114                 struct iwm_rx_packet *pkt;
5115                 struct iwm_cmd_response *cresp;
5116                 int qid, idx, code;
5117
5118                 bus_dmamap_sync(ring->data_dmat, data->map,
5119                     BUS_DMASYNC_POSTREAD);
5120                 pkt = mtod(data->m, struct iwm_rx_packet *);
5121
5122                 qid = pkt->hdr.qid & ~0x80;
5123                 idx = pkt->hdr.idx;
5124
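                /*
                 * IWM_WIDE_ID() folds the command group (carried in hdr.flags)
                 * and the opcode into a single "wide" command id, so that
                 * long-group commands (e.g. the UMAC scan notifications below)
                 * can be matched in the same switch as the legacy opcodes.
                 */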
5125                 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5126                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5127                     "rx packet qid=%d idx=%d type=%x %d %d\n",
5128                     pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, ring->cur, hw);
5129
5130                 /*
5131                  * We randomly get these from the firmware; no idea why.
5132                  * They at least seem harmless, so just ignore them for now.
5133                  */
5134                 if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
5135                     || pkt->len_n_flags == htole32(0x55550000))) {
5136                         ADVANCE_RXQ(sc);
5137                         continue;
5138                 }
5139
5140                 switch (code) {
5141                 case IWM_REPLY_RX_PHY_CMD:
5142                         iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
5143                         break;
5144
5145                 case IWM_REPLY_RX_MPDU_CMD:
5146                         iwm_mvm_rx_rx_mpdu(sc, pkt, data);
5147                         break;
5148
5149                 case IWM_TX_CMD:
5150                         iwm_mvm_rx_tx_cmd(sc, pkt, data);
5151                         break;
5152
5153                 case IWM_MISSED_BEACONS_NOTIFICATION: {
5154                         struct iwm_missed_beacons_notif *resp;
5155                         int missed;
5156
5157                         /* XXX look at mac_id to determine interface ID */
5158                         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5159
5160                         resp = (void *)pkt->data;
5161                         missed = le32toh(resp->consec_missed_beacons);
5162
5163                         IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5164                             "%s: MISSED_BEACON: mac_id=%d, "
5165                             "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5166                             "num_rx=%d\n",
5167                             __func__,
5168                             le32toh(resp->mac_id),
5169                             le32toh(resp->consec_missed_beacons_since_last_rx),
5170                             le32toh(resp->consec_missed_beacons),
5171                             le32toh(resp->num_expected_beacons),
5172                             le32toh(resp->num_recvd_beacons));
5173
5174                         /* Be paranoid */
5175                         if (vap == NULL)
5176                                 break;
5177
5178                         /* XXX no net80211 locking? */
5179                         if (vap->iv_state == IEEE80211_S_RUN &&
5180                             (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5181                                 if (missed > vap->iv_bmissthreshold) {
5182                                         /* XXX bad locking; turn into task */
5183                                         IWM_UNLOCK(sc);
5184                                         ieee80211_beacon_miss(ic);
5185                                         IWM_LOCK(sc);
5186                                 }
5187                         }
5188
5189                         break; }
5190
5191                 case IWM_MFUART_LOAD_NOTIFICATION:
5192                         break;
5193
5194                 case IWM_MVM_ALIVE: {
5195                         struct iwm_mvm_alive_resp_v1 *resp1;
5196                         struct iwm_mvm_alive_resp_v2 *resp2;
5197                         struct iwm_mvm_alive_resp_v3 *resp3;
5198
5199                         if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
5200                                 resp1 = (void *)pkt->data;
5201                                 sc->sc_uc.uc_error_event_table
5202                                     = le32toh(resp1->error_event_table_ptr);
5203                                 sc->sc_uc.uc_log_event_table
5204                                     = le32toh(resp1->log_event_table_ptr);
5205                                 sc->sched_base = le32toh(resp1->scd_base_ptr);
5206                                 if (resp1->status == IWM_ALIVE_STATUS_OK)
5207                                         sc->sc_uc.uc_ok = 1;
5208                                 else
5209                                         sc->sc_uc.uc_ok = 0;
5210                         }
5211
5212                         if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
5213                                 resp2 = (void *)pkt->data;
5214                                 sc->sc_uc.uc_error_event_table
5215                                     = le32toh(resp2->error_event_table_ptr);
5216                                 sc->sc_uc.uc_log_event_table
5217                                     = le32toh(resp2->log_event_table_ptr);
5218                                 sc->sched_base = le32toh(resp2->scd_base_ptr);
5219                                 sc->sc_uc.uc_umac_error_event_table
5220                                     = le32toh(resp2->error_info_addr);
5221                                 if (resp2->status == IWM_ALIVE_STATUS_OK)
5222                                         sc->sc_uc.uc_ok = 1;
5223                                 else
5224                                         sc->sc_uc.uc_ok = 0;
5225                         }
5226
5227                         if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
5228                                 resp3 = (void *)pkt->data;
5229                                 sc->sc_uc.uc_error_event_table
5230                                     = le32toh(resp3->error_event_table_ptr);
5231                                 sc->sc_uc.uc_log_event_table
5232                                     = le32toh(resp3->log_event_table_ptr);
5233                                 sc->sched_base = le32toh(resp3->scd_base_ptr);
5234                                 sc->sc_uc.uc_umac_error_event_table
5235                                     = le32toh(resp3->error_info_addr);
5236                                 if (resp3->status == IWM_ALIVE_STATUS_OK)
5237                                         sc->sc_uc.uc_ok = 1;
5238                                 else
5239                                         sc->sc_uc.uc_ok = 0;
5240                         }
5241
5242                         sc->sc_uc.uc_intr = 1;
5243                         wakeup(&sc->sc_uc);
5244                         break; }
5245
5246                 case IWM_CALIB_RES_NOTIF_PHY_DB: {
5247                         struct iwm_calib_res_notif_phy_db *phy_db_notif;
5248                         phy_db_notif = (void *)pkt->data;
5249
5250                         iwm_phy_db_set_section(sc, phy_db_notif);
5251
5252                         break; }
5253
5254                 case IWM_STATISTICS_NOTIFICATION: {
5255                         struct iwm_notif_statistics *stats;
5256                         stats = (void *)pkt->data;
5257                         memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5258                         sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
5259                         break; }
5260
5261                 case IWM_NVM_ACCESS_CMD:
5262                 case IWM_MCC_UPDATE_CMD:
5263                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
5264                                 memcpy(sc->sc_cmd_resp,
5265                                     pkt, sizeof(sc->sc_cmd_resp));
5266                         }
5267                         break;
5268
5269                 case IWM_MCC_CHUB_UPDATE_CMD: {
5270                         struct iwm_mcc_chub_notif *notif;
5271                         notif = (void *)pkt->data;
5272
5273                         sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5274                         sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5275                         sc->sc_fw_mcc[2] = '\0';
5276                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
5277                             "fw source %d sent CC '%s'\n",
5278                             notif->source_id, sc->sc_fw_mcc);
5279                         break; }
5280
5281                 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5282                         break;
5283
5284                 case IWM_PHY_CONFIGURATION_CMD:
5285                 case IWM_TX_ANT_CONFIGURATION_CMD:
5286                 case IWM_ADD_STA:
5287                 case IWM_MAC_CONTEXT_CMD:
5288                 case IWM_REPLY_SF_CFG_CMD:
5289                 case IWM_POWER_TABLE_CMD:
5290                 case IWM_PHY_CONTEXT_CMD:
5291                 case IWM_BINDING_CONTEXT_CMD:
5292                 case IWM_TIME_EVENT_CMD:
5293                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5294                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5295                 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5296                 case IWM_REPLY_BEACON_FILTERING_CMD:
5297                 case IWM_MAC_PM_POWER_TABLE:
5298                 case IWM_TIME_QUOTA_CMD:
5299                 case IWM_REMOVE_STA:
5300                 case IWM_TXPATH_FLUSH:
5301                 case IWM_LQ_CMD:
5302                 case IWM_BT_CONFIG:
5303                 case IWM_REPLY_THERMAL_MNG_BACKOFF:
5304                         cresp = (void *)pkt->data;
5305                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
5306                                 memcpy(sc->sc_cmd_resp,
5307                                     pkt, sizeof(*pkt)+sizeof(*cresp));
5308                         }
5309                         break;
5310
5311                 /* ignore */
5312                 case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
5313                         break;
5314
5315                 case IWM_INIT_COMPLETE_NOTIF:
5316                         sc->sc_init_complete = 1;
5317                         wakeup(&sc->sc_init_complete);
5318                         break;
5319
5320                 case IWM_SCAN_OFFLOAD_COMPLETE: {
5321                         struct iwm_periodic_scan_complete *notif;
5322                         notif = (void *)pkt->data;
5323                         break;
5324                 }
5325
5326                 case IWM_SCAN_ITERATION_COMPLETE: {
5327                         struct iwm_lmac_scan_complete_notif *notif;
5328                         notif = (void *)pkt->data;
5329                         ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5330                         break;
5331                 }
5332
5333                 case IWM_SCAN_COMPLETE_UMAC: {
5334                         struct iwm_umac_scan_complete *notif;
5335                         notif = (void *)pkt->data;
5336
5337                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
5338                             "UMAC scan complete, status=0x%x\n",
5339                             notif->status);
5340 #if 0   /* XXX This would be a duplicate scan end call */
5341                         taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
5342 #endif
5343                         break;
5344                 }
5345
5346                 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5347                         struct iwm_umac_scan_iter_complete_notif *notif;
5348                         notif = (void *)pkt->data;
5349
5350                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5351                             "complete, status=0x%x, %d channels scanned\n",
5352                             notif->status, notif->scanned_channels);
5353                         ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5354                         break;
5355                 }
5356
5357                 case IWM_REPLY_ERROR: {
5358                         struct iwm_error_resp *resp;
5359                         resp = (void *)pkt->data;
5360
5361                         device_printf(sc->sc_dev,
5362                             "firmware error 0x%x, cmd 0x%x\n",
5363                             le32toh(resp->error_type),
5364                             resp->cmd_id);
5365                         break;
5366                 }
5367
5368                 case IWM_TIME_EVENT_NOTIFICATION: {
5369                         struct iwm_time_event_notif *notif;
5370                         notif = (void *)pkt->data;
5371
5372                         IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5373                             "TE notif status = 0x%x action = 0x%x\n",
5374                             notif->status, notif->action);
5375                         break;
5376                 }
5377
5378                 case IWM_MCAST_FILTER_CMD:
5379                         break;
5380
5381                 case IWM_SCD_QUEUE_CFG: {
5382                         struct iwm_scd_txq_cfg_rsp *rsp;
5383                         rsp = (void *)pkt->data;
5384
5385                         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5386                             "queue cfg token=0x%x sta_id=%d "
5387                             "tid=%d scd_queue=%d\n",
5388                             rsp->token, rsp->sta_id, rsp->tid,
5389                             rsp->scd_queue);
5390                         break;
5391                 }
5392
5393                 default:
5394                         device_printf(sc->sc_dev,
5395                             "frame %d/%d %x UNHANDLED (this should "
5396                             "not happen)\n", qid, idx,
5397                             pkt->len_n_flags);
5398                         break;
5399                 }
5400
5401                 /*
5402                  * Why test bit 0x80?  The Linux driver:
5403                  *
5404                  * There is one exception:  uCode sets bit 15 when it
5405                  * originates the response/notification, i.e. when the
5406                  * response/notification is not a direct response to a
5407                  * command sent by the driver.  For example, uCode issues
5408                  * IWM_REPLY_RX when it sends a received frame to the driver;
5409                  * it is not a direct response to any driver command.
5410                  *
5411                  * Ok, so since when is 7 == 15?  Well, the Linux driver
5412                  * uses a slightly different format for pkt->hdr, and "qid"
5413                  * is actually the upper byte of a two-byte field.
5414                  */
5415                 if (!(pkt->hdr.qid & (1 << 7))) {
5416                         iwm_cmd_done(sc, pkt);
5417                 }
5418
5419                 ADVANCE_RXQ(sc);
5420         }
5421
5422         IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
5423             IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
5424
5425         /*
5426          * Tell the firmware what we have processed.
5427          * It seems the hardware gets upset unless we align
5428          * the write to a multiple of 8.
5429          */
5430         hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5431         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
5432 }
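
#if 0
/*
 * Illustrative sketch only, not part of the driver: the RX write pointer
 * handed back to the firmware at the end of iwm_notif_intr() is the last
 * processed ring index, wrapped around the ring and rounded down to a
 * multiple of 8.  For example, hw == 13 yields 8, and hw == 0 wraps to
 * (IWM_RX_RING_COUNT - 1) & ~7.  The function name is made up for
 * illustration.
 */
static uint16_t
iwm_example_rx_wptr(uint16_t hw)
{
        hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
        return (hw & ~7);
}
#endif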
5433
5434 static void
5435 iwm_intr(void *arg)
5436 {
5437         struct iwm_softc *sc = arg;
5438         int handled = 0;
5439         int r1, r2, rv = 0;
5440         int isperiodic = 0;
5441
5442         IWM_LOCK(sc);
5443         IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5444
5445         if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5446                 uint32_t *ict = sc->ict_dma.vaddr;
5447                 int tmp;
5448
5449                 tmp = htole32(ict[sc->ict_cur]);
5450                 if (!tmp)
5451                         goto out_ena;
5452
5453                 /*
5454                  * OK, there was something.  Keep plowing until we have it all.
5455                  */
5456                 r1 = r2 = 0;
5457                 while (tmp) {
5458                         r1 |= tmp;
5459                         ict[sc->ict_cur] = 0;
5460                         sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5461                         tmp = htole32(ict[sc->ict_cur]);
5462                 }
5463
5464                 /* this is where the fun begins.  don't ask */
5465                 if (r1 == 0xffffffff)
5466                         r1 = 0;
5467
5468                 /* I am not expected to understand this. */
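                /*
                 * (Per the Linux iwlwifi driver, this works around a hardware
                 * bug: the RX bit -- bit 15 here, bit 31 after the expansion
                 * below -- can be lost when interrupt coalescing is used,
                 * while bits 18 and 19 stay set, so they are used to restore
                 * it.  The expansion then moves bits 8-15 back up to bits
                 * 24-31 of the CSR_INT-style word.)
                 */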
5469                 if (r1 & 0xc0000)
5470                         r1 |= 0x8000;
5471                 r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5472         } else {
5473                 r1 = IWM_READ(sc, IWM_CSR_INT);
5474                 /* "hardware gone" (where, fishing?) */
5475                 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5476                         goto out;
5477                 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5478         }
5479         if (r1 == 0 && r2 == 0) {
5480                 goto out_ena;
5481         }
5482
5483         IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5484
5485         /* ignored */
5486         handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
5487
5488         if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5489                 int i;
5490                 struct ieee80211com *ic = &sc->sc_ic;
5491                 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5492
5493 #ifdef IWM_DEBUG
5494                 iwm_nic_error(sc);
5495 #endif
5496                 /* Dump driver status (TX and RX rings) while we're here. */
5497                 device_printf(sc->sc_dev, "driver status:\n");
5498                 for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5499                         struct iwm_tx_ring *ring = &sc->txq[i];
5500                         device_printf(sc->sc_dev,
5501                             "  tx ring %2d: qid=%-2d cur=%-3d "
5502                             "queued=%-3d\n",
5503                             i, ring->qid, ring->cur, ring->queued);
5504                 }
5505                 device_printf(sc->sc_dev,
5506                     "  rx ring: cur=%d\n", sc->rxq.cur);
5507                 device_printf(sc->sc_dev,
5508                     "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5509
5510                 /* Don't stop the device; just do a VAP restart */
5511                 IWM_UNLOCK(sc);
5512
5513                 if (vap == NULL) {
5514                         printf("%s: null vap\n", __func__);
5515                         return;
5516                 }
5517
5518                 device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5519                     "restarting\n", __func__, vap->iv_state);
5520
5521                 /* XXX TODO: turn this into a callout/taskqueue */
5522                 ieee80211_restart_all(ic);
5523                 return;
5524         }
5525
5526         if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5527                 handled |= IWM_CSR_INT_BIT_HW_ERR;
5528                 device_printf(sc->sc_dev, "hardware error, stopping device\n");
5529                 iwm_stop(sc);
5530                 rv = 1;
5531                 goto out;
5532         }
5533
5534         /* firmware chunk loaded */
5535         if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5536                 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5537                 handled |= IWM_CSR_INT_BIT_FH_TX;
5538                 sc->sc_fw_chunk_done = 1;
5539                 wakeup(&sc->sc_fw);
5540         }
5541
5542         if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5543                 handled |= IWM_CSR_INT_BIT_RF_KILL;
5544                 if (iwm_check_rfkill(sc)) {
5545                         device_printf(sc->sc_dev,
5546                             "%s: rfkill switch, disabling interface\n",
5547                             __func__);
5548                         iwm_stop(sc);
5549                 }
5550         }
5551
5552         /*
5553          * Like the Linux driver, arm a periodic interrupt to catch RX
5554          * notifications that could otherwise be lost in a race with the ISR.
5555          */
5556         if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5557                 handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5558                 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5559                 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5560                         IWM_WRITE_1(sc,
5561                             IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5562                 isperiodic = 1;
5563         }
5564
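             /*
              * FH/SW RX is pending, or the periodic interrupt fired; drain
              * the notification ring.
              */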
5565         if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5566                 handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5567                 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5568
5569                 iwm_notif_intr(sc);
5570
5571                 /* enable periodic interrupt, see above */
5572                 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5573                         IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5574                             IWM_CSR_INT_PERIODIC_ENA);
5575         }
5576
5577         if (__predict_false(r1 & ~handled))
5578                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5579                     "%s: unhandled interrupts: %x\n", __func__, r1);
5580         rv = 1;
5581
5582  out_ena:
5583         iwm_restore_interrupts(sc);
5584  out:
5585         IWM_UNLOCK(sc);
5586         return;
5587 }
5588
5589 /*
5590  * PCI autoconf glue: device IDs, probe and bus attach/detach.
5591  */
5592 #define PCI_VENDOR_INTEL                0x8086
5593 #define PCI_PRODUCT_INTEL_WL_3160_1     0x08b3
5594 #define PCI_PRODUCT_INTEL_WL_3160_2     0x08b4
5595 #define PCI_PRODUCT_INTEL_WL_3165_1     0x3165
5596 #define PCI_PRODUCT_INTEL_WL_3165_2     0x3166
5597 #define PCI_PRODUCT_INTEL_WL_7260_1     0x08b1
5598 #define PCI_PRODUCT_INTEL_WL_7260_2     0x08b2
5599 #define PCI_PRODUCT_INTEL_WL_7265_1     0x095a
5600 #define PCI_PRODUCT_INTEL_WL_7265_2     0x095b
5601 #define PCI_PRODUCT_INTEL_WL_8260_1     0x24f3
5602 #define PCI_PRODUCT_INTEL_WL_8260_2     0x24f4
5603
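     /* Known adapters: PCI device ID and the name reported by iwm_probe(). */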
5604 static const struct iwm_devices {
5605         uint16_t        device;
5606         const char      *name;
5607 } iwm_devices[] = {
5608         { PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
5609         { PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
5610         { PCI_PRODUCT_INTEL_WL_3165_1, "Intel Dual Band Wireless AC 3165" },
5611         { PCI_PRODUCT_INTEL_WL_3165_2, "Intel Dual Band Wireless AC 3165" },
5612         { PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
5613         { PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
5614         { PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
5615         { PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
5616         { PCI_PRODUCT_INTEL_WL_8260_1, "Intel Dual Band Wireless AC 8260" },
5617         { PCI_PRODUCT_INTEL_WL_8260_2, "Intel Dual Band Wireless AC 8260" },
5618 };
5619
5620 static int
5621 iwm_probe(device_t dev)
5622 {
5623         int i;
5624
5625         for (i = 0; i < nitems(iwm_devices); i++) {
5626                 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5627                     pci_get_device(dev) == iwm_devices[i].device) {
5628                         device_set_desc(dev, iwm_devices[i].name);
5629                         return (BUS_PROBE_DEFAULT);
5630                 }
5631         }
5632
5633         return (ENXIO);
5634 }
5635
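     /*
      * Select per-device parameters from the PCI device ID: firmware image,
      * device family, firmware DMA segment size, and the
      * host_interrupt_operation_mode flag.
      */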
5636 static int
5637 iwm_dev_check(device_t dev)
5638 {
5639         struct iwm_softc *sc;
5640
5641         sc = device_get_softc(dev);
5642
5643         sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
5644         switch (pci_get_device(dev)) {
5645         case PCI_PRODUCT_INTEL_WL_3160_1:
5646         case PCI_PRODUCT_INTEL_WL_3160_2:
5647                 sc->sc_fwname = "iwm3160fw";
5648                 sc->host_interrupt_operation_mode = 1;
5649                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5650                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5651                 return (0);
5652         case PCI_PRODUCT_INTEL_WL_3165_1:
5653         case PCI_PRODUCT_INTEL_WL_3165_2:
5654                 sc->sc_fwname = "iwm7265fw";
5655                 sc->host_interrupt_operation_mode = 0;
5656                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5657                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5658                 return (0);
5659         case PCI_PRODUCT_INTEL_WL_7260_1:
5660         case PCI_PRODUCT_INTEL_WL_7260_2:
5661                 sc->sc_fwname = "iwm7260fw";
5662                 sc->host_interrupt_operation_mode = 1;
5663                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5664                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5665                 return (0);
5666         case PCI_PRODUCT_INTEL_WL_7265_1:
5667         case PCI_PRODUCT_INTEL_WL_7265_2:
5668                 sc->sc_fwname = "iwm7265fw";
5669                 sc->host_interrupt_operation_mode = 0;
5670                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5671                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5672                 return (0);
5673         case PCI_PRODUCT_INTEL_WL_8260_1:
5674         case PCI_PRODUCT_INTEL_WL_8260_2:
5675                 sc->sc_fwname = "iwm8000Cfw";
5676                 sc->host_interrupt_operation_mode = 0;
5677                 sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
5678                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
5679                 return (0);
5680         default:
5681                 device_printf(dev, "unknown adapter type\n");
5682                 return (ENXIO);
5683         }
5684 }
5685
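     /*
      * PCI glue: map the BAR(0) register space, enable bus mastering, and
      * set up the interrupt handler (MSI if available, shared INTx otherwise).
      */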
5686 static int
5687 iwm_pci_attach(device_t dev)
5688 {
5689         struct iwm_softc *sc;
5690         int count, error, rid;
5691         uint16_t reg;
5692
5693         sc = device_get_softc(dev);
5694
5695         /* Clear device-specific "PCI retry timeout" register (41h). */
5696         reg = pci_read_config(dev, 0x40, sizeof(reg));
5697         pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
5698
5699         /* Enable bus-mastering and hardware bug workaround. */
5700         pci_enable_busmaster(dev);
5701         reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5702         /* When not using MSI, make sure the INTx status bit is not left set. */
5703         if (reg & PCIM_STATUS_INTxSTATE) {
5704                 reg &= ~PCIM_STATUS_INTxSTATE;
5705         }
5706         pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5707
5708         rid = PCIR_BAR(0);
5709         sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5710             RF_ACTIVE);
5711         if (sc->sc_mem == NULL) {
5712                 device_printf(sc->sc_dev, "can't map mem space\n");
5713                 return (ENXIO);
5714         }
5715         sc->sc_st = rman_get_bustag(sc->sc_mem);
5716         sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5717
5718         /* Install interrupt handler. */
5719         count = 1;
5720         rid = 0;
5721         if (pci_alloc_msi(dev, &count) == 0)
5722                 rid = 1;
5723         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5724             (rid != 0 ? 0 : RF_SHAREABLE));
5725         if (sc->sc_irq == NULL) {
5726                 device_printf(dev, "can't map interrupt\n");
5727                 return (ENXIO);
5728         }
5729         error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5730             NULL, iwm_intr, sc, &sc->sc_ih);
5731         if (error != 0) {
5732                 device_printf(dev, "can't establish interrupt\n");
5733                 return (ENXIO);
5734         }
5735         sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5736
5737         return (0);
5738 }
5739
5740 static void
5741 iwm_pci_detach(device_t dev)
5742 {
5743         struct iwm_softc *sc = device_get_softc(dev);
5744
5745         if (sc->sc_irq != NULL) {
5746                 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5747                 bus_release_resource(dev, SYS_RES_IRQ,
5748                     rman_get_rid(sc->sc_irq), sc->sc_irq);
5749                 pci_release_msi(dev);
5750         }
5751         if (sc->sc_mem != NULL)
5752                 bus_release_resource(dev, SYS_RES_MEMORY,
5753                     rman_get_rid(sc->sc_mem), sc->sc_mem);
5754 }
5755
5756
5757
5758 static int
5759 iwm_attach(device_t dev)
5760 {
5761         struct iwm_softc *sc = device_get_softc(dev);
5762         struct ieee80211com *ic = &sc->sc_ic;
5763         int error;
5764         int txq_i, i;
5765
5766         sc->sc_dev = dev;
5767         IWM_LOCK_INIT(sc);
5768         mbufq_init(&sc->sc_snd, ifqmaxlen);
5769         callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5770         callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
5771         TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5772
5773         /* PCI attach */
5774         error = iwm_pci_attach(dev);
5775         if (error != 0)
5776                 goto fail;
5777
5778         sc->sc_wantresp = -1;
5779
5780         /* Check device type */
5781         error = iwm_dev_check(dev);
5782         if (error != 0)
5783                 goto fail;
5784
5785         /*
5786          * We now start fiddling with the hardware
5787          */
5788         /*
5789          * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
5790          * changed, and the revision step now also includes bits 0-1 (there is
5791          * no more "dash" value). To keep hw_rev backwards compatible, we
5792          * store it in the old format.
5793          */
5794         if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
5795                 sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
5796                                 (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
5797
5798         if (iwm_prepare_card_hw(sc) != 0) {
5799                 device_printf(dev, "could not initialize hardware\n");
5800                 goto fail;
5801         }
5802
5803         if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
5804                 int ret;
5805                 uint32_t hw_step;
5806
5807                 /*
5808                  * To recognize a C-step part, the driver must read the
5809                  * chip version id located at the AUX bus MISC address.
5810                  */
5811                 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
5812                             IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
5813                 DELAY(2);
5814
5815                 ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
5816                                    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5817                                    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5818                                    25000);
5819                 if (!ret) {
5820                         device_printf(sc->sc_dev,
5821                             "Failed to wake up the nic\n");
5822                         goto fail;
5823                 }
5824
5825                 if (iwm_nic_lock(sc)) {
5826                         hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
5827                         hw_step |= IWM_ENABLE_WFPM;
5828                         iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
5829                         hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
5830                         hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
5831                         if (hw_step == 0x3)
5832                                 sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
5833                                                 (IWM_SILICON_C_STEP << 2);
5834                         iwm_nic_unlock(sc);
5835                 } else {
5836                         device_printf(sc->sc_dev, "Failed to lock the nic\n");
5837                         goto fail;
5838                 }
5839         }
5840
5841         /* Allocate DMA memory for firmware transfers. */
5842         if ((error = iwm_alloc_fwmem(sc)) != 0) {
5843                 device_printf(dev, "could not allocate memory for firmware\n");
5844                 goto fail;
5845         }
5846
5847         /* Allocate "Keep Warm" page. */
5848         if ((error = iwm_alloc_kw(sc)) != 0) {
5849                 device_printf(dev, "could not allocate keep warm page\n");
5850                 goto fail;
5851         }
5852
5853         /* We use ICT interrupts */
5854         if ((error = iwm_alloc_ict(sc)) != 0) {
5855                 device_printf(dev, "could not allocate ICT table\n");
5856                 goto fail;
5857         }
5858
5859         /* Allocate TX scheduler "rings". */
5860         if ((error = iwm_alloc_sched(sc)) != 0) {
5861                 device_printf(dev, "could not allocate TX scheduler rings\n");
5862                 goto fail;
5863         }
5864
5865         /* Allocate TX rings */
5866         for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
5867                 if ((error = iwm_alloc_tx_ring(sc,
5868                     &sc->txq[txq_i], txq_i)) != 0) {
5869                         device_printf(dev,
5870                             "could not allocate TX ring %d\n",
5871                             txq_i);
5872                         goto fail;
5873                 }
5874         }
5875
5876         /* Allocate RX ring. */
5877         if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
5878                 device_printf(dev, "could not allocate RX ring\n");
5879                 goto fail;
5880         }
5881
5882         /* Clear pending interrupts. */
5883         IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
5884
5885         ic->ic_softc = sc;
5886         ic->ic_name = device_get_nameunit(sc->sc_dev);
5887         ic->ic_phytype = IEEE80211_T_OFDM;      /* not the only PHY type, but unused */
5888         ic->ic_opmode = IEEE80211_M_STA;        /* default to BSS mode */
5889
5890         /* Set device capabilities. */
5891         ic->ic_caps =
5892             IEEE80211_C_STA |
5893             IEEE80211_C_WPA |           /* WPA/RSN */
5894             IEEE80211_C_WME |
5895             IEEE80211_C_SHSLOT |        /* short slot time supported */
5896             IEEE80211_C_SHPREAMBLE      /* short preamble supported */
5897 //          IEEE80211_C_BGSCAN          /* capable of bg scanning */
5898             ;
5899         /* Advertise full-offload scanning */
5900         ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
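             /* Initialize the PHY context table; channels are assigned to contexts later. */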
5901         for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
5902                 sc->sc_phyctxt[i].id = i;
5903                 sc->sc_phyctxt[i].color = 0;
5904                 sc->sc_phyctxt[i].ref = 0;
5905                 sc->sc_phyctxt[i].channel = NULL;
5906         }
5907
5908         /* Default noise floor */
5909         sc->sc_noise = -96;
5910
5911         /* Max RSSI */
5912         sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
5913
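             /*
              * Defer firmware load and net80211 attach to iwm_preinit(),
              * which runs from the config_intrhook once interrupts work.
              */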
5914         sc->sc_preinit_hook.ich_func = iwm_preinit;
5915         sc->sc_preinit_hook.ich_arg = sc;
5916         if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
5917                 device_printf(dev, "config_intrhook_establish failed\n");
5918                 goto fail;
5919         }
5920
5921 #ifdef IWM_DEBUG
5922         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
5923             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
5924             CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
5925 #endif
5926
5927         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5928             "<-%s\n", __func__);
5929
5930         return 0;
5931
5932         /* Free allocated memory if something failed during attachment. */
5933 fail:
5934         iwm_detach_local(sc, 0);
5935
5936         return ENXIO;
5937 }
5938
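     /*
      * Reject multicast/broadcast addresses (bit 0 of the first octet set)
      * and the all-zero address.
      */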
5939 static int
5940 iwm_is_valid_ether_addr(uint8_t *addr)
5941 {
5942         char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
5943
5944         if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
5945                 return (FALSE);
5946
5947         return (TRUE);
5948 }
5949
5950 static int
5951 iwm_update_edca(struct ieee80211com *ic)
5952 {
5953         struct iwm_softc *sc = ic->ic_softc;
5954
5955         device_printf(sc->sc_dev, "%s: called\n", __func__);
5956         return (0);
5957 }
5958
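     /*
      * Deferred attach, run from config_intrhook context: bring up the
      * hardware, run the init firmware to obtain the NVM contents, then
      * complete the net80211 attach.
      */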
5959 static void
5960 iwm_preinit(void *arg)
5961 {
5962         struct iwm_softc *sc = arg;
5963         device_t dev = sc->sc_dev;
5964         struct ieee80211com *ic = &sc->sc_ic;
5965         int error;
5966
5967         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5968             "->%s\n", __func__);
5969
5970         IWM_LOCK(sc);
5971         if ((error = iwm_start_hw(sc)) != 0) {
5972                 device_printf(dev, "could not initialize hardware\n");
5973                 IWM_UNLOCK(sc);
5974                 goto fail;
5975         }
5976
5977         error = iwm_run_init_mvm_ucode(sc, 1);
5978         iwm_stop_device(sc);
5979         if (error) {
5980                 IWM_UNLOCK(sc);
5981                 goto fail;
5982         }
5983         device_printf(dev,
5984             "hw rev 0x%x, fw ver %s, address %s\n",
5985             sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
5986             sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
5987
5988         /* not all hardware can do 5GHz band */
5989         if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
5990                 memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
5991                     sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
5992         IWM_UNLOCK(sc);
5993
5994         iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
5995             ic->ic_channels);
5996
5997         /*
5998          * At this point we've committed - if we fail to do setup,
5999          * we now also have to tear down the net80211 state.
6000          */
6001         ieee80211_ifattach(ic);
6002         ic->ic_vap_create = iwm_vap_create;
6003         ic->ic_vap_delete = iwm_vap_delete;
6004         ic->ic_raw_xmit = iwm_raw_xmit;
6005         ic->ic_node_alloc = iwm_node_alloc;
6006         ic->ic_scan_start = iwm_scan_start;
6007         ic->ic_scan_end = iwm_scan_end;
6008         ic->ic_update_mcast = iwm_update_mcast;
6009         ic->ic_getradiocaps = iwm_init_channel_map;
6010         ic->ic_set_channel = iwm_set_channel;
6011         ic->ic_scan_curchan = iwm_scan_curchan;
6012         ic->ic_scan_mindwell = iwm_scan_mindwell;
6013         ic->ic_wme.wme_update = iwm_update_edca;
6014         ic->ic_parent = iwm_parent;
6015         ic->ic_transmit = iwm_transmit;
6016         iwm_radiotap_attach(sc);
6017         if (bootverbose)
6018                 ieee80211_announce(ic);
6019
6020         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6021             "<-%s\n", __func__);
6022         config_intrhook_disestablish(&sc->sc_preinit_hook);
6023
6024         return;
6025 fail:
6026         config_intrhook_disestablish(&sc->sc_preinit_hook);
6027         iwm_detach_local(sc, 0);
6028 }
6029
6030 /*
6031  * Attach the interface to 802.11 radiotap.
6032  */
6033 static void
6034 iwm_radiotap_attach(struct iwm_softc *sc)
6035 {
6036         struct ieee80211com *ic = &sc->sc_ic;
6037
6038         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6039             "->%s begin\n", __func__);
6040         ieee80211_radiotap_attach(ic,
6041             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6042                 IWM_TX_RADIOTAP_PRESENT,
6043             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6044                 IWM_RX_RADIOTAP_PRESENT);
6045         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6046             "->%s end\n", __func__);
6047 }
6048
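     /*
      * Create the single supported VAP.  net80211's newstate handler is
      * saved in ivp and overridden with iwm_newstate().
      */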
6049 static struct ieee80211vap *
6050 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6051     enum ieee80211_opmode opmode, int flags,
6052     const uint8_t bssid[IEEE80211_ADDR_LEN],
6053     const uint8_t mac[IEEE80211_ADDR_LEN])
6054 {
6055         struct iwm_vap *ivp;
6056         struct ieee80211vap *vap;
6057
6058         if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6059                 return NULL;
6060         ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6061         vap = &ivp->iv_vap;
6062         ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6063         vap->iv_bmissthreshold = 10;            /* override default */
6064         /* Override with driver methods. */
6065         ivp->iv_newstate = vap->iv_newstate;
6066         vap->iv_newstate = iwm_newstate;
6067
6068         ieee80211_ratectl_init(vap);
6069         /* Complete setup. */
6070         ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6071             mac);
6072         ic->ic_opmode = opmode;
6073
6074         return vap;
6075 }
6076
6077 static void
6078 iwm_vap_delete(struct ieee80211vap *vap)
6079 {
6080         struct iwm_vap *ivp = IWM_VAP(vap);
6081
6082         ieee80211_ratectl_deinit(vap);
6083         ieee80211_vap_detach(vap);
6084         free(ivp, M_80211_VAP);
6085 }
6086
6087 static void
6088 iwm_scan_start(struct ieee80211com *ic)
6089 {
6090         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6091         struct iwm_softc *sc = ic->ic_softc;
6092         int error;
6093
6094         IWM_LOCK(sc);
6095         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6096                 error = iwm_mvm_umac_scan(sc);
6097         else
6098                 error = iwm_mvm_lmac_scan(sc);
6099         if (error != 0) {
6100                 device_printf(sc->sc_dev, "could not initiate scan\n");
6101                 IWM_UNLOCK(sc);
6102                 ieee80211_cancel_scan(vap);
6103         } else {
6104                 iwm_led_blink_start(sc);
6105                 IWM_UNLOCK(sc);
6106         }
6107 }
6108
6109 static void
6110 iwm_scan_end(struct ieee80211com *ic)
6111 {
6112         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6113         struct iwm_softc *sc = ic->ic_softc;
6114
6115         IWM_LOCK(sc);
6116         iwm_led_blink_stop(sc);
6117         if (vap->iv_state == IEEE80211_S_RUN)
6118                 iwm_mvm_led_enable(sc);
6119         IWM_UNLOCK(sc);
6120 }
6121
6122 static void
6123 iwm_update_mcast(struct ieee80211com *ic)
6124 {
6125 }
6126
6127 static void
6128 iwm_set_channel(struct ieee80211com *ic)
6129 {
6130 }
6131
6132 static void
6133 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6134 {
6135 }
6136
6137 static void
6138 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6139 {
6140         return;
6141 }
6142
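     /*
      * Serialized restart helper: wait for any init/stop in progress, stop
      * the hardware, and reinitialize it if the interface is still running.
      */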
6143 void
6144 iwm_init_task(void *arg1)
6145 {
6146         struct iwm_softc *sc = arg1;
6147
6148         IWM_LOCK(sc);
6149         while (sc->sc_flags & IWM_FLAG_BUSY)
6150                 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6151         sc->sc_flags |= IWM_FLAG_BUSY;
6152         iwm_stop(sc);
6153         if (sc->sc_ic.ic_nrunning > 0)
6154                 iwm_init(sc);
6155         sc->sc_flags &= ~IWM_FLAG_BUSY;
6156         wakeup(&sc->sc_flags);
6157         IWM_UNLOCK(sc);
6158 }
6159
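     /*
      * On resume, clear the PCI retry timeout again and reinitialize; if
      * iwm_suspend() flagged the device, let net80211 resume the VAPs.
      */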
6160 static int
6161 iwm_resume(device_t dev)
6162 {
6163         struct iwm_softc *sc = device_get_softc(dev);
6164         int do_reinit = 0;
6165         uint16_t reg;
6166
6167         /* Clear device-specific "PCI retry timeout" register (41h). */
6168         reg = pci_read_config(dev, 0x40, sizeof(reg));
6169         pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
6170         iwm_init_task(device_get_softc(dev));
6171
6172         IWM_LOCK(sc);
6173         if (sc->sc_flags & IWM_FLAG_SCANNING) {
6174                 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6175                 do_reinit = 1;
6176         }
6177         IWM_UNLOCK(sc);
6178
6179         if (do_reinit)
6180                 ieee80211_resume_all(&sc->sc_ic);
6181
6182         return 0;
6183 }
6184
6185 static int
6186 iwm_suspend(device_t dev)
6187 {
6188         int do_stop = 0;
6189         struct iwm_softc *sc = device_get_softc(dev);
6190
6191         do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6192
6193         ieee80211_suspend_all(&sc->sc_ic);
6194
6195         if (do_stop) {
6196                 IWM_LOCK(sc);
6197                 iwm_stop(sc);
6198                 sc->sc_flags |= IWM_FLAG_SCANNING;
6199                 IWM_UNLOCK(sc);
6200         }
6201
6202         return (0);
6203 }
6204
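     /*
      * Common teardown shared by detach and failed attach; do_net80211
      * selects whether the net80211 state is torn down as well.
      */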
6205 static int
6206 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6207 {
6208         struct iwm_fw_info *fw = &sc->sc_fw;
6209         device_t dev = sc->sc_dev;
6210         int i;
6211
6212         if (do_net80211)
6213                 ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6214
6215         callout_drain(&sc->sc_led_blink_to);
6216         callout_drain(&sc->sc_watchdog_to);
6217         iwm_stop_device(sc);
6218         if (do_net80211) {
6219                 ieee80211_ifdetach(&sc->sc_ic);
6220         }
6221
6222         iwm_phy_db_free(sc);
6223
6224         /* Free descriptor rings */
6225         iwm_free_rx_ring(sc, &sc->rxq);
6226         for (i = 0; i < nitems(sc->txq); i++)
6227                 iwm_free_tx_ring(sc, &sc->txq[i]);
6228
6229         /* Free firmware */
6230         if (fw->fw_fp != NULL)
6231                 iwm_fw_info_free(fw);
6232
6233         /* Free scheduler */
6234         iwm_dma_contig_free(&sc->sched_dma);
6235         iwm_dma_contig_free(&sc->ict_dma);
6236         iwm_dma_contig_free(&sc->kw_dma);
6237         iwm_dma_contig_free(&sc->fw_dma);
6238
6239         /* Finished with the hardware - detach things */
6240         iwm_pci_detach(dev);
6241
6242         mbufq_drain(&sc->sc_snd);
6243         IWM_LOCK_DESTROY(sc);
6244
6245         return (0);
6246 }
6247
6248 static int
6249 iwm_detach(device_t dev)
6250 {
6251         struct iwm_softc *sc = device_get_softc(dev);
6252
6253         return (iwm_detach_local(sc, 1));
6254 }
6255
6256 static device_method_t iwm_pci_methods[] = {
6257         /* Device interface */
6258         DEVMETHOD(device_probe,         iwm_probe),
6259         DEVMETHOD(device_attach,        iwm_attach),
6260         DEVMETHOD(device_detach,        iwm_detach),
6261         DEVMETHOD(device_suspend,       iwm_suspend),
6262         DEVMETHOD(device_resume,        iwm_resume),
6263
6264         DEVMETHOD_END
6265 };
6266
6267 static driver_t iwm_pci_driver = {
6268         "iwm",
6269         iwm_pci_methods,
6270         sizeof (struct iwm_softc)
6271 };
6272
6273 static devclass_t iwm_devclass;
6274
6275 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6276 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6277 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6278 MODULE_DEPEND(iwm, wlan, 1, 1, 1);