1 /*      $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $     */
2
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 #include <sys/cdefs.h>
106 __FBSDID("$FreeBSD$");
107
108 #include "opt_wlan.h"
109
110 #include <sys/param.h>
111 #include <sys/bus.h>
112 #include <sys/conf.h>
113 #include <sys/endian.h>
114 #include <sys/firmware.h>
115 #include <sys/kernel.h>
116 #include <sys/malloc.h>
117 #include <sys/mbuf.h>
118 #include <sys/mutex.h>
119 #include <sys/module.h>
120 #include <sys/proc.h>
121 #include <sys/rman.h>
122 #include <sys/socket.h>
123 #include <sys/sockio.h>
124 #include <sys/sysctl.h>
125 #include <sys/linker.h>
126
127 #include <machine/bus.h>
128 #include <machine/endian.h>
129 #include <machine/resource.h>
130
131 #include <dev/pci/pcivar.h>
132 #include <dev/pci/pcireg.h>
133
134 #include <net/bpf.h>
135
136 #include <net/if.h>
137 #include <net/if_var.h>
138 #include <net/if_arp.h>
139 #include <net/if_dl.h>
140 #include <net/if_media.h>
141 #include <net/if_types.h>
142
143 #include <netinet/in.h>
144 #include <netinet/in_systm.h>
145 #include <netinet/if_ether.h>
146 #include <netinet/ip.h>
147
148 #include <net80211/ieee80211_var.h>
149 #include <net80211/ieee80211_regdomain.h>
150 #include <net80211/ieee80211_ratectl.h>
151 #include <net80211/ieee80211_radiotap.h>
152
153 #include <dev/iwm/if_iwmreg.h>
154 #include <dev/iwm/if_iwmvar.h>
155 #include <dev/iwm/if_iwm_debug.h>
156 #include <dev/iwm/if_iwm_util.h>
157 #include <dev/iwm/if_iwm_binding.h>
158 #include <dev/iwm/if_iwm_phy_db.h>
159 #include <dev/iwm/if_iwm_mac_ctxt.h>
160 #include <dev/iwm/if_iwm_phy_ctxt.h>
161 #include <dev/iwm/if_iwm_time_event.h>
162 #include <dev/iwm/if_iwm_power.h>
163 #include <dev/iwm/if_iwm_scan.h>
164
165 #include <dev/iwm/if_iwm_pcie_trans.h>
166 #include <dev/iwm/if_iwm_led.h>
167
168 const uint8_t iwm_nvm_channels[] = {
169         /* 2.4 GHz */
170         1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
171         /* 5 GHz */
172         36, 40, 44, 48, 52, 56, 60, 64,
173         100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
174         149, 153, 157, 161, 165
175 };
176 _Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
177     "IWM_NUM_CHANNELS is too small");
178
179 const uint8_t iwm_nvm_channels_8000[] = {
180         /* 2.4 GHz */
181         1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
182         /* 5 GHz */
183         36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
184         96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
185         149, 153, 157, 161, 165, 169, 173, 177, 181
186 };
187 _Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
188     "IWM_NUM_CHANNELS_8000 is too small");
189
190 #define IWM_NUM_2GHZ_CHANNELS   14
191 #define IWM_N_HW_ADDR_MASK      0xF
192
193 /*
194  * XXX For now, there's simply a fixed set of rate table entries
195  * that are populated.
196  */
197 const struct iwm_rate {
198         uint8_t rate;
199         uint8_t plcp;
200 } iwm_rates[] = {
201         {   2,  IWM_RATE_1M_PLCP  },
202         {   4,  IWM_RATE_2M_PLCP  },
203         {  11,  IWM_RATE_5M_PLCP  },
204         {  22,  IWM_RATE_11M_PLCP },
205         {  12,  IWM_RATE_6M_PLCP  },
206         {  18,  IWM_RATE_9M_PLCP  },
207         {  24,  IWM_RATE_12M_PLCP },
208         {  36,  IWM_RATE_18M_PLCP },
209         {  48,  IWM_RATE_24M_PLCP },
210         {  72,  IWM_RATE_36M_PLCP },
211         {  96,  IWM_RATE_48M_PLCP },
212         { 108,  IWM_RATE_54M_PLCP },
213 };
214 #define IWM_RIDX_CCK    0
215 #define IWM_RIDX_OFDM   4
216 #define IWM_RIDX_MAX    (nitems(iwm_rates)-1)
217 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
218 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
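
/*
 * Editor's note: the sketch below is illustrative only and is not part of
 * the driver (it is fenced off with "#ifdef notyet" so it is never built).
 * It shows how the fixed iwm_rates[] table above is meant to be consumed:
 * given a legacy rate in 500 kb/s units, scan the table for the matching
 * entry and hand its PLCP code to the firmware.  The helper name is a
 * hypothetical example, not an existing driver function.
 */
#ifdef notyet
static uint8_t
iwm_example_rate2plcp(uint8_t rate)
{
	int i;

	for (i = 0; i <= IWM_RIDX_MAX; i++) {
		if (iwm_rates[i].rate == rate)
			return iwm_rates[i].plcp;
	}
	/* Fall back to the lowest (1 Mb/s CCK) entry if nothing matched. */
	return iwm_rates[IWM_RIDX_CCK].plcp;
}
#endif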
219
220 struct iwm_nvm_section {
221         uint16_t length;
222         uint8_t *data;
223 };
224
225 static int      iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
226 static int      iwm_firmware_store_section(struct iwm_softc *,
227                                            enum iwm_ucode_type,
228                                            const uint8_t *, size_t);
229 static int      iwm_set_default_calib(struct iwm_softc *, const void *);
230 static void     iwm_fw_info_free(struct iwm_fw_info *);
231 static int      iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
232 static void     iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
233 static int      iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
234                                      bus_size_t, bus_size_t);
235 static void     iwm_dma_contig_free(struct iwm_dma_info *);
236 static int      iwm_alloc_fwmem(struct iwm_softc *);
237 static int      iwm_alloc_sched(struct iwm_softc *);
238 static int      iwm_alloc_kw(struct iwm_softc *);
239 static int      iwm_alloc_ict(struct iwm_softc *);
240 static int      iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
241 static void     iwm_disable_rx_dma(struct iwm_softc *);
242 static void     iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
243 static void     iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
244 static int      iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
245                                   int);
246 static void     iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
247 static void     iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
248 static void     iwm_enable_interrupts(struct iwm_softc *);
249 static void     iwm_restore_interrupts(struct iwm_softc *);
250 static void     iwm_disable_interrupts(struct iwm_softc *);
251 static void     iwm_ict_reset(struct iwm_softc *);
252 static int      iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
253 static void     iwm_stop_device(struct iwm_softc *);
254 static void     iwm_mvm_nic_config(struct iwm_softc *);
255 static int      iwm_nic_rx_init(struct iwm_softc *);
256 static int      iwm_nic_tx_init(struct iwm_softc *);
257 static int      iwm_nic_init(struct iwm_softc *);
258 static int      iwm_enable_txq(struct iwm_softc *, int, int, int);
259 static int      iwm_post_alive(struct iwm_softc *);
260 static int      iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
261                                    uint16_t, uint8_t *, uint16_t *);
262 static int      iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
263                                      uint16_t *, size_t);
264 static uint32_t iwm_eeprom_channel_flags(uint16_t);
265 static void     iwm_add_channel_band(struct iwm_softc *,
266                     struct ieee80211_channel[], int, int *, int, size_t,
267                     const uint8_t[]);
268 static void     iwm_init_channel_map(struct ieee80211com *, int, int *,
269                     struct ieee80211_channel[]);
270 static int      iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
271                                    const uint16_t *, const uint16_t *,
272                                    const uint16_t *, const uint16_t *,
273                                    const uint16_t *);
274 static void     iwm_set_hw_address_8000(struct iwm_softc *,
275                                         struct iwm_nvm_data *,
276                                         const uint16_t *, const uint16_t *);
277 static int      iwm_get_sku(const struct iwm_softc *, const uint16_t *,
278                             const uint16_t *);
279 static int      iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
280 static int      iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
281                                   const uint16_t *);
282 static int      iwm_get_n_hw_addrs(const struct iwm_softc *,
283                                    const uint16_t *);
284 static void     iwm_set_radio_cfg(const struct iwm_softc *,
285                                   struct iwm_nvm_data *, uint32_t);
286 static int      iwm_parse_nvm_sections(struct iwm_softc *,
287                                        struct iwm_nvm_section *);
288 static int      iwm_nvm_init(struct iwm_softc *);
289 static int      iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
290                                        const uint8_t *, uint32_t);
291 static int      iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
292                                         const uint8_t *, uint32_t);
293 static int      iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
294 static int      iwm_load_cpu_sections_8000(struct iwm_softc *,
295                                            struct iwm_fw_sects *, int , int *);
296 static int      iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
297 static int      iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
298 static int      iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
299 static int      iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
300 static int      iwm_send_phy_cfg_cmd(struct iwm_softc *);
301 static int      iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
302                                               enum iwm_ucode_type);
303 static int      iwm_run_init_mvm_ucode(struct iwm_softc *, int);
304 static int      iwm_rx_addbuf(struct iwm_softc *, int, int);
305 static int      iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
306 static int      iwm_mvm_get_signal_strength(struct iwm_softc *,
307                                             struct iwm_rx_phy_info *);
308 static void     iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
309                                       struct iwm_rx_packet *,
310                                       struct iwm_rx_data *);
311 static int      iwm_get_noise(struct iwm_softc *sc,
312                     const struct iwm_mvm_statistics_rx_non_phy *);
313 static void     iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
314                                    struct iwm_rx_data *);
315 static int      iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
316                                          struct iwm_rx_packet *,
317                                          struct iwm_node *);
318 static void     iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
319                                   struct iwm_rx_data *);
320 static void     iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
321 #if 0
322 static void     iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
323                                  uint16_t);
324 #endif
325 static const struct iwm_rate *
326         iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
327                         struct mbuf *, struct iwm_tx_cmd *);
328 static int      iwm_tx(struct iwm_softc *, struct mbuf *,
329                        struct ieee80211_node *, int);
330 static int      iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
331                              const struct ieee80211_bpf_params *);
332 static int      iwm_mvm_flush_tx_path(struct iwm_softc *sc,
333                                       uint32_t tfd_msk, uint32_t flags);
334 static int      iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
335                                                 struct iwm_mvm_add_sta_cmd_v7 *,
336                                                 int *);
337 static int      iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
338                                        int);
339 static int      iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
340 static int      iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
341 static int      iwm_mvm_add_int_sta_common(struct iwm_softc *,
342                                            struct iwm_int_sta *,
343                                            const uint8_t *, uint16_t, uint16_t);
344 static int      iwm_mvm_add_aux_sta(struct iwm_softc *);
345 static int      iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
346 static int      iwm_auth(struct ieee80211vap *, struct iwm_softc *);
347 static int      iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
348 static int      iwm_release(struct iwm_softc *, struct iwm_node *);
349 static struct ieee80211_node *
350                 iwm_node_alloc(struct ieee80211vap *,
351                                const uint8_t[IEEE80211_ADDR_LEN]);
352 static void     iwm_setrates(struct iwm_softc *, struct iwm_node *);
353 static int      iwm_media_change(struct ifnet *);
354 static int      iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
355 static void     iwm_endscan_cb(void *, int);
356 static void     iwm_mvm_fill_sf_command(struct iwm_softc *,
357                                         struct iwm_sf_cfg_cmd *,
358                                         struct ieee80211_node *);
359 static int      iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
360 static int      iwm_send_bt_init_conf(struct iwm_softc *);
361 static int      iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
362 static void     iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
363 static int      iwm_init_hw(struct iwm_softc *);
364 static void     iwm_init(struct iwm_softc *);
365 static void     iwm_start(struct iwm_softc *);
366 static void     iwm_stop(struct iwm_softc *);
367 static void     iwm_watchdog(void *);
368 static void     iwm_parent(struct ieee80211com *);
369 #ifdef IWM_DEBUG
370 static const char *
371                 iwm_desc_lookup(uint32_t);
372 static void     iwm_nic_error(struct iwm_softc *);
373 static void     iwm_nic_umac_error(struct iwm_softc *);
374 #endif
375 static void     iwm_notif_intr(struct iwm_softc *);
376 static void     iwm_intr(void *);
377 static int      iwm_attach(device_t);
378 static int      iwm_is_valid_ether_addr(uint8_t *);
379 static void     iwm_preinit(void *);
380 static int      iwm_detach_local(struct iwm_softc *sc, int);
381 static void     iwm_init_task(void *);
382 static void     iwm_radiotap_attach(struct iwm_softc *);
383 static struct ieee80211vap *
384                 iwm_vap_create(struct ieee80211com *,
385                                const char [IFNAMSIZ], int,
386                                enum ieee80211_opmode, int,
387                                const uint8_t [IEEE80211_ADDR_LEN],
388                                const uint8_t [IEEE80211_ADDR_LEN]);
389 static void     iwm_vap_delete(struct ieee80211vap *);
390 static void     iwm_scan_start(struct ieee80211com *);
391 static void     iwm_scan_end(struct ieee80211com *);
392 static void     iwm_update_mcast(struct ieee80211com *);
393 static void     iwm_set_channel(struct ieee80211com *);
394 static void     iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
395 static void     iwm_scan_mindwell(struct ieee80211_scan_state *);
396 static int      iwm_detach(device_t);
397
398 /*
399  * Firmware parser.
400  */
401
402 static int
403 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
404 {
405         const struct iwm_fw_cscheme_list *l = (const void *)data;
406
407         if (dlen < sizeof(*l) ||
408             dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
409                 return EINVAL;
410
411         /* we don't actually store anything for now, always use s/w crypto */
412
413         return 0;
414 }
415
416 static int
417 iwm_firmware_store_section(struct iwm_softc *sc,
418     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
419 {
420         struct iwm_fw_sects *fws;
421         struct iwm_fw_onesect *fwone;
422
423         if (type >= IWM_UCODE_TYPE_MAX)
424                 return EINVAL;
425         if (dlen < sizeof(uint32_t))
426                 return EINVAL;
427
428         fws = &sc->sc_fw.fw_sects[type];
429         if (fws->fw_count >= IWM_UCODE_SECT_MAX)
430                 return EINVAL;
431
432         fwone = &fws->fw_sect[fws->fw_count];
433
434         /* the first 32 bits are the device load offset */
435         memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
436
437         /* rest is data */
438         fwone->fws_data = data + sizeof(uint32_t);
439         fwone->fws_len = dlen - sizeof(uint32_t);
440
441         fws->fw_count++;
442
443         return 0;
444 }
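
/*
 * Editor's note: illustrative sketch only, never compiled (#ifdef notyet).
 * It restates the section layout handled by iwm_firmware_store_section()
 * above: each firmware section blob begins with a 32-bit device load
 * offset, and everything after those four bytes is the image data itself.
 * The structure and helper names are hypothetical.
 */
#ifdef notyet
struct iwm_example_fw_section {
	uint32_t devoff;	/* device address the image is loaded at */
	const uint8_t *data;	/* image bytes following the offset word */
	size_t len;		/* number of image bytes */
};

static int
iwm_example_split_fw_section(const uint8_t *blob, size_t bloblen,
    struct iwm_example_fw_section *out)
{
	if (bloblen < sizeof(uint32_t))
		return EINVAL;		/* too short to hold even the offset */
	memcpy(&out->devoff, blob, sizeof(uint32_t));
	out->data = blob + sizeof(uint32_t);
	out->len = bloblen - sizeof(uint32_t);
	return 0;
}
#endif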
445
446 #define IWM_DEFAULT_SCAN_CHANNELS 40
447
448 /* iwlwifi: iwl-drv.c */
449 struct iwm_tlv_calib_data {
450         uint32_t ucode_type;
451         struct iwm_tlv_calib_ctrl calib;
452 } __packed;
453
454 static int
455 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
456 {
457         const struct iwm_tlv_calib_data *def_calib = data;
458         uint32_t ucode_type = le32toh(def_calib->ucode_type);
459
460         if (ucode_type >= IWM_UCODE_TYPE_MAX) {
461                 device_printf(sc->sc_dev,
462                     "Wrong ucode_type %u for default "
463                     "calibration.\n", ucode_type);
464                 return EINVAL;
465         }
466
467         sc->sc_default_calib[ucode_type].flow_trigger =
468             def_calib->calib.flow_trigger;
469         sc->sc_default_calib[ucode_type].event_trigger =
470             def_calib->calib.event_trigger;
471
472         return 0;
473 }
474
475 static void
476 iwm_fw_info_free(struct iwm_fw_info *fw)
477 {
478         firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
479         fw->fw_fp = NULL;
480         /* don't touch fw->fw_status */
481         memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
482 }
483
484 static int
485 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
486 {
487         struct iwm_fw_info *fw = &sc->sc_fw;
488         const struct iwm_tlv_ucode_header *uhdr;
489         struct iwm_ucode_tlv tlv;
490         enum iwm_ucode_tlv_type tlv_type;
491         const struct firmware *fwp;
492         const uint8_t *data;
493         int error = 0;
494         size_t len;
495
496         if (fw->fw_status == IWM_FW_STATUS_DONE &&
497             ucode_type != IWM_UCODE_TYPE_INIT)
498                 return 0;
499
500         while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
501                 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
502         fw->fw_status = IWM_FW_STATUS_INPROGRESS;
503
504         if (fw->fw_fp != NULL)
505                 iwm_fw_info_free(fw);
506
507         /*
508          * Load firmware into driver memory.
509          * fw_fp will be set.
510          */
511         IWM_UNLOCK(sc);
512         fwp = firmware_get(sc->sc_fwname);
513         IWM_LOCK(sc);
514         if (fwp == NULL) {
515                 device_printf(sc->sc_dev,
516                     "could not read firmware %s\n", sc->sc_fwname);
517                 error = ENOENT;
518                 goto out;
519         }
520         fw->fw_fp = fwp;
521
522         /* (Re-)Initialize default values. */
523         sc->sc_capaflags = 0;
524         sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
525         memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
526         memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
527
528         /*
529          * Parse firmware contents
530          */
531
532         uhdr = (const void *)fw->fw_fp->data;
533         if (*(const uint32_t *)fw->fw_fp->data != 0
534             || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
535                 device_printf(sc->sc_dev, "invalid firmware %s\n",
536                     sc->sc_fwname);
537                 error = EINVAL;
538                 goto out;
539         }
540
541         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
542             IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
543             IWM_UCODE_MINOR(le32toh(uhdr->ver)),
544             IWM_UCODE_API(le32toh(uhdr->ver)));
545         data = uhdr->data;
546         len = fw->fw_fp->datasize - sizeof(*uhdr);
547
548         while (len >= sizeof(tlv)) {
549                 size_t tlv_len;
550                 const void *tlv_data;
551
552                 memcpy(&tlv, data, sizeof(tlv));
553                 tlv_len = le32toh(tlv.length);
554                 tlv_type = le32toh(tlv.type);
555
556                 len -= sizeof(tlv);
557                 data += sizeof(tlv);
558                 tlv_data = data;
559
560                 if (len < tlv_len) {
561                         device_printf(sc->sc_dev,
562                             "firmware too short: %zu bytes\n",
563                             len);
564                         error = EINVAL;
565                         goto parse_out;
566                 }
567
568                 switch ((int)tlv_type) {
569                 case IWM_UCODE_TLV_PROBE_MAX_LEN:
570                         if (tlv_len < sizeof(uint32_t)) {
571                                 device_printf(sc->sc_dev,
572                                     "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
573                                     __func__,
574                                     (int) tlv_len);
575                                 error = EINVAL;
576                                 goto parse_out;
577                         }
578                         sc->sc_capa_max_probe_len
579                             = le32toh(*(const uint32_t *)tlv_data);
580                         /* limit it to something sensible */
581                         if (sc->sc_capa_max_probe_len >
582                             IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
583                                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
584                                     "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
585                                     "ridiculous\n", __func__);
586                                 error = EINVAL;
587                                 goto parse_out;
588                         }
589                         break;
590                 case IWM_UCODE_TLV_PAN:
591                         if (tlv_len) {
592                                 device_printf(sc->sc_dev,
593                                     "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
594                                     __func__,
595                                     (int) tlv_len);
596                                 error = EINVAL;
597                                 goto parse_out;
598                         }
599                         sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
600                         break;
601                 case IWM_UCODE_TLV_FLAGS:
602                         if (tlv_len < sizeof(uint32_t)) {
603                                 device_printf(sc->sc_dev,
604                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
605                                     __func__,
606                                     (int) tlv_len);
607                                 error = EINVAL;
608                                 goto parse_out;
609                         }
610                         /*
611                          * Apparently there can be many flags, but the Linux driver
612                          * parses only the first one, and so do we.
613                          *
614                          * XXX: why does this override IWM_UCODE_TLV_PAN?
615                          * Intentional or a bug?  Observations from
616                          * current firmware file:
617                          *  1) TLV_PAN is parsed first
618                          *  2) TLV_FLAGS contains TLV_FLAGS_PAN
619                          * ==> this resets TLV_PAN to itself... hnnnk
620                          */
621                         sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
622                         break;
623                 case IWM_UCODE_TLV_CSCHEME:
624                         if ((error = iwm_store_cscheme(sc,
625                             tlv_data, tlv_len)) != 0) {
626                                 device_printf(sc->sc_dev,
627                                     "%s: iwm_store_cscheme(): returned %d\n",
628                                     __func__,
629                                     error);
630                                 goto parse_out;
631                         }
632                         break;
633                 case IWM_UCODE_TLV_NUM_OF_CPU: {
634                         uint32_t num_cpu;
635                         if (tlv_len != sizeof(uint32_t)) {
636                                 device_printf(sc->sc_dev,
637                                     "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
638                                     __func__,
639                                     (int) tlv_len);
640                                 error = EINVAL;
641                                 goto parse_out;
642                         }
643                         num_cpu = le32toh(*(const uint32_t *)tlv_data);
644                         if (num_cpu < 1 || num_cpu > 2) {
645                                 device_printf(sc->sc_dev,
646                                     "%s: Driver supports only 1 or 2 CPUs\n",
647                                     __func__);
648                                 error = EINVAL;
649                                 goto parse_out;
650                         }
651                         break;
652                 }
653                 case IWM_UCODE_TLV_SEC_RT:
654                         if ((error = iwm_firmware_store_section(sc,
655                             IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0) {
656                                 device_printf(sc->sc_dev,
657                                     "%s: IWM_UCODE_TYPE_REGULAR: iwm_firmware_store_section() failed; %d\n",
658                                     __func__,
659                                     error);
660                                 goto parse_out;
661                         }
662                         break;
663                 case IWM_UCODE_TLV_SEC_INIT:
664                         if ((error = iwm_firmware_store_section(sc,
665                             IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0) {
666                                 device_printf(sc->sc_dev,
667                                     "%s: IWM_UCODE_TYPE_INIT: iwm_firmware_store_section() failed; %d\n",
668                                     __func__,
669                                     error);
670                                 goto parse_out;
671                         }
672                         break;
673                 case IWM_UCODE_TLV_SEC_WOWLAN:
674                         if ((error = iwm_firmware_store_section(sc,
675                             IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0) {
676                                 device_printf(sc->sc_dev,
677                                     "%s: IWM_UCODE_TYPE_WOW: iwm_firmware_store_section() failed; %d\n",
678                                     __func__,
679                                     error);
680                                 goto parse_out;
681                         }
682                         break;
683                 case IWM_UCODE_TLV_DEF_CALIB:
684                         if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
685                                 device_printf(sc->sc_dev,
686                                     "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%d) != sizeof(iwm_tlv_calib_data) (%d)\n",
687                                     __func__,
688                                     (int) tlv_len,
689                                     (int) sizeof(struct iwm_tlv_calib_data));
690                                 error = EINVAL;
691                                 goto parse_out;
692                         }
693                         if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
694                                 device_printf(sc->sc_dev,
695                                     "%s: iwm_set_default_calib() failed: %d\n",
696                                     __func__,
697                                     error);
698                                 goto parse_out;
699                         }
700                         break;
701                 case IWM_UCODE_TLV_PHY_SKU:
702                         if (tlv_len != sizeof(uint32_t)) {
703                                 error = EINVAL;
704                                 device_printf(sc->sc_dev,
705                                     "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) != sizeof(uint32_t)\n",
706                                     __func__,
707                                     (int) tlv_len);
708                                 goto parse_out;
709                         }
710                         sc->sc_fw_phy_config =
711                             le32toh(*(const uint32_t *)tlv_data);
712                         break;
713
714                 case IWM_UCODE_TLV_API_CHANGES_SET: {
715                         const struct iwm_ucode_api *api;
716                         if (tlv_len != sizeof(*api)) {
717                                 error = EINVAL;
718                                 goto parse_out;
719                         }
720                         api = (const struct iwm_ucode_api *)tlv_data;
721                         /* Flags may exceed 32 bits in future firmware. */
722                         if (le32toh(api->api_index) > 0) {
723                                 device_printf(sc->sc_dev,
724                                     "unsupported API index %d\n",
725                                     le32toh(api->api_index));
726                                 goto parse_out;
727                         }
728                         sc->sc_ucode_api = le32toh(api->api_flags);
729                         break;
730                 }
731
732                 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
733                         const struct iwm_ucode_capa *capa;
734                         int idx, i;
735                         if (tlv_len != sizeof(*capa)) {
736                                 error = EINVAL;
737                                 goto parse_out;
738                         }
739                         capa = (const struct iwm_ucode_capa *)tlv_data;
740                         idx = le32toh(capa->api_index);
741                         if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
742                                 device_printf(sc->sc_dev,
743                                     "unsupported API index %d\n", idx);
744                                 goto parse_out;
745                         }
746                         for (i = 0; i < 32; i++) {
747                                 if ((le32toh(capa->api_capa) & (1U << i)) == 0)
748                                         continue;
749                                 setbit(sc->sc_enabled_capa, i + (32 * idx));
750                         }
751                         break;
752                 }
753
754                 case 48: /* undocumented TLV */
755                 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
756                 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
757                         /* ignore, not used by current driver */
758                         break;
759
760                 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
761                         if ((error = iwm_firmware_store_section(sc,
762                             IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
763                             tlv_len)) != 0)
764                                 goto parse_out;
765                         break;
766
767                 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
768                         if (tlv_len != sizeof(uint32_t)) {
769                                 error = EINVAL;
770                                 goto parse_out;
771                         }
772                         sc->sc_capa_n_scan_channels =
773                           le32toh(*(const uint32_t *)tlv_data);
774                         break;
775
776                 case IWM_UCODE_TLV_FW_VERSION:
777                         if (tlv_len != sizeof(uint32_t) * 3) {
778                                 error = EINVAL;
779                                 goto parse_out;
780                         }
781                         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
782                             "%d.%d.%d",
783                             le32toh(((const uint32_t *)tlv_data)[0]),
784                             le32toh(((const uint32_t *)tlv_data)[1]),
785                             le32toh(((const uint32_t *)tlv_data)[2]));
786                         break;
787
788                 default:
789                         device_printf(sc->sc_dev,
790                             "%s: unknown firmware section %d, abort\n",
791                             __func__, tlv_type);
792                         error = EINVAL;
793                         goto parse_out;
794                 }
795
796                 len -= roundup(tlv_len, 4);
797                 data += roundup(tlv_len, 4);
798         }
799
800         KASSERT(error == 0, ("unhandled error"));
801
802  parse_out:
803         if (error) {
804                 device_printf(sc->sc_dev, "firmware parse error %d, "
805                     "section type %d\n", error, tlv_type);
806         }
807
808         if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
809                 device_printf(sc->sc_dev,
810                     "device uses unsupported power ops\n");
811                 error = ENOTSUP;
812         }
813
814  out:
815         if (error) {
816                 fw->fw_status = IWM_FW_STATUS_NONE;
817                 if (fw->fw_fp != NULL)
818                         iwm_fw_info_free(fw);
819         } else
820                 fw->fw_status = IWM_FW_STATUS_DONE;
821         wakeup(&sc->sc_fw);
822
823         return error;
824 }
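
/*
 * Editor's note: illustrative sketch only, never compiled (#ifdef notyet).
 * It condenses the TLV walk that iwm_read_firmware() performs above: each
 * record carries a little-endian 32-bit type and payload length, and the
 * cursor advances past the payload rounded up to a 4-byte boundary.  The
 * record structure, helper and callback names are hypothetical.
 */
#ifdef notyet
struct iwm_example_tlv {
	uint32_t type;		/* little-endian on disk */
	uint32_t length;	/* payload bytes, header excluded */
};

static int
iwm_example_walk_tlvs(const uint8_t *data, size_t len,
    int (*cb)(uint32_t type, const uint8_t *payload, size_t plen, void *arg),
    void *arg)
{
	struct iwm_example_tlv tlv;
	size_t plen, padded;
	int error;

	while (len >= sizeof(tlv)) {
		memcpy(&tlv, data, sizeof(tlv));
		data += sizeof(tlv);
		len -= sizeof(tlv);

		plen = le32toh(tlv.length);
		if (plen > len)
			return EINVAL;	/* record runs past the image */

		error = cb(le32toh(tlv.type), data, plen, arg);
		if (error != 0)
			return error;

		/* Payloads are padded so the next header is 4-byte aligned. */
		padded = roundup(plen, 4);
		if (padded > len)
			break;		/* unpadded final record */
		data += padded;
		len -= padded;
	}
	return 0;
}
#endif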
825
826 /*
827  * DMA resource routines
828  */
829
830 static void
831 iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
832 {
833         if (error != 0)
834                 return;
835         KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
836         *(bus_addr_t *)arg = segs[0].ds_addr;
837 }
838
839 static int
840 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
841     bus_size_t size, bus_size_t alignment)
842 {
843         int error;
844
845         dma->tag = NULL;
846         dma->map = NULL;
847         dma->size = size;
848         dma->vaddr = NULL;
849
850         error = bus_dma_tag_create(tag, alignment,
851             0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
852             1, size, 0, NULL, NULL, &dma->tag);
853         if (error != 0)
854                 goto fail;
855
856         error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
857             BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
858         if (error != 0)
859                 goto fail;
860
861         error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
862             iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
863         if (error != 0) {
864                 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
865                 dma->vaddr = NULL;
866                 goto fail;
867         }
868
869         bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
870
871         return 0;
872
873 fail:
874         iwm_dma_contig_free(dma);
875
876         return error;
877 }
878
879 static void
880 iwm_dma_contig_free(struct iwm_dma_info *dma)
881 {
882         if (dma->vaddr != NULL) {
883                 bus_dmamap_sync(dma->tag, dma->map,
884                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
885                 bus_dmamap_unload(dma->tag, dma->map);
886                 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
887                 dma->vaddr = NULL;
888         }
889         if (dma->tag != NULL) {
890                 bus_dma_tag_destroy(dma->tag);
891                 dma->tag = NULL;
892         }
893 }
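
/*
 * Editor's note: illustrative sketch only, never compiled (#ifdef notyet).
 * It shows how a caller typically pairs the two helpers above: allocate a
 * coherent, physically contiguous buffer with the required alignment, work
 * with dma->vaddr / dma->paddr, and release it with iwm_dma_contig_free()
 * on teardown.  The buffer, its size and its alignment are hypothetical.
 */
#ifdef notyet
static int
iwm_example_alloc_scratch(struct iwm_softc *sc, struct iwm_dma_info *dma)
{
	int error;

	/* 4KB buffer, 16-byte aligned; both values are made up. */
	error = iwm_dma_contig_alloc(sc->sc_dmat, dma, 4096, 16);
	if (error != 0)
		return error;

	/* dma->vaddr is the CPU mapping, dma->paddr the bus address. */
	memset(dma->vaddr, 0, dma->size);

	/* ... hand dma->paddr to the device, use the buffer ... */

	iwm_dma_contig_free(dma);
	return 0;
}
#endif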
894
895 /* fwmem is used to load firmware onto the card */
896 static int
897 iwm_alloc_fwmem(struct iwm_softc *sc)
898 {
899         /* Must be aligned on a 16-byte boundary. */
900         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
901             sc->sc_fwdmasegsz, 16);
902 }
903
904 /* tx scheduler rings.  not used? */
905 static int
906 iwm_alloc_sched(struct iwm_softc *sc)
907 {
908         /* TX scheduler rings must be aligned on a 1KB boundary. */
909         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
910             nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
911 }
912
913 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
914 static int
915 iwm_alloc_kw(struct iwm_softc *sc)
916 {
917         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
918 }
919
920 /* interrupt cause table */
921 static int
922 iwm_alloc_ict(struct iwm_softc *sc)
923 {
924         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
925             IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
926 }
927
928 static int
929 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
930 {
931         bus_size_t size;
932         int i, error;
933
934         ring->cur = 0;
935
936         /* Allocate RX descriptors (256-byte aligned). */
937         size = IWM_RX_RING_COUNT * sizeof(uint32_t);
938         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
939         if (error != 0) {
940                 device_printf(sc->sc_dev,
941                     "could not allocate RX ring DMA memory\n");
942                 goto fail;
943         }
944         ring->desc = ring->desc_dma.vaddr;
945
946         /* Allocate RX status area (16-byte aligned). */
947         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
948             sizeof(*ring->stat), 16);
949         if (error != 0) {
950                 device_printf(sc->sc_dev,
951                     "could not allocate RX status DMA memory\n");
952                 goto fail;
953         }
954         ring->stat = ring->stat_dma.vaddr;
955
956         /* Create RX buffer DMA tag. */
957         error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
958             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
959             IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
960         if (error != 0) {
961                 device_printf(sc->sc_dev,
962                     "%s: could not create RX buf DMA tag, error %d\n",
963                     __func__, error);
964                 goto fail;
965         }
966
967         /* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
968         error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
969         if (error != 0) {
970                 device_printf(sc->sc_dev,
971                     "%s: could not create RX buf DMA map, error %d\n",
972                     __func__, error);
973                 goto fail;
974         }
975         /*
976          * Allocate and map RX buffers.
977          */
978         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
979                 struct iwm_rx_data *data = &ring->data[i];
980                 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
981                 if (error != 0) {
982                         device_printf(sc->sc_dev,
983                             "%s: could not create RX buf DMA map, error %d\n",
984                             __func__, error);
985                         goto fail;
986                 }
987                 data->m = NULL;
988
989                 if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
990                         goto fail;
991                 }
992         }
993         return 0;
994
995 fail:   iwm_free_rx_ring(sc, ring);
996         return error;
997 }
998
999 static void
1000 iwm_disable_rx_dma(struct iwm_softc *sc)
1001 {
1002         /* XXX conditional nic locks are stupid */
1003         /* XXX print out if we can't lock the NIC? */
1004         if (iwm_nic_lock(sc)) {
1005                 /* XXX handle if RX stop doesn't finish? */
1006                 (void) iwm_pcie_rx_stop(sc);
1007                 iwm_nic_unlock(sc);
1008         }
1009 }
1010
1011 static void
1012 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1013 {
1014         /* Reset the ring state */
1015         ring->cur = 0;
1016
1017         /*
1018          * The hw rx ring index in shared memory must also be cleared,
1019          * otherwise the discrepancy can cause reprocessing chaos.
1020          */
1021         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1022 }
1023
1024 static void
1025 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1026 {
1027         int i;
1028
1029         iwm_dma_contig_free(&ring->desc_dma);
1030         iwm_dma_contig_free(&ring->stat_dma);
1031
1032         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1033                 struct iwm_rx_data *data = &ring->data[i];
1034
1035                 if (data->m != NULL) {
1036                         bus_dmamap_sync(ring->data_dmat, data->map,
1037                             BUS_DMASYNC_POSTREAD);
1038                         bus_dmamap_unload(ring->data_dmat, data->map);
1039                         m_freem(data->m);
1040                         data->m = NULL;
1041                 }
1042                 if (data->map != NULL) {
1043                         bus_dmamap_destroy(ring->data_dmat, data->map);
1044                         data->map = NULL;
1045                 }
1046         }
1047         if (ring->spare_map != NULL) {
1048                 bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1049                 ring->spare_map = NULL;
1050         }
1051         if (ring->data_dmat != NULL) {
1052                 bus_dma_tag_destroy(ring->data_dmat);
1053                 ring->data_dmat = NULL;
1054         }
1055 }
1056
1057 static int
1058 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1059 {
1060         bus_addr_t paddr;
1061         bus_size_t size;
1062         size_t maxsize;
1063         int nsegments;
1064         int i, error;
1065
1066         ring->qid = qid;
1067         ring->queued = 0;
1068         ring->cur = 0;
1069
1070         /* Allocate TX descriptors (256-byte aligned). */
1071         size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1072         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1073         if (error != 0) {
1074                 device_printf(sc->sc_dev,
1075                     "could not allocate TX ring DMA memory\n");
1076                 goto fail;
1077         }
1078         ring->desc = ring->desc_dma.vaddr;
1079
1080         /*
1081          * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
1082          * to allocate command space for the other rings.
1083          */
1084         if (qid > IWM_MVM_CMD_QUEUE)
1085                 return 0;
1086
1087         size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1088         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1089         if (error != 0) {
1090                 device_printf(sc->sc_dev,
1091                     "could not allocate TX cmd DMA memory\n");
1092                 goto fail;
1093         }
1094         ring->cmd = ring->cmd_dma.vaddr;
1095
1096         /* FW commands may require more mapped space than packets. */
1097         if (qid == IWM_MVM_CMD_QUEUE) {
1098                 maxsize = IWM_RBUF_SIZE;
1099                 nsegments = 1;
1100         } else {
1101                 maxsize = MCLBYTES;
1102                 nsegments = IWM_MAX_SCATTER - 2;
1103         }
1104
1105         error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1106             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
1107             nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
1108         if (error != 0) {
1109                 device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
1110                 goto fail;
1111         }
1112
1113         paddr = ring->cmd_dma.paddr;
1114         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1115                 struct iwm_tx_data *data = &ring->data[i];
1116
1117                 data->cmd_paddr = paddr;
1118                 data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1119                     + offsetof(struct iwm_tx_cmd, scratch);
1120                 paddr += sizeof(struct iwm_device_cmd);
1121
1122                 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1123                 if (error != 0) {
1124                         device_printf(sc->sc_dev,
1125                             "could not create TX buf DMA map\n");
1126                         goto fail;
1127                 }
1128         }
1129         KASSERT(paddr == ring->cmd_dma.paddr + size,
1130             ("invalid physical address"));
1131         return 0;
1132
1133 fail:   iwm_free_tx_ring(sc, ring);
1134         return error;
1135 }
1136
1137 static void
1138 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1139 {
1140         int i;
1141
1142         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1143                 struct iwm_tx_data *data = &ring->data[i];
1144
1145                 if (data->m != NULL) {
1146                         bus_dmamap_sync(ring->data_dmat, data->map,
1147                             BUS_DMASYNC_POSTWRITE);
1148                         bus_dmamap_unload(ring->data_dmat, data->map);
1149                         m_freem(data->m);
1150                         data->m = NULL;
1151                 }
1152         }
1153         /* Clear TX descriptors. */
1154         memset(ring->desc, 0, ring->desc_dma.size);
1155         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1156             BUS_DMASYNC_PREWRITE);
1157         sc->qfullmsk &= ~(1 << ring->qid);
1158         ring->queued = 0;
1159         ring->cur = 0;
1160 }
1161
1162 static void
1163 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1164 {
1165         int i;
1166
1167         iwm_dma_contig_free(&ring->desc_dma);
1168         iwm_dma_contig_free(&ring->cmd_dma);
1169
1170         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1171                 struct iwm_tx_data *data = &ring->data[i];
1172
1173                 if (data->m != NULL) {
1174                         bus_dmamap_sync(ring->data_dmat, data->map,
1175                             BUS_DMASYNC_POSTWRITE);
1176                         bus_dmamap_unload(ring->data_dmat, data->map);
1177                         m_freem(data->m);
1178                         data->m = NULL;
1179                 }
1180                 if (data->map != NULL) {
1181                         bus_dmamap_destroy(ring->data_dmat, data->map);
1182                         data->map = NULL;
1183                 }
1184         }
1185         if (ring->data_dmat != NULL) {
1186                 bus_dma_tag_destroy(ring->data_dmat);
1187                 ring->data_dmat = NULL;
1188         }
1189 }
1190
1191 /*
1192  * High-level hardware frobbing routines
1193  */
1194
1195 static void
1196 iwm_enable_interrupts(struct iwm_softc *sc)
1197 {
1198         sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1199         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1200 }
1201
1202 static void
1203 iwm_restore_interrupts(struct iwm_softc *sc)
1204 {
1205         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1206 }
1207
1208 static void
1209 iwm_disable_interrupts(struct iwm_softc *sc)
1210 {
1211         /* disable interrupts */
1212         IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1213
1214         /* acknowledge all interrupts */
1215         IWM_WRITE(sc, IWM_CSR_INT, ~0);
1216         IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1217 }
1218
1219 static void
1220 iwm_ict_reset(struct iwm_softc *sc)
1221 {
1222         iwm_disable_interrupts(sc);
1223
1224         /* Reset ICT table. */
1225         memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1226         sc->ict_cur = 0;
1227
1228         /* Set physical address of ICT table (4KB aligned). */
1229         IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1230             IWM_CSR_DRAM_INT_TBL_ENABLE
1231             | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1232             | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1233             | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1234
1235         /* Switch to ICT interrupt mode in driver. */
1236         sc->sc_flags |= IWM_FLAG_USE_ICT;
1237
1238         /* Re-enable interrupts. */
1239         IWM_WRITE(sc, IWM_CSR_INT, ~0);
1240         iwm_enable_interrupts(sc);
1241 }
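
/*
 * Editor's note: illustrative sketch only, never compiled (#ifdef notyet).
 * It spells out the register packing used by iwm_ict_reset() above: the
 * ICT table is allocated with 1 << IWM_ICT_PADDR_SHIFT alignment in
 * iwm_alloc_ict(), so the low bits of its bus address are zero and
 * paddr >> IWM_ICT_PADDR_SHIFT fits in the register alongside the enable,
 * write-pointer and wrap-check flag bits.  The helper name is hypothetical.
 */
#ifdef notyet
static uint32_t
iwm_example_ict_tbl_reg(bus_addr_t ict_paddr)
{
	KASSERT((ict_paddr & ((1 << IWM_ICT_PADDR_SHIFT) - 1)) == 0,
	    ("ICT table is not aligned to 1 << IWM_ICT_PADDR_SHIFT"));
	return (IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | (uint32_t)(ict_paddr >> IWM_ICT_PADDR_SHIFT));
}
#endif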
1242
1243 /* iwlwifi pcie/trans.c */
1244
1245 /*
1246  * Since this hard-resets the device, it's time to actually
1247  * mark the first vap (if any) as having no MAC context.
1248  * It's annoying, but since the driver can be stopped and
1249  * started while active (thanks, OpenBSD port!), we have to
1250  * track this correctly.
1251  */
1252 static void
1253 iwm_stop_device(struct iwm_softc *sc)
1254 {
1255         struct ieee80211com *ic = &sc->sc_ic;
1256         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1257         int chnl, qid;
1258         uint32_t mask = 0;
1259
1260         /* tell the device to stop sending interrupts */
1261         iwm_disable_interrupts(sc);
1262
1263         /*
1264          * FreeBSD-local: mark the first vap as not-uploaded,
1265          * so the next transition through auth/assoc
1266          * will correctly populate the MAC context.
1267          */
1268         if (vap) {
1269                 struct iwm_vap *iv = IWM_VAP(vap);
1270                 iv->is_uploaded = 0;
1271         }
1272
1273         /* device going down, stop using the ICT table */
1274         sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1275
1276         /* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
1277
1278         iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1279
1280         if (iwm_nic_lock(sc)) {
1281                 /* Stop each Tx DMA channel */
1282                 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1283                         IWM_WRITE(sc,
1284                             IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1285                         mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1286                 }
1287
1288                 /* Wait for DMA channels to be idle */
1289                 if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1290                     5000)) {
1291                         device_printf(sc->sc_dev,
1292                             "timeout while stopping Tx DMA channels: [0x%08x]\n",
1293                             IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1294                 }
1295                 iwm_nic_unlock(sc);
1296         }
1297         iwm_disable_rx_dma(sc);
1298
1299         /* Stop RX ring. */
1300         iwm_reset_rx_ring(sc, &sc->rxq);
1301
1302         /* Reset all TX rings. */
1303         for (qid = 0; qid < nitems(sc->txq); qid++)
1304                 iwm_reset_tx_ring(sc, &sc->txq[qid]);
1305
1306         /*
1307          * Power-down device's busmaster DMA clocks
1308          */
1309         iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1310         DELAY(5);
1311
1312         /* Make sure (redundant) we've released our request to stay awake */
1313         IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1314             IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1315
1316         /* Stop the device, and put it in low power state */
1317         iwm_apm_stop(sc);
1318
1319         /* Upon stop, the APM issues an interrupt if the HW RF kill switch is set.
1320          * Clear that interrupt again here.
1321          */
1322         iwm_disable_interrupts(sc);
1323         /* stop and reset the on-board processor */
1324         IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1325
1326         /*
1327          * Even if we stop the HW, we still want the RF kill
1328          * interrupt
1329          */
1330         iwm_enable_rfkill_int(sc);
1331         iwm_check_rfkill(sc);
1332 }
1333
1334 /* iwlwifi: mvm/ops.c */
1335 static void
1336 iwm_mvm_nic_config(struct iwm_softc *sc)
1337 {
1338         uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1339         uint32_t reg_val = 0;
1340
1341         radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1342             IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1343         radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1344             IWM_FW_PHY_CFG_RADIO_STEP_POS;
1345         radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1346             IWM_FW_PHY_CFG_RADIO_DASH_POS;
1347
1348         /* SKU control */
1349         reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1350             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1351         reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1352             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1353
1354         /* radio configuration */
1355         reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1356         reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1357         reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1358
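             /*
              * reg_val now combines the MAC step/dash taken from the
              * hardware revision with the radio type/step/dash parsed from
              * the firmware PHY configuration, presumably so the firmware
              * knows which MAC/RF combination it is driving.
              */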
1359         IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1360
1361         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1362             "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1363             radio_cfg_step, radio_cfg_dash);
1364
1365         /*
1366          * Workaround: the NIC is stuck in a reset state after early PCIe
1367          * power off (PCIe power is lost before PERST# is asserted), causing
1368          * the ME firmware to lose ownership and be unable to regain it.
1369          */
1370         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1371                 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1372                     IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1373                     ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1374         }
1375 }
1376
1377 static int
1378 iwm_nic_rx_init(struct iwm_softc *sc)
1379 {
1380         if (!iwm_nic_lock(sc))
1381                 return EBUSY;
1382
1383         /*
1384          * Initialize RX ring.  This is from the iwn driver.
1385          */
1386         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1387
1388         /* stop DMA */
1389         iwm_disable_rx_dma(sc);
1390         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1391         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1392         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1393         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1394
1395         /* Set physical address of RX ring (256-byte aligned). */
1396         IWM_WRITE(sc,
1397             IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1398
1399         /* Set physical address of RX status (16-byte aligned). */
1400         IWM_WRITE(sc,
1401             IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1402
1403         /* Enable RX. */
1404         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1405             IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
1406             IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |  /* HW bug */
1407             IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
1408             IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK        |
1409             (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1410             IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
1411             IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
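             /*
              * Taken together, the bits above enable the RX DMA channel,
              * direct its interrupts to the host, limit each receive buffer
              * to a single frame, select 4KB receive buffers, program the
              * RB interrupt timeout, and size the descriptor ring to
              * 2^IWM_RX_QUEUE_SIZE_LOG entries.
              */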
1412
1413         IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1414
1415         /* W/A for interrupt coalescing bug in 7260 and 3160 */
1416         if (sc->host_interrupt_operation_mode)
1417                 IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1418
1419         /*
1420          * Thus sayeth el jefe (iwlwifi) via a comment:
1421          *
1422          * This value should initially be 0 (before preparing any
1423          * RBs), should be 8 after preparing the first 8 RBs (for example)
1424          */
1425         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1426
1427         iwm_nic_unlock(sc);
1428
1429         return 0;
1430 }
1431
1432 static int
1433 iwm_nic_tx_init(struct iwm_softc *sc)
1434 {
1435         int qid;
1436
1437         if (!iwm_nic_lock(sc))
1438                 return EBUSY;
1439
1440         /* Deactivate TX scheduler. */
1441         iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1442
1443         /* Set physical address of "keep warm" page (16-byte aligned). */
1444         IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1445
1446         /* Initialize TX rings. */
1447         for (qid = 0; qid < nitems(sc->txq); qid++) {
1448                 struct iwm_tx_ring *txq = &sc->txq[qid];
1449
1450                 /* Set physical address of TX ring (256-byte aligned). */
1451                 IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1452                     txq->desc_dma.paddr >> 8);
1453                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1454                     "%s: loading ring %d descriptors (%p) at %lx\n",
1455                     __func__,
1456                     qid, txq->desc,
1457                     (unsigned long) (txq->desc_dma.paddr >> 8));
1458         }
1459
1460         iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1461
1462         iwm_nic_unlock(sc);
1463
1464         return 0;
1465 }
1466
1467 static int
1468 iwm_nic_init(struct iwm_softc *sc)
1469 {
1470         int error;
1471
1472         iwm_apm_init(sc);
1473         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1474                 iwm_set_pwr(sc);
1475
1476         iwm_mvm_nic_config(sc);
1477
1478         if ((error = iwm_nic_rx_init(sc)) != 0)
1479                 return error;
1480
1481         /*
1482          * Ditto for TX, from iwn
1483          */
1484         if ((error = iwm_nic_tx_init(sc)) != 0)
1485                 return error;
1486
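             /*
              * The 0x800fffff mask below is carried over verbatim from
              * iwlwifi; it apparently enables register shadowing so that
              * frequently updated pointer registers can be written without
              * first waking the MAC.
              */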
1487         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1488             "%s: shadow registers enabled\n", __func__);
1489         IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1490
1491         return 0;
1492 }
1493
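     /*
      * TX FIFO assignment for the four data queues.  The ordering appears
      * to mirror iwlwifi's iwl_mvm_ac_to_tx_fifo table, which is indexed
      * in Linux mac80211 AC order (VO, VI, BE, BK) rather than net80211's
      * WME_AC_* order, so be careful when indexing it.
      */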
1494 const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
1495         IWM_MVM_TX_FIFO_VO,
1496         IWM_MVM_TX_FIFO_VI,
1497         IWM_MVM_TX_FIFO_BE,
1498         IWM_MVM_TX_FIFO_BK,
1499 };
1500
1501 static int
1502 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1503 {
1504         if (!iwm_nic_lock(sc)) {
1505                 device_printf(sc->sc_dev,
1506                     "%s: cannot enable txq %d\n",
1507                     __func__,
1508                     qid);
1509                 return EBUSY;
1510         }
1511
1512         IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1513
1514         if (qid == IWM_MVM_CMD_QUEUE) {
1515                 /* deactivate before configuration */
1516                 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1517                     (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1518                     | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1519
1520                 iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1521
1522                 iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1523
1524                 iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1525                 /* Set scheduler window size and frame limit. */
1526                 iwm_write_mem32(sc,
1527                     sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1528                     sizeof(uint32_t),
1529                     ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1530                     IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1531                     ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1532                     IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1533
1534                 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1535                     (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1536                     (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1537                     (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1538                     IWM_SCD_QUEUE_STTS_REG_MSK);
1539         } else {
1540                 struct iwm_scd_txq_cfg_cmd cmd;
1541                 int error;
1542
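                     /*
                      * Data queues are configured through the firmware's
                      * SCD_QUEUE_CFG command rather than by direct scheduler
                      * register writes, so drop the NIC lock while the
                      * synchronous command is in flight.
                      */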
1543                 iwm_nic_unlock(sc);
1544
1545                 memset(&cmd, 0, sizeof(cmd));
1546                 cmd.scd_queue = qid;
1547                 cmd.enable = 1;
1548                 cmd.sta_id = sta_id;
1549                 cmd.tx_fifo = fifo;
1550                 cmd.aggregate = 0;
1551                 cmd.window = IWM_FRAME_LIMIT;
1552
1553                 error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1554                     sizeof(cmd), &cmd);
1555                 if (error) {
1556                         device_printf(sc->sc_dev,
1557                             "cannot enable txq %d\n", qid);
1558                         return error;
1559                 }
1560
1561                 if (!iwm_nic_lock(sc))
1562                         return EBUSY;
1563         }
1564
1565         iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1566             iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1567
1568         iwm_nic_unlock(sc);
1569
1570         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1571             __func__, qid, fifo);
1572
1573         return 0;
1574 }
1575
1576 static int
1577 iwm_post_alive(struct iwm_softc *sc)
1578 {
1579         int nwords;
1580         int error, chnl;
1581         uint32_t base;
1582
1583         if (!iwm_nic_lock(sc))
1584                 return EBUSY;
1585
1586         base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1587         if (sc->sched_base != base) {
1588                 device_printf(sc->sc_dev,
1589                     "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1590                     __func__, sc->sched_base, base);
1591         }
1592
1593         iwm_ict_reset(sc);
1594
1595         /* Clear TX scheduler state in SRAM. */
1596         nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1597             IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
1598             / sizeof(uint32_t);
1599         error = iwm_write_mem(sc,
1600             sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1601             NULL, nwords);
1602         if (error)
1603                 goto out;
1604
1605         /* Set physical address of TX scheduler rings (1KB aligned). */
1606         iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1607
1608         iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1609
1610         iwm_nic_unlock(sc);
1611
1612         /* enable command channel */
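             /*
              * FIFO 7 appears to be the firmware's command FIFO (it matches
              * IWL_MVM_TX_FIFO_CMD in iwlwifi).
              */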
1613         error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
1614         if (error)
1615                 return error;
1616
1617         if (!iwm_nic_lock(sc))
1618                 return EBUSY;
1619
1620         iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1621
1622         /* Enable DMA channels. */
1623         for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1624                 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1625                     IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1626                     IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1627         }
1628
1629         IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1630             IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1631
1632         /* Enable L1-Active */
1633         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
1634                 iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1635                     IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1636         }
1637
1638  out:
1639         iwm_nic_unlock(sc);
1640         return error;
1641 }
1642
1643 /*
1644  * NVM read access and content parsing.  We do not support
1645  * external NVM or writing NVM.
1646  * iwlwifi/mvm/nvm.c
1647  */
1648
1649 /* list of NVM sections we are allowed/need to read */
1650 const int nvm_to_read[] = {
1651         IWM_NVM_SECTION_TYPE_HW,
1652         IWM_NVM_SECTION_TYPE_SW,
1653         IWM_NVM_SECTION_TYPE_REGULATORY,
1654         IWM_NVM_SECTION_TYPE_CALIBRATION,
1655         IWM_NVM_SECTION_TYPE_PRODUCTION,
1656         IWM_NVM_SECTION_TYPE_HW_8000,
1657         IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
1658         IWM_NVM_SECTION_TYPE_PHY_SKU,
1659 };
1660
1661 /* Default NVM size to read */
1662 #define IWM_NVM_DEFAULT_CHUNK_SIZE      (2*1024)
1663 #define IWM_MAX_NVM_SECTION_SIZE        8192
1664
1665 #define IWM_NVM_WRITE_OPCODE 1
1666 #define IWM_NVM_READ_OPCODE 0
1667
1668 /* load nvm chunk response */
1669 #define IWM_READ_NVM_CHUNK_SUCCEED              0
1670 #define IWM_READ_NVM_CHUNK_INVALID_ADDRESS      1
1671
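     /*
      * Read one chunk of an NVM section via the firmware: build an
      * IWM_NVM_ACCESS_CMD describing the section, offset and length, send
      * it synchronously while keeping the response buffer, then pull the
      * status, actual offset/length and payload out of the
      * iwm_nvm_access_resp that comes back.
      */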
1672 static int
1673 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1674         uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1675 {
1677         struct iwm_nvm_access_cmd nvm_access_cmd = {
1678                 .offset = htole16(offset),
1679                 .length = htole16(length),
1680                 .type = htole16(section),
1681                 .op_code = IWM_NVM_READ_OPCODE,
1682         };
1683         struct iwm_nvm_access_resp *nvm_resp;
1684         struct iwm_rx_packet *pkt;
1685         struct iwm_host_cmd cmd = {
1686                 .id = IWM_NVM_ACCESS_CMD,
1687                 .flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
1688                     IWM_CMD_SEND_IN_RFKILL,
1689                 .data = { &nvm_access_cmd, },
1690         };
1691         int ret, offset_read;
1692         size_t bytes_read;
1693         uint8_t *resp_data;
1694
1695         cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1696
1697         ret = iwm_send_cmd(sc, &cmd);
1698         if (ret) {
1699                 device_printf(sc->sc_dev,
1700                     "Could not send NVM_ACCESS command (error=%d)\n", ret);
1701                 return ret;
1702         }
1703
1704         pkt = cmd.resp_pkt;
1705         if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
1706                 device_printf(sc->sc_dev,
1707                     "Bad return from IWM_NVM_ACCESS_COMMAND (0x%08X)\n",
1708                     pkt->hdr.flags);
1709                 ret = EIO;
1710                 goto exit;
1711         }
1712
1713         /* Extract NVM response */
1714         nvm_resp = (void *)pkt->data;
1715
1716         ret = le16toh(nvm_resp->status);
1717         bytes_read = le16toh(nvm_resp->length);
1718         offset_read = le16toh(nvm_resp->offset);
1719         resp_data = nvm_resp->data;
1720         if (ret) {
1721                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1722                     "NVM access command failed with status %d\n", ret);
1723                 ret = EINVAL;
1724                 goto exit;
1725         }
1726
1727         if (offset_read != offset) {
1728                 device_printf(sc->sc_dev,
1729                     "NVM ACCESS response with invalid offset %d\n",
1730                     offset_read);
1731                 ret = EINVAL;
1732                 goto exit;
1733         }
1734
1735         if (bytes_read > length) {
1736                 device_printf(sc->sc_dev,
1737                     "NVM ACCESS response with too much data "
1738                     "(%d bytes requested, %zd bytes received)\n",
1739                     length, bytes_read);
1740                 ret = EINVAL;
1741                 goto exit;
1742         }
1743
1744         memcpy(data + offset, resp_data, bytes_read);
1745         *len = bytes_read;
1746
1747  exit:
1748         iwm_free_resp(sc, &cmd);
1749         return ret;
1750 }
1751
1752 /*
1753  * Reads an NVM section completely.
1754  * NICs prior to the 7000 family don't have a real NVM; they just read
1755  * section 0, which is the EEPROM.  Because EEPROM reads are not bounded
1756  * by the uCode, we have to check manually in that case that we don't
1757  * overflow and try to read more than the EEPROM size.
1758  * For 7000 family NICs, we supply the maximum size we can read, and
1759  * the uCode fills the response with as much data as it can without
1760  * overflowing, so no check is needed.
1761  */
1762 static int
1763 iwm_nvm_read_section(struct iwm_softc *sc,
1764         uint16_t section, uint8_t *data, uint16_t *len, size_t max_len)
1765 {
1766         uint16_t chunklen, seglen;
1767         int error = 0;
1768
1769         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1770             "reading NVM section %d\n", section);
1771
1772         chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
1773         *len = 0;
1774
1775         /* Read NVM chunks until exhausted (reading less than requested) */
1776         while (seglen == chunklen && *len < max_len) {
1777                 error = iwm_nvm_read_chunk(sc,
1778                     section, *len, chunklen, data, &seglen);
1779                 if (error) {
1780                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1781                             "Cannot read from NVM section "
1782                             "%d at offset %d\n", section, *len);
1783                         return error;
1784                 }
1785                 *len += seglen;
1786         }
1787
1788         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1789             "NVM section %d read completed (%d bytes, error=%d)\n",
1790             section, *len, error);
1791         return error;
1792 }
1793
1794 /*
1795  * BEGIN IWM_NVM_PARSE
1796  */
1797
1798 /* iwlwifi/iwl-nvm-parse.c */
1799
1800 /* NVM offsets (in words) definitions */
1801 enum iwm_nvm_offsets {
1802         /* NVM HW-Section offset (in words) definitions */
1803         IWM_HW_ADDR = 0x15,
1804
1805 /* NVM SW-Section offset (in words) definitions */
1806         IWM_NVM_SW_SECTION = 0x1C0,
1807         IWM_NVM_VERSION = 0,
1808         IWM_RADIO_CFG = 1,
1809         IWM_SKU = 2,
1810         IWM_N_HW_ADDRS = 3,
1811         IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1812
1813 /* NVM calibration section offset (in words) definitions */
1814         IWM_NVM_CALIB_SECTION = 0x2B8,
1815         IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1816 };
1817
1818 enum iwm_8000_nvm_offsets {
1819         /* NVM HW-Section offset (in words) definitions */
1820         IWM_HW_ADDR0_WFPM_8000 = 0x12,
1821         IWM_HW_ADDR1_WFPM_8000 = 0x16,
1822         IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1823         IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1824         IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1825
1826         /* NVM SW-Section offset (in words) definitions */
1827         IWM_NVM_SW_SECTION_8000 = 0x1C0,
1828         IWM_NVM_VERSION_8000 = 0,
1829         IWM_RADIO_CFG_8000 = 0,
1830         IWM_SKU_8000 = 2,
1831         IWM_N_HW_ADDRS_8000 = 3,
1832
1833         /* NVM REGULATORY -Section offset (in words) definitions */
1834         IWM_NVM_CHANNELS_8000 = 0,
1835         IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1836         IWM_NVM_LAR_OFFSET_8000 = 0x507,
1837         IWM_NVM_LAR_ENABLED_8000 = 0x7,
1838
1839         /* NVM calibration section offset (in words) definitions */
1840         IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1841         IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1842 };
1843
1844 /* SKU Capabilities (actual values from NVM definition) */
1845 enum nvm_sku_bits {
1846         IWM_NVM_SKU_CAP_BAND_24GHZ      = (1 << 0),
1847         IWM_NVM_SKU_CAP_BAND_52GHZ      = (1 << 1),
1848         IWM_NVM_SKU_CAP_11N_ENABLE      = (1 << 2),
1849         IWM_NVM_SKU_CAP_11AC_ENABLE     = (1 << 3),
1850 };
1851
1852 /* radio config bits (actual values from NVM definition) */
1853 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1854 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1855 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1856 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1857 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1858 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1859
1860 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)       (x & 0xF)
1861 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x)         ((x >> 4) & 0xF)
1862 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x)         ((x >> 8) & 0xF)
1863 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)         ((x >> 12) & 0xFFF)
1864 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)       ((x >> 24) & 0xF)
1865 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)       ((x >> 28) & 0xF)
1866
1867 #define DEFAULT_MAX_TX_POWER 16
1868
1869 /**
1870  * enum iwm_nvm_channel_flags - channel flags in NVM
1871  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1872  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1873  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1874  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1875  * XXX cannot find this (DFS) flag in iwl-nvm-parse.c
1876  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1877  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1878  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1879  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1880  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1881  */
1882 enum iwm_nvm_channel_flags {
1883         IWM_NVM_CHANNEL_VALID = (1 << 0),
1884         IWM_NVM_CHANNEL_IBSS = (1 << 1),
1885         IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1886         IWM_NVM_CHANNEL_RADAR = (1 << 4),
1887         IWM_NVM_CHANNEL_DFS = (1 << 7),
1888         IWM_NVM_CHANNEL_WIDE = (1 << 8),
1889         IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1890         IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1891         IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1892 };
1893
1894 /*
1895  * Translate EEPROM flags to net80211.
1896  */
1897 static uint32_t
1898 iwm_eeprom_channel_flags(uint16_t ch_flags)
1899 {
1900         uint32_t nflags;
1901
1902         nflags = 0;
1903         if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1904                 nflags |= IEEE80211_CHAN_PASSIVE;
1905         if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1906                 nflags |= IEEE80211_CHAN_NOADHOC;
1907         if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1908                 nflags |= IEEE80211_CHAN_DFS;
1909                 /* Just in case. */
1910                 nflags |= IEEE80211_CHAN_NOADHOC;
1911         }
1912
1913         return (nflags);
1914 }
1915
1916 static void
1917 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1918     int maxchans, int *nchans, int ch_idx, size_t ch_num,
1919     const uint8_t bands[])
1920 {
1921         const uint16_t * const nvm_ch_flags = sc->sc_nvm.nvm_ch_flags;
1922         uint32_t nflags;
1923         uint16_t ch_flags;
1924         uint8_t ieee;
1925         int error;
1926
1927         for (; ch_idx < ch_num; ch_idx++) {
1928                 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1929                 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1930                         ieee = iwm_nvm_channels[ch_idx];
1931                 else
1932                         ieee = iwm_nvm_channels_8000[ch_idx];
1933
1934                 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1935                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1936                             "Ch. %d Flags %x [%sGHz] - No traffic\n",
1937                             ieee, ch_flags,
1938                             (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1939                             "5.2" : "2.4");
1940                         continue;
1941                 }
1942
1943                 nflags = iwm_eeprom_channel_flags(ch_flags);
1944                 error = ieee80211_add_channel(chans, maxchans, nchans,
1945                     ieee, 0, 0, nflags, bands);
1946                 if (error != 0)
1947                         break;
1948
1949                 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1950                     "Ch. %d Flags %x [%sGHz] - Added\n",
1951                     ieee, ch_flags,
1952                     (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1953                     "5.2" : "2.4");
1954         }
1955 }
1956
1957 static void
1958 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
1959     struct ieee80211_channel chans[])
1960 {
1961         struct iwm_softc *sc = ic->ic_softc;
1962         struct iwm_nvm_data *data = &sc->sc_nvm;
1963         uint8_t bands[IEEE80211_MODE_BYTES];
1964         size_t ch_num;
1965
1966         memset(bands, 0, sizeof(bands));
1967         /* 1-13: 11b/g channels. */
1968         setbit(bands, IEEE80211_MODE_11B);
1969         setbit(bands, IEEE80211_MODE_11G);
1970         iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
1971             IWM_NUM_2GHZ_CHANNELS - 1, bands);
1972
1973         /* 14: 11b channel only. */
1974         clrbit(bands, IEEE80211_MODE_11G);
1975         iwm_add_channel_band(sc, chans, maxchans, nchans,
1976             IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
1977
1978         if (data->sku_cap_band_52GHz_enable) {
1979                 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1980                         ch_num = nitems(iwm_nvm_channels);
1981                 else
1982                         ch_num = nitems(iwm_nvm_channels_8000);
1983                 memset(bands, 0, sizeof(bands));
1984                 setbit(bands, IEEE80211_MODE_11A);
1985                 iwm_add_channel_band(sc, chans, maxchans, nchans,
1986                     IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
1987         }
1988 }
1989
1990 static void
1991 iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
1992         const uint16_t *mac_override, const uint16_t *nvm_hw)
1993 {
1994         const uint8_t *hw_addr;
1995
1996         if (mac_override) {
1997                 static const uint8_t reserved_mac[] = {
1998                         0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
1999                 };
2000
2001                 hw_addr = (const uint8_t *)(mac_override +
2002                                  IWM_MAC_ADDRESS_OVERRIDE_8000);
2003
2004                 /*
2005                  * Store the MAC address from MAO section.
2006                  * No byte swapping is required in MAO section
2007                  */
2008                 IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2009
2010                 /*
2011                  * Force the use of the OTP MAC address in case of reserved MAC
2012                  * address in the NVM, or if address is given but invalid.
2013                  */
2014                 if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2015                     !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2016                     iwm_is_valid_ether_addr(data->hw_addr) &&
2017                     !IEEE80211_IS_MULTICAST(data->hw_addr))
2018                         return;
2019
2020                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2021                     "%s: mac address from nvm override section invalid\n",
2022                     __func__);
2023         }
2024
2025         if (nvm_hw) {
2026                 /* read the mac address from WFMP registers */
2027                 uint32_t mac_addr0 =
2028                     htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2029                 uint32_t mac_addr1 =
2030                     htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2031
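                     /*
                      * The PRPH reads return the address bytes reversed
                      * within each 32-bit word, so unpack them back into
                      * over-the-air byte order.
                      */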
2032                 hw_addr = (const uint8_t *)&mac_addr0;
2033                 data->hw_addr[0] = hw_addr[3];
2034                 data->hw_addr[1] = hw_addr[2];
2035                 data->hw_addr[2] = hw_addr[1];
2036                 data->hw_addr[3] = hw_addr[0];
2037
2038                 hw_addr = (const uint8_t *)&mac_addr1;
2039                 data->hw_addr[4] = hw_addr[1];
2040                 data->hw_addr[5] = hw_addr[0];
2041
2042                 return;
2043         }
2044
2045         device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2046         memset(data->hw_addr, 0, sizeof(data->hw_addr));
2047 }
2048
2049 static int
2050 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2051             const uint16_t *phy_sku)
2052 {
2053         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2054                 return le16_to_cpup(nvm_sw + IWM_SKU);
2055
2056         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2057 }
2058
2059 static int
2060 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2061 {
2062         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2063                 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2064         else
2065                 return le32_to_cpup((const uint32_t *)(nvm_sw +
2066                                                 IWM_NVM_VERSION_8000));
2067 }
2068
2069 static int
2070 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2071                   const uint16_t *phy_sku)
2072 {
2073         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2074                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2075
2076         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2077 }
2078
2079 static int
2080 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2081 {
2082         int n_hw_addr;
2083
2084         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
2085                 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2086
2087         n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2088
2089         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2090 }
2091
2092 static void
2093 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2094                   uint32_t radio_cfg)
2095 {
2096         if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
2097                 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2098                 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2099                 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2100                 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2101                 return;
2102         }
2103
2104         /* set the radio configuration for family 8000 */
2105         data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2106         data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2107         data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2108         data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2109         data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2110         data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2111 }
2112
2113 static int
2114 iwm_parse_nvm_data(struct iwm_softc *sc,
2115                    const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2116                    const uint16_t *nvm_calib, const uint16_t *mac_override,
2117                    const uint16_t *phy_sku, const uint16_t *regulatory)
2118 {
2119         struct iwm_nvm_data *data = &sc->sc_nvm;
2120         uint8_t hw_addr[IEEE80211_ADDR_LEN];
2121         uint32_t sku, radio_cfg;
2122
2123         data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2124
2125         radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2126         iwm_set_radio_cfg(sc, data, radio_cfg);
2127
2128         sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2129         data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2130         data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2131         data->sku_cap_11n_enable = 0;
2132
2133         data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2134
2135         /* The address is stored as little-endian 16-bit words, so the bytes arrive in 2-1-4-3-6-5 order and must be swapped pairwise. */
2136         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2137                 IEEE80211_ADDR_COPY(hw_addr, nvm_hw + IWM_HW_ADDR);
2138                 data->hw_addr[0] = hw_addr[1];
2139                 data->hw_addr[1] = hw_addr[0];
2140                 data->hw_addr[2] = hw_addr[3];
2141                 data->hw_addr[3] = hw_addr[2];
2142                 data->hw_addr[4] = hw_addr[5];
2143                 data->hw_addr[5] = hw_addr[4];
2144         } else {
2145                 iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
2146         }
2147
2148         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2149                 memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2150                     IWM_NUM_CHANNELS * sizeof(uint16_t));
2151         } else {
2152                 memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2153                     IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2154         }
2155
2156         return 0;
2157 }
2158
2159 /*
2160  * END NVM PARSE
2161  */
2162
2163 static int
2164 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2165 {
2166         const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2167
2168         /* Checking for required sections */
2169         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2170                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2171                     !sections[IWM_NVM_SECTION_TYPE_HW].data) {
2172                         device_printf(sc->sc_dev,
2173                             "Can't parse empty OTP/NVM sections\n");
2174                         return ENOENT;
2175                 }
2176
2177                 hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
2178         } else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
2179                 /* SW and REGULATORY sections are mandatory */
2180                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2181                     !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2182                         device_printf(sc->sc_dev,
2183                             "Can't parse empty OTP/NVM sections\n");
2184                         return ENOENT;
2185                 }
2186                 /* MAC_OVERRIDE or at least HW section must exist */
2187                 if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
2188                     !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2189                         device_printf(sc->sc_dev,
2190                             "Can't parse mac_address, empty sections\n");
2191                         return ENOENT;
2192                 }
2193
2194                 /* PHY_SKU section is mandatory in B0 */
2195                 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2196                         device_printf(sc->sc_dev,
2197                             "Can't parse phy_sku in B0, empty sections\n");
2198                         return ENOENT;
2199                 }
2200
2201                 hw = (const uint16_t *)
2202                     sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
2203         } else {
2204                 panic("unknown device family %d\n", sc->sc_device_family);
2205         }
2206
2207         sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2208         calib = (const uint16_t *)
2209             sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2210         regulatory = (const uint16_t *)
2211             sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2212         mac_override = (const uint16_t *)
2213             sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2214         phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2215
2216         return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2217             phy_sku, regulatory);
2218 }
2219
2220 static int
2221 iwm_nvm_init(struct iwm_softc *sc)
2222 {
2223         struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
2224         int i, section, error;
2225         uint16_t len;
2226         uint8_t *buf;
2227         const size_t bufsz = IWM_MAX_NVM_SECTION_SIZE;
2228
2229         memset(nvm_sections, 0, sizeof(nvm_sections));
2230
2231         buf = malloc(bufsz, M_DEVBUF, M_NOWAIT);
2232         if (buf == NULL)
2233                 return ENOMEM;
2234
2235         for (i = 0; i < nitems(nvm_to_read); i++) {
2236                 section = nvm_to_read[i];
2237                 KASSERT(section < nitems(nvm_sections),
2238                     ("too many sections"));
2239
2240                 error = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
2241                 if (error) {
2242                         error = 0;
2243                         continue;
2244                 }
2245                 nvm_sections[section].data = malloc(len, M_DEVBUF, M_NOWAIT);
2246                 if (nvm_sections[section].data == NULL) {
2247                         error = ENOMEM;
2248                         break;
2249                 }
2250                 memcpy(nvm_sections[section].data, buf, len);
2251                 nvm_sections[section].length = len;
2252         }
2253         free(buf, M_DEVBUF);
2254         if (error == 0)
2255                 error = iwm_parse_nvm_sections(sc, nvm_sections);
2256
2257         for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
2258                 if (nvm_sections[i].data != NULL)
2259                         free(nvm_sections[i].data, M_DEVBUF);
2260         }
2261
2262         return error;
2263 }
2264
2265 /*
2266  * Firmware loading gunk.  This is kind of a weird hybrid between the
2267  * iwn driver and the Linux iwlwifi driver.
2268  */
2269
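     /*
      * DMA a firmware section to the device, splitting it into chunks no
      * larger than the service channel's maximum transfer size
      * (IWM_FH_MEM_TB_MAX_LENGTH).
      */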
2270 static int
2271 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
2272         const uint8_t *section, uint32_t byte_cnt)
2273 {
2274         int error = EINVAL;
2275         uint32_t chunk_sz, offset;
2276
2277         chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
2278
2279         for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
2280                 uint32_t addr, len;
2281                 const uint8_t *data;
2282
2283                 addr = dst_addr + offset;
2284                 len = MIN(chunk_sz, byte_cnt - offset);
2285                 data = section + offset;
2286
2287                 error = iwm_firmware_load_chunk(sc, addr, data, len);
2288                 if (error)
2289                         break;
2290         }
2291
2292         return error;
2293 }
2294
2295 static int
2296 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2297         const uint8_t *chunk, uint32_t byte_cnt)
2298 {
2299         struct iwm_dma_info *dma = &sc->fw_dma;
2300         int error = 0;
2301
2302         /* Copy firmware chunk into pre-allocated DMA-safe memory. */
2303         memcpy(dma->vaddr, chunk, byte_cnt);
2304         bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2305
2306         if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2307             dst_addr <= IWM_FW_MEM_EXTENDED_END) {
2308                 iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2309                     IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2310         }
2311
2312         sc->sc_fw_chunk_done = 0;
2313
2314         if (!iwm_nic_lock(sc))
2315                 return EBUSY;
2316
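             /*
              * Program the service DMA channel: pause it, point it at the
              * destination SRAM address and at the host-side chunk buffer,
              * set the transfer byte count, mark the TFD valid, and finally
              * re-enable the channel so it copies the chunk and raises an
              * end-of-TFD interrupt when done.
              */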
2317         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2318             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2319         IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2320             dst_addr);
2321         IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2322             dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2323         IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2324             (iwm_get_dma_hi_addr(dma->paddr)
2325               << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2326         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2327             1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2328             1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2329             IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2330         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2331             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2332             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2333             IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2334
2335         iwm_nic_unlock(sc);
2336
2337         /* wait 1s for this segment to load */
2338         while (!sc->sc_fw_chunk_done)
2339                 if ((error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz)) != 0)
2340                         break;
2341
2342         if (!sc->sc_fw_chunk_done) {
2343                 device_printf(sc->sc_dev,
2344                     "fw chunk addr 0x%x len %d failed to load\n",
2345                     dst_addr, byte_cnt);
2346         }
2347
2348         if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2349             dst_addr <= IWM_FW_MEM_EXTENDED_END && iwm_nic_lock(sc)) {
2350                 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2351                     IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2352                 iwm_nic_unlock(sc);
2353         }
2354
2355         return error;
2356 }
2357
2358 int
2359 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
2360     int cpu, int *first_ucode_section)
2361 {
2362         int shift_param;
2363         int i, error = 0, sec_num = 0x1;
2364         uint32_t val, last_read_idx = 0;
2365         const void *data;
2366         uint32_t dlen;
2367         uint32_t offset;
2368
2369         if (cpu == 1) {
2370                 shift_param = 0;
2371                 *first_ucode_section = 0;
2372         } else {
2373                 shift_param = 16;
2374                 (*first_ucode_section)++;
2375         }
2376
2377         for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
2378                 last_read_idx = i;
2379                 data = fws->fw_sect[i].fws_data;
2380                 dlen = fws->fw_sect[i].fws_len;
2381                 offset = fws->fw_sect[i].fws_devoff;
2382
2383                 /*
2384                  * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2385                  * CPU1 to CPU2.
2386                  * PAGING_SEPARATOR_SECTION delimiter - separate between
2387                  * CPU2 non paged to CPU2 paging sec.
2388                  */
2389                 if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2390                     offset == IWM_PAGING_SEPARATOR_SECTION)
2391                         break;
2392
2393                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2394                     "LOAD FIRMWARE chunk %d offset 0x%x len %d for cpu %d\n",
2395                     i, offset, dlen, cpu);
2396
2397                 if (dlen > sc->sc_fwdmasegsz) {
2398                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2399                             "chunk %d too large (%d bytes)\n", i, dlen);
2400                         error = EFBIG;
2401                 } else {
2402                         error = iwm_firmware_load_sect(sc, offset, data, dlen);
2403                 }
2404                 if (error) {
2405                         device_printf(sc->sc_dev,
2406                             "could not load firmware chunk %d (error %d)\n",
2407                             i, error);
2408                         return error;
2409                 }
2410
2411                 /* Notify the ucode of the loaded section number and status */
2412                 if (iwm_nic_lock(sc)) {
2413                         val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2414                         val = val | (sec_num << shift_param);
2415                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2416                         sec_num = (sec_num << 1) | 0x1;
2417                         iwm_nic_unlock(sc);
2418
2419                         /*
2420                          * The firmware won't load correctly without this delay.
2421                          */
2422                         DELAY(8000);
2423                 }
2424         }
2425
2426         *first_ucode_section = last_read_idx;
2427
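             /*
              * Writing all-ones to IWM_FH_UCODE_LOAD_STATUS below signals
              * that every section for this CPU has been loaded (0xFFFF for
              * CPU1, widened to 0xFFFFFFFF once CPU2 is done as well).
              */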
2428         if (iwm_nic_lock(sc)) {
2429                 if (cpu == 1)
2430                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2431                 else
2432                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2433                 iwm_nic_unlock(sc);
2434         }
2435
2436         return 0;
2437 }
2438
2439 int
2440 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2441 {
2442         struct iwm_fw_sects *fws;
2443         int error = 0;
2444         int first_ucode_section;
2445
2446         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "loading ucode type %d\n",
2447             ucode_type);
2448
2449         fws = &sc->sc_fw.fw_sects[ucode_type];
2450
2451         /* configure the ucode to be ready to get the secured image */
2452         /* release CPU reset */
2453         iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
2454
2455         /* load to FW the binary Secured sections of CPU1 */
2456         error = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
2457         if (error)
2458                 return error;
2459
2460         /* load to FW the binary sections of CPU2 */
2461         return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
2462 }
2463
2464 static int
2465 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2466 {
2467         struct iwm_fw_sects *fws;
2468         int error, i;
2469         const void *data;
2470         uint32_t dlen;
2471         uint32_t offset;
2472
2473         sc->sc_uc.uc_intr = 0;
2474
2475         fws = &sc->sc_fw.fw_sects[ucode_type];
2476         for (i = 0; i < fws->fw_count; i++) {
2477                 data = fws->fw_sect[i].fws_data;
2478                 dlen = fws->fw_sect[i].fws_len;
2479                 offset = fws->fw_sect[i].fws_devoff;
2480                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2481                     "LOAD FIRMWARE type %d offset %u len %d\n",
2482                     ucode_type, offset, dlen);
2483                 if (dlen > sc->sc_fwdmasegsz) {
2484                         IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2485                             "chunk %d too large (%d bytes)\n", i, dlen);
2486                         error = EFBIG;
2487                 } else {
2488                         error = iwm_firmware_load_sect(sc, offset, data, dlen);
2489                 }
2490                 if (error) {
2491                         device_printf(sc->sc_dev,
2492                             "could not load firmware chunk %u of %u "
2493                             "(error=%d)\n", i, fws->fw_count, error);
2494                         return error;
2495                 }
2496         }
2497
2498         IWM_WRITE(sc, IWM_CSR_RESET, 0);
2499
2500         return 0;
2501 }
2502
2503 static int
2504 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2505 {
2506         int error, w;
2507
2508         if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
2509                 error = iwm_load_firmware_8000(sc, ucode_type);
2510         else
2511                 error = iwm_load_firmware_7000(sc, ucode_type);
2512         if (error)
2513                 return error;
2514
2515         /* wait for the firmware to load */
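             /* (ten sleeps of hz/10 ticks each, i.e. roughly one second) */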
2516         for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
2517                 error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
2518         }
2519         if (error || !sc->sc_uc.uc_ok) {
2520                 device_printf(sc->sc_dev, "could not load firmware\n");
2521                 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
2522                         device_printf(sc->sc_dev, "cpu1 status: 0x%x\n",
2523                             iwm_read_prph(sc, IWM_SB_CPU_1_STATUS));
2524                         device_printf(sc->sc_dev, "cpu2 status: 0x%x\n",
2525                             iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
2526                 }
2527         }
2528
2529         /*
2530          * Give the firmware some time to initialize.
2531          * Accessing it too early causes errors.
2532          */
2533         msleep(&w, &sc->sc_mtx, 0, "iwmfwinit", hz);
2534
2535         return error;
2536 }
2537
2538 /* iwlwifi: pcie/trans.c */
2539 static int
2540 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2541 {
2542         int error;
2543
2544         IWM_WRITE(sc, IWM_CSR_INT, ~0);
2545
2546         if ((error = iwm_nic_init(sc)) != 0) {
2547                 device_printf(sc->sc_dev, "unable to init nic\n");
2548                 return error;
2549         }
2550
2551         /* make sure rfkill handshake bits are cleared */
2552         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2553         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2554             IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2555
2556         /* clear (again), then enable host interrupts */
2557         IWM_WRITE(sc, IWM_CSR_INT, ~0);
2558         iwm_enable_interrupts(sc);
2559
2560         /* really make sure rfkill handshake bits are cleared */
2561         /* maybe we should write a few times more?  just to make sure */
2562         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2563         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2564
2565         /* Load the given image to the HW */
2566         return iwm_load_firmware(sc, ucode_type);
2567 }
2568
2569 static int
2570 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2571 {
2572         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2573                 .valid = htole32(valid_tx_ant),
2574         };
2575
2576         return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2577             IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2578 }
2579
2580 /* iwlwifi: mvm/fw.c */
2581 static int
2582 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2583 {
2584         struct iwm_phy_cfg_cmd phy_cfg_cmd;
2585         enum iwm_ucode_type ucode_type = sc->sc_uc_current;
2586
2587         /* Set parameters */
2588         phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
2589         phy_cfg_cmd.calib_control.event_trigger =
2590             sc->sc_default_calib[ucode_type].event_trigger;
2591         phy_cfg_cmd.calib_control.flow_trigger =
2592             sc->sc_default_calib[ucode_type].flow_trigger;
2593
2594         IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2595             "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2596         return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2597             sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2598 }
2599
2600 static int
2601 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2602         enum iwm_ucode_type ucode_type)
2603 {
2604         enum iwm_ucode_type old_type = sc->sc_uc_current;
2605         int error;
2606
2607         if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2608                 device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
2609                         error);
2610                 return error;
2611         }
2612
2613         sc->sc_uc_current = ucode_type;
2614         error = iwm_start_fw(sc, ucode_type);
2615         if (error) {
2616                 device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2617                 sc->sc_uc_current = old_type;
2618                 return error;
2619         }
2620
2621         error = iwm_post_alive(sc);
2622         if (error) {
2623                 device_printf(sc->sc_dev, "iwm_fw_alive: failed %d\n", error);
2624         }
2625         return error;
2626 }
2627
2628 /*
2629  * mvm misc bits
2630  */
2631
2632 /*
2633  * follows iwlwifi/fw.c
2634  */
2635 static int
2636 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2637 {
2638         int error;
2639
2640         /* do not operate with rfkill switch turned on */
2641         if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2642                 device_printf(sc->sc_dev,
2643                     "radio is disabled by hardware switch\n");
2644                 return EPERM;
2645         }
2646
2647         sc->sc_init_complete = 0;
2648         if ((error = iwm_mvm_load_ucode_wait_alive(sc,
2649             IWM_UCODE_TYPE_INIT)) != 0) {
2650                 device_printf(sc->sc_dev, "failed to load init firmware\n");
2651                 return error;
2652         }
2653
2654         if (justnvm) {
2655                 if ((error = iwm_nvm_init(sc)) != 0) {
2656                         device_printf(sc->sc_dev, "failed to read nvm\n");
2657                         return error;
2658                 }
2659                 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->sc_nvm.hw_addr);
2660
2661                 return 0;
2662         }
2663
2664         if ((error = iwm_send_bt_init_conf(sc)) != 0) {
2665                 device_printf(sc->sc_dev,
2666                     "failed to send bt coex configuration: %d\n", error);
2667                 return error;
2668         }
2669
2670         /* Init Smart FIFO. */
2671         error = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
2672         if (error != 0)
2673                 return error;
2674
2675         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2676             "%s: phy_txant=0x%08x, nvm_valid_tx_ant=0x%02x, valid=0x%02x\n",
2677             __func__,
2678             ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
2679               >> IWM_FW_PHY_CFG_TX_CHAIN_POS),
2680             sc->sc_nvm.valid_tx_ant,
2681             iwm_fw_valid_tx_ant(sc));
2682
2683
2684         /* Send TX valid antennas before triggering calibrations */
2685         if ((error = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc))) != 0) {
2686                 device_printf(sc->sc_dev,
2687                     "failed to send antennas before calibration: %d\n", error);
2688                 return error;
2689         }
2690
2691         /*
2692          * Send phy configurations command to init uCode
2693          * to start the 16.0 uCode init image internal calibrations.
2694          */
2695         if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
2696                 device_printf(sc->sc_dev,
2697                     "%s: failed to run internal calibration: %d\n",
2698                     __func__, error);
2699                 return error;
2700         }
2701
2702         /*
2703          * Nothing to do but wait for the init complete notification
2704          * from the firmware
2705          */
2706         while (!sc->sc_init_complete) {
2707                 error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
2708                                  0, "iwminit", 2*hz);
2709                 if (error) {
2710                         device_printf(sc->sc_dev, "init complete failed: %d\n",
2711                                 error);
2712                         break;
2713                 }
2714         }
2715
2716         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "init %scomplete\n",
2717             sc->sc_init_complete ? "" : "not ");
2718
2719         return error;
2720 }
2721
2722 /*
2723  * receive side
2724  */
2725
2726 /* (re)stock rx ring, called at init-time and at runtime */
2727 static int
2728 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
2729 {
2730         struct iwm_rx_ring *ring = &sc->rxq;
2731         struct iwm_rx_data *data = &ring->data[idx];
2732         struct mbuf *m;
2733         bus_dmamap_t dmamap = NULL;
2734         bus_dma_segment_t seg;
2735         int nsegs, error;
2736
2737         m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
2738         if (m == NULL)
2739                 return ENOBUFS;
2740
2741         m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2742         error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
2743             &seg, &nsegs, BUS_DMA_NOWAIT);
2744         if (error != 0) {
2745                 device_printf(sc->sc_dev,
2746                     "%s: can't map mbuf, error %d\n", __func__, error);
2747                 goto fail;
2748         }
2749
2750         if (data->m != NULL)
2751                 bus_dmamap_unload(ring->data_dmat, data->map);
2752
2753         /* Swap ring->spare_map with data->map */
2754         dmamap = data->map;
2755         data->map = ring->spare_map;
2756         ring->spare_map = dmamap;
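        /*
         * Loading into ring->spare_map first means that a failed load above
         * leaves the previously mapped buffer untouched, so the ring never
         * ends up with an unmapped slot.
         */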
2757
2758         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
2759         data->m = m;
2760
2761         /* Update RX descriptor. */
2762         KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
2763         ring->desc[idx] = htole32(seg.ds_addr >> 8);
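        /*
         * The hardware takes RX buffer addresses as phys >> 8, which is why
         * the 256-byte alignment is asserted just above.
         */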
2764         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2765             BUS_DMASYNC_PREWRITE);
2766
2767         return 0;
2768 fail:
2769         m_freem(m);
2770         return error;
2771 }
2772
2773 /* iwlwifi: mvm/rx.c */
2774 #define IWM_RSSI_OFFSET 50
2775 static int
2776 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2777 {
2778         int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
2779         uint32_t agc_a, agc_b;
2780         uint32_t val;
2781
2782         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
2783         agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
2784         agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
2785
2786         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
2787         rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
2788         rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
2789
2790         /*
2791          * dBm = rssi dB - agc dB - constant.
2792          * Higher AGC (higher radio gain) means lower signal.
2793          */
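        /*
         * Illustrative example (hypothetical register values): with
         * rssi_a = 45 and agc_a = 15, rssi_a_dbm = 45 - 50 - 15 = -20 dBm.
         * A larger AGC value for the same raw RSSI yields a lower (more
         * negative) dBm figure, matching the note above.
         */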
2794         rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
2795         rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
2796         max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
2797
2798         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2799             "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
2800             rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
2801
2802         return max_rssi_dbm;
2803 }
2804
2805 /* iwlwifi: mvm/rx.c */
2806 /*
2807  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
2808  * Values are reported by the fw as positive values - negate them
2809  * to obtain their dBm.  Account for missing antennas by replacing 0
2810  * values with -256 dBm: practically zero power and an infeasible 8-bit value.
2811  */
2812 static int
2813 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2814 {
2815         int energy_a, energy_b, energy_c, max_energy;
2816         uint32_t val;
2817
2818         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
2819         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
2820             IWM_RX_INFO_ENERGY_ANT_A_POS;
2821         energy_a = energy_a ? -energy_a : -256;
2822         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
2823             IWM_RX_INFO_ENERGY_ANT_B_POS;
2824         energy_b = energy_b ? -energy_b : -256;
2825         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
2826             IWM_RX_INFO_ENERGY_ANT_C_POS;
2827         energy_c = energy_c ? -energy_c : -256;
2828         max_energy = MAX(energy_a, energy_b);
2829         max_energy = MAX(max_energy, energy_c);
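        /*
         * Worked example (hypothetical register value): if val decodes to
         * energy_a = 60, energy_b = 45 and energy_c = 0, the reported
         * strengths become -60, -45 and -256 dBm respectively, and
         * max_energy ends up as -45 dBm.
         */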
2830
2831         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2832             "energy In A %d B %d C %d , and max %d\n",
2833             energy_a, energy_b, energy_c, max_energy);
2834
2835         return max_energy;
2836 }
2837
2838 static void
2839 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
2840         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2841 {
2842         struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
2843
2844         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
2845         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2846
2847         memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
2848 }
2849
2850 /*
2851  * Retrieve the average noise (in dBm) among receivers.
2852  */
2853 static int
2854 iwm_get_noise(struct iwm_softc *sc,
2855     const struct iwm_mvm_statistics_rx_non_phy *stats)
2856 {
2857         int i, total, nbant, noise;
2858
2859         total = nbant = noise = 0;
2860         for (i = 0; i < 3; i++) {
2861                 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
2862                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
2863                     __func__,
2864                     i,
2865                     noise);
2866
2867                 if (noise) {
2868                         total += noise;
2869                         nbant++;
2870                 }
2871         }
2872
2873         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
2874             __func__, nbant, total);
2875 #if 0
2876         /* There should be at least one antenna but check anyway. */
2877         return (nbant == 0) ? -127 : (total / nbant) - 107;
2878 #else
2879         /* For now, just hard-code it to -96 to be safe */
2880         return (-96);
2881 #endif
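        /*
         * For reference, the disabled averaging path above would turn
         * beacon-silence RSSI readings of, say, 40 and 42 on two active
         * antennas into (40 + 42) / 2 - 107 = -66 dBm (illustrative
         * numbers only).
         */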
2882 }
2883
2884 /*
2885  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
2886  *
2887  * Handles the actual data of the Rx packet from the fw
2888  */
2889 static void
2890 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
2891         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2892 {
2893         struct ieee80211com *ic = &sc->sc_ic;
2894         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2895         struct ieee80211_frame *wh;
2896         struct ieee80211_node *ni;
2897         struct ieee80211_rx_stats rxs;
2898         struct mbuf *m;
2899         struct iwm_rx_phy_info *phy_info;
2900         struct iwm_rx_mpdu_res_start *rx_res;
2901         uint32_t len;
2902         uint32_t rx_pkt_status;
2903         int rssi;
2904
2905         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2906
2907         phy_info = &sc->sc_last_phy_info;
2908         rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
2909         wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
2910         len = le16toh(rx_res->byte_count);
2911         rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
2912
2913         m = data->m;
2914         m->m_data = pkt->data + sizeof(*rx_res);
2915         m->m_pkthdr.len = m->m_len = len;
2916
2917         if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
2918                 device_printf(sc->sc_dev,
2919                     "dsp size out of range [0,20]: %d\n",
2920                     phy_info->cfg_phy_cnt);
2921                 return;
2922         }
2923
2924         if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
2925             !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
2926                 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2927                     "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
2928                 return; /* drop */
2929         }
2930
2931         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
2932                 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
2933         } else {
2934                 rssi = iwm_mvm_calc_rssi(sc, phy_info);
2935         }
2936
2937         /* Note: RSSI is absolute (i.e. a negative dBm value) */
2938         if (rssi < IWM_MIN_DBM)
2939                 rssi = IWM_MIN_DBM;
2940         else if (rssi > IWM_MAX_DBM)
2941                 rssi = IWM_MAX_DBM;
2942
2943         /* Map it to relative value */
2944         rssi = rssi - sc->sc_noise;
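        /*
         * Example (illustrative values): with rssi = -60 dBm and
         * sc_noise = -96 dBm the relative value is 36, which net80211
         * later sees as 72 in the half-dB units filled in below.
         */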
2945
2946         /* replenish ring for the buffer we're going to feed to the sharks */
2947         if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
2948                 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
2949                     __func__);
2950                 return;
2951         }
2952
2953         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2954             "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
2955
2956         ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
2957
2958         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2959             "%s: phy_info: channel=%d, flags=0x%08x\n",
2960             __func__,
2961             le16toh(phy_info->channel),
2962             le16toh(phy_info->phy_flags));
2963
2964         /*
2965          * Populate an RX state struct with the provided information.
2966          */
2967         bzero(&rxs, sizeof(rxs));
2968         rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
2969         rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
2970         rxs.c_ieee = le16toh(phy_info->channel);
2971         if (phy_info->phy_flags & htole16(IWM_RX_RES_PHY_FLAGS_BAND_24)) {
2972                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
2973         } else {
2974                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
2975         }
2976
2977         /* rssi is in 1/2db units */
2978         rxs.rssi = rssi * 2;
2979         rxs.nf = sc->sc_noise;
2980
2981         if (ieee80211_radiotap_active_vap(vap)) {
2982                 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
2983
2984                 tap->wr_flags = 0;
2985                 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
2986                         tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2987                 tap->wr_chan_freq = htole16(rxs.c_freq);
2988                 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
2989                 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
2990                 tap->wr_dbm_antsignal = (int8_t)rssi;
2991                 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
2992                 tap->wr_tsft = phy_info->system_timestamp;
2993                 switch (phy_info->rate) {
2994                 /* CCK rates. */
2995                 case  10: tap->wr_rate =   2; break;
2996                 case  20: tap->wr_rate =   4; break;
2997                 case  55: tap->wr_rate =  11; break;
2998                 case 110: tap->wr_rate =  22; break;
2999                 /* OFDM rates. */
3000                 case 0xd: tap->wr_rate =  12; break;
3001                 case 0xf: tap->wr_rate =  18; break;
3002                 case 0x5: tap->wr_rate =  24; break;
3003                 case 0x7: tap->wr_rate =  36; break;
3004                 case 0x9: tap->wr_rate =  48; break;
3005                 case 0xb: tap->wr_rate =  72; break;
3006                 case 0x1: tap->wr_rate =  96; break;
3007                 case 0x3: tap->wr_rate = 108; break;
3008                 /* Unknown rate: should not happen. */
3009                 default:  tap->wr_rate =   0;
3010                 }
3011         }
3012
3013         IWM_UNLOCK(sc);
3014         if (ni != NULL) {
3015                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3016                 ieee80211_input_mimo(ni, m, &rxs);
3017                 ieee80211_free_node(ni);
3018         } else {
3019                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3020                 ieee80211_input_mimo_all(ic, m, &rxs);
3021         }
3022         IWM_LOCK(sc);
3023 }
3024
3025 static int
3026 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3027         struct iwm_node *in)
3028 {
3029         struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3030         struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
3031         struct ieee80211_node *ni = &in->in_ni;
3032         int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3033
3034         KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3035
3036         /* Update rate control statistics. */
3037         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3038             __func__,
3039             (int) le16toh(tx_resp->status.status),
3040             (int) le16toh(tx_resp->status.sequence),
3041             tx_resp->frame_count,
3042             tx_resp->bt_kill_count,
3043             tx_resp->failure_rts,
3044             tx_resp->failure_frame,
3045             le32toh(tx_resp->initial_rate),
3046             (int) le16toh(tx_resp->wireless_media_time));
3047
3048         txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
3049                      IEEE80211_RATECTL_STATUS_LONG_RETRY;
3050         txs->short_retries = tx_resp->failure_rts;
3051         txs->long_retries = tx_resp->failure_frame;
3052         if (status != IWM_TX_STATUS_SUCCESS &&
3053             status != IWM_TX_STATUS_DIRECT_DONE) {
3054                 switch (status) {
3055                 case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
3056                         txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
3057                         break;
3058                 case IWM_TX_STATUS_FAIL_LONG_LIMIT:
3059                         txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
3060                         break;
3061                 case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
3062                         txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
3063                         break;
3064                 default:
3065                         txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
3066                         break;
3067                 }
3068         } else {
3069                 txs->status = IEEE80211_RATECTL_TX_SUCCESS;
3070         }
3071         ieee80211_ratectl_tx_complete(ni, txs);
3072
3073         return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
3074 }
3075
3076 static void
3077 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
3078         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3079 {
3080         struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3081         int idx = cmd_hdr->idx;
3082         int qid = cmd_hdr->qid;
3083         struct iwm_tx_ring *ring = &sc->txq[qid];
3084         struct iwm_tx_data *txd = &ring->data[idx];
3085         struct iwm_node *in = txd->in;
3086         struct mbuf *m = txd->m;
3087         int status;
3088
3089         KASSERT(txd->done == 0, ("txd not done"));
3090         KASSERT(txd->in != NULL, ("txd without node"));
3091         KASSERT(txd->m != NULL, ("txd without mbuf"));
3092
3093         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3094
3095         sc->sc_tx_timer = 0;
3096
3097         status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3098
3099         /* Unmap and free mbuf. */
3100         bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3101         bus_dmamap_unload(ring->data_dmat, txd->map);
3102
3103         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3104             "free txd %p, in %p\n", txd, txd->in);
3105         txd->done = 1;
3106         txd->m = NULL;
3107         txd->in = NULL;
3108
3109         ieee80211_tx_complete(&in->in_ni, m, status);
3110
3111         if (--ring->queued < IWM_TX_RING_LOMARK) {
3112                 sc->qfullmsk &= ~(1 << ring->qid);
3113                 if (sc->qfullmsk == 0) {
3114                         /*
3115                          * Well, we're in interrupt context, but then again
3116                          * I guess net80211 does all sorts of stunts in
3117                          * interrupt context, so maybe this is no biggie.
3118                          */
3119                         iwm_start(sc);
3120                 }
3121         }
3122 }
3123
3124 /*
3125  * transmit side
3126  */
3127
3128 /*
3129  * Process a "command done" firmware notification.  This is where we wake up
3130  * processes waiting for a synchronous command completion.
3131  * (Adapted from if_iwn.)
3132  */
3133 static void
3134 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3135 {
3136         struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3137         struct iwm_tx_data *data;
3138
3139         if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3140                 return; /* Not a command ack. */
3141         }
3142
3143         /* XXX wide commands? */
3144         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3145             "cmd notification type 0x%x qid %d idx %d\n",
3146             pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3147
3148         data = &ring->data[pkt->hdr.idx];
3149
3150         /* If the command was mapped in an mbuf, free it. */
3151         if (data->m != NULL) {
3152                 bus_dmamap_sync(ring->data_dmat, data->map,
3153                     BUS_DMASYNC_POSTWRITE);
3154                 bus_dmamap_unload(ring->data_dmat, data->map);
3155                 m_freem(data->m);
3156                 data->m = NULL;
3157         }
3158         wakeup(&ring->desc[pkt->hdr.idx]);
3159 }
3160
3161 #if 0
3162 /*
3163  * necessary only for block ack mode
3164  */
3165 void
3166 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3167         uint16_t len)
3168 {
3169         struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3170         uint16_t w_val;
3171
3172         scd_bc_tbl = sc->sched_dma.vaddr;
3173
3174         len += 8; /* magic numbers came naturally from paris */
3175         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
3176                 len = roundup(len, 4) / 4;
3177
3178         w_val = htole16(sta_id << 12 | len);
3179
3180         /* Update TX scheduler. */
3181         scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3182         bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3183             BUS_DMASYNC_PREWRITE);
3184
3185         /* I really wonder what this is ?!? */
3186         if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3187                 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3188                 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3189                     BUS_DMASYNC_PREWRITE);
3190         }
3191 }
3192 #endif
3193
3194 /*
3195  * Take an 802.11 (non-11n) rate and find the relevant rate
3196  * table entry.  Return the index into in_ridx[].
3197  *
3198  * The caller then uses that index back into in_ridx
3199  * to figure out the rate index programmed /into/
3200  * the firmware for this given node.
3201  */
3202 static int
3203 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3204     uint8_t rate)
3205 {
3206         int i;
3207         uint8_t r;
3208
3209         for (i = 0; i < nitems(in->in_ridx); i++) {
3210                 r = iwm_rates[in->in_ridx[i]].rate;
3211                 if (rate == r)
3212                         return (i);
3213         }
3214
3215         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3216             "%s: couldn't find an entry for rate=%d\n",
3217             __func__,
3218             rate);
3219
3220         /* XXX Return the first */
3221         /* XXX TODO: have it return the /lowest/ */
3222         return (0);
3223 }
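
/*
 * Usage sketch for the lookup above (hypothetical values): if in_ridx[]
 * was filled with the indices for 54, 48 and 36 Mb/s (in that order) and
 * rate control picks 48 Mb/s, the lookup returns 1; iwm_tx_fill_cmd()
 * then programs that as tx->initial_rate_index so the firmware starts at
 * the same row of the node's rate table.
 */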
3224
3225 static int
3226 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3227 {
3228         int i;
3229
3230         for (i = 0; i < nitems(iwm_rates); i++) {
3231                 if (iwm_rates[i].rate == rate)
3232                         return (i);
3233         }
3234         /* XXX error? */
3235         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3236             "%s: couldn't find an entry for rate=%d\n",
3237             __func__,
3238             rate);
3239         return (0);
3240 }
3241
3242 /*
3243  * Fill in the rate related information for a transmit command.
3244  */
3245 static const struct iwm_rate *
3246 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3247         struct mbuf *m, struct iwm_tx_cmd *tx)
3248 {
3249         struct ieee80211_node *ni = &in->in_ni;
3250         struct ieee80211_frame *wh;
3251         const struct ieee80211_txparam *tp = ni->ni_txparms;
3252         const struct iwm_rate *rinfo;
3253         int type;
3254         int ridx, rate_flags;
3255
3256         wh = mtod(m, struct ieee80211_frame *);
3257         type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3258
3259         tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3260         tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3261
3262         if (type == IEEE80211_FC0_TYPE_MGT) {
3263                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3264                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3265                     "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3266         } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3267                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3268                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3269                     "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3270         } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3271                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3272                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3273                     "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3274         } else if (m->m_flags & M_EAPOL) {
3275                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3276                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3277                     "%s: EAPOL\n", __func__);
3278         } else if (type == IEEE80211_FC0_TYPE_DATA) {
3279                 int i;
3280
3281                 /* for data frames, use RS table */
3282                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3283                 /* XXX pass pktlen */
3284                 (void) ieee80211_ratectl_rate(ni, NULL, 0);
3285                 i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3286                 ridx = in->in_ridx[i];
3287
3288                 /* This is the index into the programmed table */
3289                 tx->initial_rate_index = i;
3290                 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3291
3292                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3293                     "%s: start with i=%d, txrate %d\n",
3294                     __func__, i, iwm_rates[ridx].rate);
3295         } else {
3296                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3297                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DEFAULT (%d)\n",
3298                     __func__, tp->mgmtrate);
3299         }
3300
3301         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3302             "%s: frame type=%d txrate %d\n",
3303                 __func__, type, iwm_rates[ridx].rate);
3304
3305         rinfo = &iwm_rates[ridx];
3306
3307         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3308             __func__, ridx,
3309             rinfo->rate,
3310             !! (IWM_RIDX_IS_CCK(ridx))
3311             );
3312
3313         /* XXX TODO: hard-coded TX antenna? */
3314         rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3315         if (IWM_RIDX_IS_CCK(ridx))
3316                 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3317         tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3318
3319         return rinfo;
3320 }
3321
3322 #define TB0_SIZE 16
3323 static int
3324 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3325 {
3326         struct ieee80211com *ic = &sc->sc_ic;
3327         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3328         struct iwm_node *in = IWM_NODE(ni);
3329         struct iwm_tx_ring *ring;
3330         struct iwm_tx_data *data;
3331         struct iwm_tfd *desc;
3332         struct iwm_device_cmd *cmd;
3333         struct iwm_tx_cmd *tx;
3334         struct ieee80211_frame *wh;
3335         struct ieee80211_key *k = NULL;
3336         struct mbuf *m1;
3337         const struct iwm_rate *rinfo;
3338         uint32_t flags;
3339         u_int hdrlen;
3340         bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3341         int nsegs;
3342         uint8_t tid, type;
3343         int i, totlen, error, pad;
3344
3345         wh = mtod(m, struct ieee80211_frame *);
3346         hdrlen = ieee80211_anyhdrsize(wh);
3347         type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3348         tid = 0;
3349         ring = &sc->txq[ac];
3350         desc = &ring->desc[ring->cur];
3351         memset(desc, 0, sizeof(*desc));
3352         data = &ring->data[ring->cur];
3353
3354         /* Fill out iwm_tx_cmd to send to the firmware */
3355         cmd = &ring->cmd[ring->cur];
3356         cmd->hdr.code = IWM_TX_CMD;
3357         cmd->hdr.flags = 0;
3358         cmd->hdr.qid = ring->qid;
3359         cmd->hdr.idx = ring->cur;
3360
3361         tx = (void *)cmd->data;
3362         memset(tx, 0, sizeof(*tx));
3363
3364         rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
3365
3366         /* Encrypt the frame if need be. */
3367         if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3368                 /* Retrieve key for TX && do software encryption. */
3369                 k = ieee80211_crypto_encap(ni, m);
3370                 if (k == NULL) {
3371                         m_freem(m);
3372                         return (ENOBUFS);
3373                 }
3374                 /* 802.11 header may have moved. */
3375                 wh = mtod(m, struct ieee80211_frame *);
3376         }
3377
3378         if (ieee80211_radiotap_active_vap(vap)) {
3379                 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3380
3381                 tap->wt_flags = 0;
3382                 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3383                 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3384                 tap->wt_rate = rinfo->rate;
3385                 if (k != NULL)
3386                         tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3387                 ieee80211_radiotap_tx(vap, m);
3388         }
3389
3390
3391         totlen = m->m_pkthdr.len;
3392
3393         flags = 0;
3394         if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3395                 flags |= IWM_TX_CMD_FLG_ACK;
3396         }
3397
3398         if (type == IEEE80211_FC0_TYPE_DATA
3399             && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3400             && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3401                 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3402         }
3403
3404         if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3405             type != IEEE80211_FC0_TYPE_DATA)
3406                 tx->sta_id = sc->sc_aux_sta.sta_id;
3407         else
3408                 tx->sta_id = IWM_STATION_ID;
3409
3410         if (type == IEEE80211_FC0_TYPE_MGT) {
3411                 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3412
3413                 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3414                     subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3415                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3416                 } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3417                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3418                 } else {
3419                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3420                 }
3421         } else {
3422                 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3423         }
3424
3425         if (hdrlen & 3) {
3426                 /* First segment length must be a multiple of 4. */
3427                 flags |= IWM_TX_CMD_FLG_MH_PAD;
3428                 pad = 4 - (hdrlen & 3);
3429         } else
3430                 pad = 0;
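        /*
         * Example: a QoS data header is 26 bytes, so hdrlen & 3 == 2 and
         * pad == 2; a plain 24-byte data header needs no padding at all.
         */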
3431
3432         tx->driver_txop = 0;
3433         tx->next_frame_len = 0;
3434
3435         tx->len = htole16(totlen);
3436         tx->tid_tspec = tid;
3437         tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3438
3439         /* Set physical address of "scratch area". */
3440         tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3441         tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3442
3443         /* Copy 802.11 header in TX command. */
3444         memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3445
3446         flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3447
3448         tx->sec_ctl = 0;
3449         tx->tx_flags |= htole32(flags);
3450
3451         /* Trim 802.11 header. */
3452         m_adj(m, hdrlen);
3453         error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3454             segs, &nsegs, BUS_DMA_NOWAIT);
3455         if (error != 0) {
3456                 if (error != EFBIG) {
3457                         device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3458                             error);
3459                         m_freem(m);
3460                         return error;
3461                 }
3462                 /* Too many DMA segments, linearize mbuf. */
3463                 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3464                 if (m1 == NULL) {
3465                         device_printf(sc->sc_dev,
3466                             "%s: could not defrag mbuf\n", __func__);
3467                         m_freem(m);
3468                         return (ENOBUFS);
3469                 }
3470                 m = m1;
3471
3472                 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3473                     segs, &nsegs, BUS_DMA_NOWAIT);
3474                 if (error != 0) {
3475                         device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3476                             error);
3477                         m_freem(m);
3478                         return error;
3479                 }
3480         }
3481         data->m = m;
3482         data->in = in;
3483         data->done = 0;
3484
3485         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3486             "sending txd %p, in %p\n", data, data->in);
3487         KASSERT(data->in != NULL, ("node is NULL"));
3488
3489         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3490             "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3491             ring->qid, ring->cur, totlen, nsegs,
3492             le32toh(tx->tx_flags),
3493             le32toh(tx->rate_n_flags),
3494             tx->initial_rate_index
3495             );
3496
3497         /* Fill TX descriptor. */
3498         desc->num_tbs = 2 + nsegs;
3499
3500         desc->tbs[0].lo = htole32(data->cmd_paddr);
3501         desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3502             (TB0_SIZE << 4);
3503         desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3504         desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3505             ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3506               + hdrlen + pad - TB0_SIZE) << 4);
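        /*
         * Layout sketch: TB0 maps the first TB0_SIZE (16) bytes of the
         * command buffer, TB1 maps the rest of the command header, the TX
         * command and the (padded) 802.11 header, and TB2..TBn map the mbuf
         * data segments filled in just below.  The segment length lives in
         * the upper 12 bits of hi_n_len, hence the << 4 shifts.
         */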
3507
3508         /* Other DMA segments are for data payload. */
3509         for (i = 0; i < nsegs; i++) {
3510                 seg = &segs[i];
3511                 desc->tbs[i+2].lo = htole32(seg->ds_addr);
3512                 desc->tbs[i+2].hi_n_len = \
3513                     htole16(iwm_get_dma_hi_addr(seg->ds_addr))
3514                     | ((seg->ds_len) << 4);
3515         }
3516
3517         bus_dmamap_sync(ring->data_dmat, data->map,
3518             BUS_DMASYNC_PREWRITE);
3519         bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3520             BUS_DMASYNC_PREWRITE);
3521         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3522             BUS_DMASYNC_PREWRITE);
3523
3524 #if 0
3525         iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3526 #endif
3527
3528         /* Kick TX ring. */
3529         ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3530         IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3531
3532         /* Mark TX ring as full if we reach a certain threshold. */
3533         if (++ring->queued > IWM_TX_RING_HIMARK) {
3534                 sc->qfullmsk |= 1 << ring->qid;
3535         }
3536
3537         return 0;
3538 }
3539
3540 static int
3541 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3542     const struct ieee80211_bpf_params *params)
3543 {
3544         struct ieee80211com *ic = ni->ni_ic;
3545         struct iwm_softc *sc = ic->ic_softc;
3546         int error = 0;
3547
3548         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3549             "->%s begin\n", __func__);
3550
3551         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3552                 m_freem(m);
3553                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3554                     "<-%s not RUNNING\n", __func__);
3555                 return (ENETDOWN);
3556         }
3557
3558         IWM_LOCK(sc);
3559         /* XXX fix this */
3560         if (params == NULL) {
3561                 error = iwm_tx(sc, m, ni, 0);
3562         } else {
3563                 error = iwm_tx(sc, m, ni, 0);
3564         }
3565         sc->sc_tx_timer = 5;
3566         IWM_UNLOCK(sc);
3567
3568         return (error);
3569 }
3570
3571 /*
3572  * mvm/tx.c
3573  */
3574
3575 /*
3576  * Note that there are transports that buffer frames before they reach
3577  * the firmware. This means that after flush_tx_path is called, the
3578  * queue might not be empty. The race-free way to handle this is to:
3579  * 1) set the station as draining
3580  * 2) flush the Tx path
3581  * 3) wait for the transport queues to be empty
3582  */
3583 int
3584 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3585 {
3586         int ret;
3587         struct iwm_tx_path_flush_cmd flush_cmd = {
3588                 .queues_ctl = htole32(tfd_msk),
3589                 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3590         };
3591
3592         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3593             sizeof(flush_cmd), &flush_cmd);
3594         if (ret)
3595                 device_printf(sc->sc_dev,
3596                     "Flushing tx queue failed: %d\n", ret);
3597         return ret;
3598 }
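
/*
 * Illustrative caller sequence for the race-free drain described above.
 * The helper names are hypothetical; this driver currently just issues a
 * synchronous flush from iwm_release() and then resets the device:
 *
 *      iwm_mvm_drain_sta(sc, in, 1);                      // 1) mark draining
 *      iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);  // 2) flush Tx path
 *      iwm_trans_wait_tx_queue_empty(sc, tfd_msk);        // 3) wait for empty
 *      iwm_mvm_drain_sta(sc, in, 0);
 */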
3599
3600 /*
3601  * BEGIN mvm/sta.c
3602  */
3603
3604 static int
3605 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3606         struct iwm_mvm_add_sta_cmd_v7 *cmd, int *status)
3607 {
3608         return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
3609             cmd, status);
3610 }
3611
3612 /* send station add/update command to firmware */
3613 static int
3614 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3615 {
3616         struct iwm_mvm_add_sta_cmd_v7 add_sta_cmd;
3617         int ret;
3618         uint32_t status;
3619
3620         memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3621
3622         add_sta_cmd.sta_id = IWM_STATION_ID;
3623         add_sta_cmd.mac_id_n_color
3624             = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3625                 IWM_DEFAULT_COLOR));
3626         if (!update) {
3627                 int ac;
3628                 for (ac = 0; ac < WME_NUM_AC; ac++) {
3629                         add_sta_cmd.tfd_queue_msk |=
3630                             htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
3631                 }
3632                 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3633         }
3634         add_sta_cmd.add_modify = update ? 1 : 0;
3635         add_sta_cmd.station_flags_msk
3636             |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3637         add_sta_cmd.tid_disable_tx = htole16(0xffff);
3638         if (update)
3639                 add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
3640
3641         status = IWM_ADD_STA_SUCCESS;
3642         ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3643         if (ret)
3644                 return ret;
3645
3646         switch (status) {
3647         case IWM_ADD_STA_SUCCESS:
3648                 break;
3649         default:
3650                 ret = EIO;
3651                 device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3652                 break;
3653         }
3654
3655         return ret;
3656 }
3657
3658 static int
3659 iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
3660 {
3661         return iwm_mvm_sta_send_to_fw(sc, in, 0);
3662 }
3663
3664 static int
3665 iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
3666 {
3667         return iwm_mvm_sta_send_to_fw(sc, in, 1);
3668 }
3669
3670 static int
3671 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3672         const uint8_t *addr, uint16_t mac_id, uint16_t color)
3673 {
3674         struct iwm_mvm_add_sta_cmd_v7 cmd;
3675         int ret;
3676         uint32_t status;
3677
3678         memset(&cmd, 0, sizeof(cmd));
3679         cmd.sta_id = sta->sta_id;
3680         cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3681
3682         cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3683         cmd.tid_disable_tx = htole16(0xffff);
3684
3685         if (addr)
3686                 IEEE80211_ADDR_COPY(cmd.addr, addr);
3687
3688         ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3689         if (ret)
3690                 return ret;
3691
3692         switch (status) {
3693         case IWM_ADD_STA_SUCCESS:
3694                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3695                     "%s: Internal station added.\n", __func__);
3696                 return 0;
3697         default:
3698                 device_printf(sc->sc_dev,
3699                     "%s: Add internal station failed, status=0x%x\n",
3700                     __func__, status);
3701                 ret = EIO;
3702                 break;
3703         }
3704         return ret;
3705 }
3706
3707 static int
3708 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3709 {
3710         int ret;
3711
3712         sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
3713         sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
3714
3715         ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
3716         if (ret)
3717                 return ret;
3718
3719         ret = iwm_mvm_add_int_sta_common(sc,
3720             &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
3721
3722         if (ret)
3723                 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
3724         return ret;
3725 }
3726
3727 /*
3728  * END mvm/sta.c
3729  */
3730
3731 /*
3732  * BEGIN mvm/quota.c
3733  */
3734
3735 static int
3736 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
3737 {
3738         struct iwm_time_quota_cmd cmd;
3739         int i, idx, ret, num_active_macs, quota, quota_rem;
3740         int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3741         int n_ifs[IWM_MAX_BINDINGS] = {0, };
3742         uint16_t id;
3743
3744         memset(&cmd, 0, sizeof(cmd));
3745
3746         /* currently, PHY ID == binding ID */
3747         if (in) {
3748                 id = in->in_phyctxt->id;
3749                 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3750                 colors[id] = in->in_phyctxt->color;
3751
3752                 if (1)
3753                         n_ifs[id] = 1;
3754         }
3755
3756         /*
3757          * The FW's scheduling session consists of
3758          * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3759          * equally between all the bindings that require quota
3760          */
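        /*
         * Worked example (assuming IWM_MVM_MAX_QUOTA == 128): with one
         * active binding quota = 128 and quota_rem = 0; with three active
         * bindings quota = 42 and quota_rem = 2, and the two leftover
         * fragments are handed to the first binding further down.
         */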
3761         num_active_macs = 0;
3762         for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3763                 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3764                 num_active_macs += n_ifs[i];
3765         }
3766
3767         quota = 0;
3768         quota_rem = 0;
3769         if (num_active_macs) {
3770                 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3771                 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3772         }
3773
3774         for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3775                 if (colors[i] < 0)
3776                         continue;
3777
3778                 cmd.quotas[idx].id_and_color =
3779                         htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3780
3781                 if (n_ifs[i] <= 0) {
3782                         cmd.quotas[idx].quota = htole32(0);
3783                         cmd.quotas[idx].max_duration = htole32(0);
3784                 } else {
3785                         cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3786                         cmd.quotas[idx].max_duration = htole32(0);
3787                 }
3788                 idx++;
3789         }
3790
3791         /* Give the remainder of the session to the first binding */
3792         cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3793
3794         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3795             sizeof(cmd), &cmd);
3796         if (ret)
3797                 device_printf(sc->sc_dev,
3798                     "%s: Failed to send quota: %d\n", __func__, ret);
3799         return ret;
3800 }
3801
3802 /*
3803  * END mvm/quota.c
3804  */
3805
3806 /*
3807  * ieee80211 routines
3808  */
3809
3810 /*
3811  * Change to AUTH state in 80211 state machine.  Roughly matches what
3812  * Linux does in bss_info_changed().
3813  */
3814 static int
3815 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3816 {
3817         struct ieee80211_node *ni;
3818         struct iwm_node *in;
3819         struct iwm_vap *iv = IWM_VAP(vap);
3820         uint32_t duration;
3821         int error;
3822
3823         /*
3824          * XXX I have a feeling that the vap node is being
3825          * freed from underneath us. Grr.
3826          */
3827         ni = ieee80211_ref_node(vap->iv_bss);
3828         in = IWM_NODE(ni);
3829         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3830             "%s: called; vap=%p, bss ni=%p\n",
3831             __func__,
3832             vap,
3833             ni);
3834
3835         in->in_assoc = 0;
3836
3837         error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
3838         if (error != 0)
3839                 return error;
3840
3841         error = iwm_allow_mcast(vap, sc);
3842         if (error) {
3843                 device_printf(sc->sc_dev,
3844                     "%s: failed to set multicast\n", __func__);
3845                 goto out;
3846         }
3847
3848         /*
3849          * This is where it deviates from what Linux does.
3850          *
3851          * Linux iwlwifi doesn't reset the nic each time, nor does it
3852          * call ctxt_add() here.  Instead, it adds it during vap creation,
3853          * and always does a mac_ctx_changed().
3854          *
3855          * The openbsd port doesn't attempt to do that - it reset things
3856          * at odd states and does the add here.
3857          *
3858          * So, until the state handling is fixed (ie, we never reset
3859          * the NIC except for a firmware failure, which should drag
3860          * the NIC back to IDLE, re-setup and re-add all the mac/phy
3861          * contexts that are required), let's do a dirty hack here.
3862          */
3863         if (iv->is_uploaded) {
3864                 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3865                         device_printf(sc->sc_dev,
3866                             "%s: failed to update MAC\n", __func__);
3867                         goto out;
3868                 }
3869                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3870                     in->in_ni.ni_chan, 1, 1)) != 0) {
3871                         device_printf(sc->sc_dev,
3872                             "%s: failed update phy ctxt\n", __func__);
3873                         goto out;
3874                 }
3875                 in->in_phyctxt = &sc->sc_phyctxt[0];
3876
3877                 if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
3878                         device_printf(sc->sc_dev,
3879                             "%s: binding update cmd\n", __func__);
3880                         goto out;
3881                 }
3882                 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3883                         device_printf(sc->sc_dev,
3884                             "%s: failed to update sta\n", __func__);
3885                         goto out;
3886                 }
3887         } else {
3888                 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
3889                         device_printf(sc->sc_dev,
3890                             "%s: failed to add MAC\n", __func__);
3891                         goto out;
3892                 }
3893                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3894                     in->in_ni.ni_chan, 1, 1)) != 0) {
3895                         device_printf(sc->sc_dev,
3896                             "%s: failed add phy ctxt!\n", __func__);
3897                         error = ETIMEDOUT;
3898                         goto out;
3899                 }
3900                 in->in_phyctxt = &sc->sc_phyctxt[0];
3901
3902                 if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
3903                         device_printf(sc->sc_dev,
3904                             "%s: binding add cmd\n", __func__);
3905                         goto out;
3906                 }
3907                 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
3908                         device_printf(sc->sc_dev,
3909                             "%s: failed to add sta\n", __func__);
3910                         goto out;
3911                 }
3912         }
3913
3914         /*
3915          * Prevent the FW from wandering off channel during association
3916          * by "protecting" the session with a time event.
3917          */
3918         /* XXX duration is in units of TU (1 TU = 1024 usec), not ms */
3919         duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
3920         iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
3921         DELAY(100);
3922
3923         error = 0;
3924 out:
3925         ieee80211_free_node(ni);
3926         return (error);
3927 }
3928
3929 static int
3930 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
3931 {
3932         struct iwm_node *in = IWM_NODE(vap->iv_bss);
3933         int error;
3934
3935         if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3936                 device_printf(sc->sc_dev,
3937                     "%s: failed to update STA\n", __func__);
3938                 return error;
3939         }
3940
3941         in->in_assoc = 1;
3942         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3943                 device_printf(sc->sc_dev,
3944                     "%s: failed to update MAC\n", __func__);
3945                 return error;
3946         }
3947
3948         return 0;
3949 }
3950
3951 static int
3952 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
3953 {
3954         uint32_t tfd_msk;
3955
3956         /*
3957          * Ok, so *technically* the proper set of calls for going
3958          * from RUN back to SCAN is:
3959          *
3960          * iwm_mvm_power_mac_disable(sc, in);
3961          * iwm_mvm_mac_ctxt_changed(sc, in);
3962          * iwm_mvm_rm_sta(sc, in);
3963          * iwm_mvm_update_quotas(sc, NULL);
3964          * iwm_mvm_mac_ctxt_changed(sc, in);
3965          * iwm_mvm_binding_remove_vif(sc, in);
3966          * iwm_mvm_mac_ctxt_remove(sc, in);
3967          *
3968          * However, that freezes the device no matter which permutations
3969          * and modifications are attempted.  Obviously, this driver is missing
3970          * something since it works in the Linux driver, but figuring out what
3971          * is missing is a little more complicated.  Now, since we're going
3972          * back to nothing anyway, we'll just do a complete device reset.
3973          * Up yours, device!
3974          */
3975         /*
3976          * Just using 0xf for the queues mask is fine as long as we only
3977          * get here from RUN state.
3978          */
3979         tfd_msk = 0xf;
3980         mbufq_drain(&sc->sc_snd);
3981         iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
3982         /*
3983          * We seem to get away with just synchronously sending the
3984          * IWM_TXPATH_FLUSH command.
3985          */
3986 //      iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
3987         iwm_stop_device(sc);
3988         iwm_init_hw(sc);
3989         if (in)
3990                 in->in_assoc = 0;
3991         return 0;
3992
3993 #if 0
3994         int error;
3995
3996         iwm_mvm_power_mac_disable(sc, in);
3997
3998         if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
3999                 device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
4000                 return error;
4001         }
4002
4003         if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
4004                 device_printf(sc->sc_dev, "sta remove fail %d\n", error);
4005                 return error;
4006         }
4007         error = iwm_mvm_rm_sta(sc, in);
4008         in->in_assoc = 0;
4009         iwm_mvm_update_quotas(sc, NULL);
4010         if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4011                 device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
4012                 return error;
4013         }
4014         iwm_mvm_binding_remove_vif(sc, in);
4015
4016         iwm_mvm_mac_ctxt_remove(sc, in);
4017
4018         return error;
4019 #endif
4020 }
4021
4022 static struct ieee80211_node *
4023 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4024 {
4025         return malloc(sizeof (struct iwm_node), M_80211_NODE,
4026             M_NOWAIT | M_ZERO);
4027 }
4028
4029 static void
4030 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
4031 {
4032         struct ieee80211_node *ni = &in->in_ni;
4033         struct iwm_lq_cmd *lq = &in->in_lq;
4034         int nrates = ni->ni_rates.rs_nrates;
4035         int i, ridx, tab = 0;
4036 //      int txant = 0;
4037
4038         if (nrates > nitems(lq->rs_table)) {
4039                 device_printf(sc->sc_dev,
4040                     "%s: node supports %d rates, driver handles "
4041                     "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4042                 return;
4043         }
4044         if (nrates == 0) {
4045                 device_printf(sc->sc_dev,
4046                     "%s: node supports 0 rates, odd!\n", __func__);
4047                 return;
4048         }
4049
4050         /*
4051          * XXX .. and most of iwm_node is not initialised explicitly;
4052          * it's all just 0x0 passed to the firmware.
4053          */
4054
4055         /* first figure out which rates we should support */
4056         /* XXX TODO: this isn't 11n aware /at all/ */
4057         memset(&in->in_ridx, -1, sizeof(in->in_ridx));
4058         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4059             "%s: nrates=%d\n", __func__, nrates);
4060
4061         /*
4062          * Loop over nrates and populate in_ridx from the highest
4063          * rate to the lowest rate.  Remember, in_ridx[] has
4064          * IEEE80211_RATE_MAXSIZE entries!
4065          */
4066         for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
4067                 int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;
4068
4069                 /* Map 802.11 rate to HW rate index. */
4070                 for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
4071                         if (iwm_rates[ridx].rate == rate)
4072                                 break;
4073                 if (ridx > IWM_RIDX_MAX) {
4074                         device_printf(sc->sc_dev,
4075                             "%s: WARNING: device rate for %d not found!\n",
4076                             __func__, rate);
4077                 } else {
4078                         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4079                             "%s: rate: i: %d, rate=%d, ridx=%d\n",
4080                             __func__,
4081                             i,
4082                             rate,
4083                             ridx);
4084                         in->in_ridx[i] = ridx;
4085                 }
4086         }
4087
4088         /* then construct a lq_cmd based on those */
4089         memset(lq, 0, sizeof(*lq));
4090         lq->sta_id = IWM_STATION_ID;
4091
4092         /* For HT, always enable RTS/CTS to avoid excessive retries. */
4093         if (ni->ni_flags & IEEE80211_NODE_HT)
4094                 lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4095
4096         /*
4097          * Are these used? (We don't do SISO or MIMO.)
4098          * They need to be set to non-zero, though, or we get an error.
4099          */
4100         lq->single_stream_ant_msk = 1;
4101         lq->dual_stream_ant_msk = 1;
4102
4103         /*
4104          * Build the actual rate selection table.
4105          * The lowest bits are the rates.  Additionally,
4106          * CCK needs bit 9 to be set.  The rest of the bits
4107          * we add to the table select the tx antenna.
4108          * Note that we add the rates highest rate first
4109          * (the opposite order of ni_rates).
4110          */
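        /*
         * Illustrative entry (hypothetical PLCP value): a CCK 11 Mb/s row
         * transmitted on the first antenna would be built as
         *   tab = plcp | (1 << IWM_RATE_MCS_ANT_POS) | IWM_RATE_MCS_CCK_MSK;
         * while OFDM rows omit the CCK bit.
         */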
4111         /*
4112          * XXX TODO: this should be looping over the min of nrates
4113          * and LQ_MAX_RETRY_NUM.  Sigh.
4114          */
4115         for (i = 0; i < nrates; i++) {
4116                 int nextant;
4117
4118 #if 0
4119                 if (txant == 0)
4120                         txant = iwm_fw_valid_tx_ant(sc);
4121                 nextant = 1<<(ffs(txant)-1);
4122                 txant &= ~nextant;
4123 #else
4124                 nextant = iwm_fw_valid_tx_ant(sc);
4125 #endif
4126                 /*
4127                  * Map the rate id into a rate index into
4128                  * our hardware table containing the
4129                  * configuration to use for this rate.
4130                  */
4131                 ridx = in->in_ridx[i];
4132                 tab = iwm_rates[ridx].plcp;
4133                 tab |= nextant << IWM_RATE_MCS_ANT_POS;
4134                 if (IWM_RIDX_IS_CCK(ridx))
4135                         tab |= IWM_RATE_MCS_CCK_MSK;
4136                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4137                     "station rate i=%d, rate=%d, hw=%x\n",
4138                     i, iwm_rates[ridx].rate, tab);
4139                 lq->rs_table[i] = htole32(tab);
4140         }
4141         /* then fill the rest with the lowest possible rate */
4142         for (i = nrates; i < nitems(lq->rs_table); i++) {
4143                 KASSERT(tab != 0, ("invalid tab"));
4144                 lq->rs_table[i] = htole32(tab);
4145         }
4146 }
4147
4148 static int
4149 iwm_media_change(struct ifnet *ifp)
4150 {
4151         struct ieee80211vap *vap = ifp->if_softc;
4152         struct ieee80211com *ic = vap->iv_ic;
4153         struct iwm_softc *sc = ic->ic_softc;
4154         int error;
4155
4156         error = ieee80211_media_change(ifp);
4157         if (error != ENETRESET)
4158                 return error;
4159
4160         IWM_LOCK(sc);
4161         if (ic->ic_nrunning > 0) {
4162                 iwm_stop(sc);
4163                 iwm_init(sc);
4164         }
4165         IWM_UNLOCK(sc);
4166         return error;
4167 }
4168
4169
4170 static int
4171 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4172 {
4173         struct iwm_vap *ivp = IWM_VAP(vap);
4174         struct ieee80211com *ic = vap->iv_ic;
4175         struct iwm_softc *sc = ic->ic_softc;
4176         struct iwm_node *in;
4177         int error;
4178
4179         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4180             "switching state %s -> %s\n",
4181             ieee80211_state_name[vap->iv_state],
4182             ieee80211_state_name[nstate]);
4183         IEEE80211_UNLOCK(ic);
4184         IWM_LOCK(sc);
4185
4186         if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
4187                 iwm_led_blink_stop(sc);
4188
4189         /* disable beacon filtering if we're hopping out of RUN */
4190         if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
4191                 iwm_mvm_disable_beacon_filter(sc);
4192
4193                 if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4194                         in->in_assoc = 0;
4195
4196                 if (nstate == IEEE80211_S_INIT) {
4197                         IWM_UNLOCK(sc);
4198                         IEEE80211_LOCK(ic);
4199                         error = ivp->iv_newstate(vap, nstate, arg);
4200                         IEEE80211_UNLOCK(ic);
4201                         IWM_LOCK(sc);
4202                         iwm_release(sc, NULL);
4203                         IWM_UNLOCK(sc);
4204                         IEEE80211_LOCK(ic);
4205                         return error;
4206                 }
4207
4208                 /*
4209                  * It's impossible to go directly RUN->SCAN. If we call
4210                  * iwm_release() above, the card is completely reinitialized,
4211                  * so the driver must do everything necessary to bring the
4212                  * card from INIT to SCAN.
4213                  *
4214                  * Additionally, upon receiving a deauth frame from the AP,
4215                  * the OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
4216                  * state. This also fails with this driver, so bring the FSM
4217                  * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
4218                  *
4219                  * XXX TODO: fix this for FreeBSD!
4220                  */
4221                 if (nstate == IEEE80211_S_SCAN ||
4222                     nstate == IEEE80211_S_AUTH ||
4223                     nstate == IEEE80211_S_ASSOC) {
4224                         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4225                             "Force transition to INIT; MGT=%d\n", arg);
4226                         IWM_UNLOCK(sc);
4227                         IEEE80211_LOCK(ic);
4228                         /* Always pass arg as -1 since we can't Tx right now. */
4229                         /*
4230                          * XXX arg is just ignored anyway when transitioning
4231                          *     to IEEE80211_S_INIT.
4232                          */
4233                         vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
4234                         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4235                             "Going INIT->SCAN\n");
4236                         nstate = IEEE80211_S_SCAN;
4237                         IEEE80211_UNLOCK(ic);
4238                         IWM_LOCK(sc);
4239                 }
4240         }
4241
4242         switch (nstate) {
4243         case IEEE80211_S_INIT:
4244                 break;
4245
4246         case IEEE80211_S_AUTH:
4247                 if ((error = iwm_auth(vap, sc)) != 0) {
4248                         device_printf(sc->sc_dev,
4249                             "%s: could not move to auth state: %d\n",
4250                             __func__, error);
4251                         break;
4252                 }
4253                 break;
4254
4255         case IEEE80211_S_ASSOC:
4256                 if ((error = iwm_assoc(vap, sc)) != 0) {
4257                         device_printf(sc->sc_dev,
4258                             "%s: failed to associate: %d\n", __func__,
4259                             error);
4260                         break;
4261                 }
4262                 break;
4263
4264         case IEEE80211_S_RUN:
4265         {
4266                 struct iwm_host_cmd cmd = {
4267                         .id = IWM_LQ_CMD,
4268                         .len = { sizeof(in->in_lq), },
4269                         .flags = IWM_CMD_SYNC,
4270                 };
4271
4272                 /* Update the association state, now that we have it all */
4273                 /* (e.g. the associd comes in at this point) */
4274                 error = iwm_assoc(vap, sc);
4275                 if (error != 0) {
4276                         device_printf(sc->sc_dev,
4277                             "%s: failed to update association state: %d\n",
4278                             __func__,
4279                             error);
4280                         break;
4281                 }
4282
4283                 in = IWM_NODE(vap->iv_bss);
4284                 iwm_mvm_power_mac_update_mode(sc, in);
4285                 iwm_mvm_enable_beacon_filter(sc, in);
4286                 iwm_mvm_update_quotas(sc, in);
4287                 iwm_setrates(sc, in);
4288
4289                 cmd.data[0] = &in->in_lq;
4290                 if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
4291                         device_printf(sc->sc_dev,
4292                             "%s: IWM_LQ_CMD failed\n", __func__);
4293                 }
4294
4295                 iwm_mvm_led_enable(sc);
4296                 break;
4297         }
4298
4299         default:
4300                 break;
4301         }
4302         IWM_UNLOCK(sc);
4303         IEEE80211_LOCK(ic);
4304
4305         return (ivp->iv_newstate(vap, nstate, arg));
4306 }
4307
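/*
 * Task callback invoked when a scan has completed; informs net80211
 * that the scan on the first vap is done.
 */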
4308 void
4309 iwm_endscan_cb(void *arg, int pending)
4310 {
4311         struct iwm_softc *sc = arg;
4312         struct ieee80211com *ic = &sc->sc_ic;
4313
4314         IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4315             "%s: scan ended\n",
4316             __func__);
4317
4318         ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4319 }
4320
4321 /*
4322  * Aging and idle timeouts for the different possible scenarios
4323  * in default configuration
4324  */
4325 static const uint32_t
4326 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4327         {
4328                 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
4329                 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
4330         },
4331         {
4332                 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
4333                 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
4334         },
4335         {
4336                 htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
4337                 htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
4338         },
4339         {
4340                 htole32(IWM_SF_BA_AGING_TIMER_DEF),
4341                 htole32(IWM_SF_BA_IDLE_TIMER_DEF)
4342         },
4343         {
4344                 htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
4345                 htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
4346         },
4347 };
4348
4349 /*
4350  * Aging and idle timeouts for the different possible scenarios
4351  * in single BSS MAC configuration.
4352  */
4353 static const uint32_t
4354 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4355         {
4356                 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
4357                 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
4358         },
4359         {
4360                 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
4361                 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
4362         },
4363         {
4364                 htole32(IWM_SF_MCAST_AGING_TIMER),
4365                 htole32(IWM_SF_MCAST_IDLE_TIMER)
4366         },
4367         {
4368                 htole32(IWM_SF_BA_AGING_TIMER),
4369                 htole32(IWM_SF_BA_IDLE_TIMER)
4370         },
4371         {
4372                 htole32(IWM_SF_TX_RE_AGING_TIMER),
4373                 htole32(IWM_SF_TX_RE_IDLE_TIMER)
4374         },
4375 };
4376
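/*
 * Fill a Smart Fifo configuration command: pick the watermarks and
 * aging/idle timeouts depending on whether we have an associated
 * station (ni != NULL) and on its HT capabilities.
 */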
4377 static void
4378 iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
4379     struct ieee80211_node *ni)
4380 {
4381         int i, j, watermark;
4382
4383         sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
4384
4385         /*
4386          * If we are in association flow - check antenna configuration
4387          * capabilities of the AP station, and choose the watermark accordingly.
4388          */
4389         if (ni) {
4390                 if (ni->ni_flags & IEEE80211_NODE_HT) {
4391 #ifdef notyet
4392                         if (ni->ni_rxmcs[2] != 0)
4393                                 watermark = IWM_SF_W_MARK_MIMO3;
4394                         else if (ni->ni_rxmcs[1] != 0)
4395                                 watermark = IWM_SF_W_MARK_MIMO2;
4396                         else
4397 #endif
4398                                 watermark = IWM_SF_W_MARK_SISO;
4399                 } else {
4400                         watermark = IWM_SF_W_MARK_LEGACY;
4401                 }
4402         /* default watermark value for unassociated mode. */
4403         } else {
4404                 watermark = IWM_SF_W_MARK_MIMO2;
4405         }
4406         sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
4407
4408         for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
4409                 for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
4410                         sf_cmd->long_delay_timeouts[i][j] =
4411                                         htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
4412                 }
4413         }
4414
4415         if (ni) {
4416                 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
4417                        sizeof(iwm_sf_full_timeout));
4418         } else {
4419                 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
4420                        sizeof(iwm_sf_full_timeout_def));
4421         }
4422 }
4423
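/* Send a Smart Fifo configuration command for the requested state. */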
4424 static int
4425 iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4426 {
4427         struct ieee80211com *ic = &sc->sc_ic;
4428         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4429         struct iwm_sf_cfg_cmd sf_cmd = {
4430                 .state = htole32(IWM_SF_FULL_ON),
4431         };
4432         int ret = 0;
4433
4434         if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
4435                 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4436
4437         switch (new_state) {
4438         case IWM_SF_UNINIT:
4439         case IWM_SF_INIT_OFF:
4440                 iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4441                 break;
4442         case IWM_SF_FULL_ON:
4443                 iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4444                 break;
4445         default:
4446                 IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4447                     "Invalid state %d, not sending Smart Fifo cmd\n",
4448                     new_state);
4449                 return EINVAL;
4450         }
4451
4452         ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4453                                    sizeof(sf_cmd), &sf_cmd);
4454         return ret;
4455 }
4456
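/* Send the initial Bluetooth coexistence configuration to the firmware. */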
4457 static int
4458 iwm_send_bt_init_conf(struct iwm_softc *sc)
4459 {
4460         struct iwm_bt_coex_cmd bt_cmd;
4461
4462         bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4463         bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4464
4465         return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4466             &bt_cmd);
4467 }
4468
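/*
 * Send an MCC (mobile country code) update for the given ISO alpha2
 * country string and, when debugging, report the regulatory domain
 * the firmware replied with.
 */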
4469 static int
4470 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4471 {
4472         struct iwm_mcc_update_cmd mcc_cmd;
4473         struct iwm_host_cmd hcmd = {
4474                 .id = IWM_MCC_UPDATE_CMD,
4475                 .flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4476                 .data = { &mcc_cmd },
4477         };
4478         int ret;
4479 #ifdef IWM_DEBUG
4480         struct iwm_rx_packet *pkt;
4481         struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4482         struct iwm_mcc_update_resp *mcc_resp;
4483         int n_channels;
4484         uint16_t mcc;
4485 #endif
4486         int resp_v2 = isset(sc->sc_enabled_capa,
4487             IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4488
4489         memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4490         mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4491         if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4492             isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
4493                 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4494         else
4495                 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4496
4497         if (resp_v2)
4498                 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4499         else
4500                 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4501
4502         IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4503             "send MCC update to FW with '%c%c' src = %d\n",
4504             alpha2[0], alpha2[1], mcc_cmd.source_id);
4505
4506         ret = iwm_send_cmd(sc, &hcmd);
4507         if (ret)
4508                 return ret;
4509
4510 #ifdef IWM_DEBUG
4511         pkt = hcmd.resp_pkt;
4512
4513         /* Extract MCC response */
4514         if (resp_v2) {
4515                 mcc_resp = (void *)pkt->data;
4516                 mcc = mcc_resp->mcc;
4517                 n_channels =  le32toh(mcc_resp->n_channels);
4518         } else {
4519                 mcc_resp_v1 = (void *)pkt->data;
4520                 mcc = mcc_resp_v1->mcc;
4521                 n_channels =  le32toh(mcc_resp_v1->n_channels);
4522         }
4523
4524         /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4525         if (mcc == 0)
4526                 mcc = 0x3030;  /* "00" - world */
4527
4528         IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4529             "regulatory domain '%c%c' (%d channels available)\n",
4530             mcc >> 8, mcc & 0xff, n_channels);
4531 #endif
4532         iwm_free_resp(sc, &hcmd);
4533
4534         return 0;
4535 }
4536
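/* Set the thermal throttling Tx backoff value in the firmware. */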
4537 static void
4538 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4539 {
4540         struct iwm_host_cmd cmd = {
4541                 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4542                 .len = { sizeof(uint32_t), },
4543                 .data = { &backoff, },
4544         };
4545
4546         if (iwm_send_cmd(sc, &cmd) != 0) {
4547                 device_printf(sc->sc_dev,
4548                     "failed to change thermal tx backoff\n");
4549         }
4550 }
4551
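/*
 * Bring the hardware up: run the INIT firmware image, restart with the
 * regular firmware, then configure BT coex, antennas, the PHY database,
 * the auxiliary station, PHY contexts, power settings and Tx queues.
 */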
4552 static int
4553 iwm_init_hw(struct iwm_softc *sc)
4554 {
4555         struct ieee80211com *ic = &sc->sc_ic;
4556         int error, i, ac;
4557
4558         if ((error = iwm_start_hw(sc)) != 0) {
4559                 printf("iwm_start_hw: failed %d\n", error);
4560                 return error;
4561         }
4562
4563         if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4564                 printf("iwm_run_init_mvm_ucode: failed %d\n", error);
4565                 return error;
4566         }
4567
4568         /*
4569          * We should stop and start the HW since the INIT
4570          * image was just loaded.
4571          */
4572         iwm_stop_device(sc);
4573         if ((error = iwm_start_hw(sc)) != 0) {
4574                 device_printf(sc->sc_dev, "could not initialize hardware\n");
4575                 return error;
4576         }
4577
4578         /* restart, this time with the regular firmware */
4579         error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
4580         if (error) {
4581                 device_printf(sc->sc_dev, "could not load firmware\n");
4582                 goto error;
4583         }
4584
4585         if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4586                 device_printf(sc->sc_dev, "bt init conf failed\n");
4587                 goto error;
4588         }
4589
4590         if ((error = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc))) != 0) {
4591                 device_printf(sc->sc_dev, "antenna config failed\n");
4592                 goto error;
4593         }
4594
4595         /* Send phy db control command and then phy db calibration */
4596         if ((error = iwm_send_phy_db_data(sc)) != 0) {
4597                 device_printf(sc->sc_dev, "phy_db_data failed\n");
4598                 goto error;
4599         }
4600
4601         if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4602                 device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4603                 goto error;
4604         }
4605
4606         /* Add auxiliary station for scanning */
4607         if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4608                 device_printf(sc->sc_dev, "add_aux_sta failed\n");
4609                 goto error;
4610         }
4611
4612         for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4613                 /*
4614                  * The channel used here isn't relevant as it's
4615                  * going to be overwritten in the other flows.
4616                  * For now use the first channel we have.
4617                  */
4618                 if ((error = iwm_mvm_phy_ctxt_add(sc,
4619                     &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4620                         goto error;
4621         }
4622
4623         /* Initialize tx backoffs to the minimum. */
4624         if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
4625                 iwm_mvm_tt_tx_backoff(sc, 0);
4626
4627         error = iwm_mvm_power_update_device(sc);
4628         if (error)
4629                 goto error;
4630
4631         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
4632                 if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4633                         goto error;
4634         }
4635
4636         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4637                 if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4638                         goto error;
4639         }
4640
4641         /* Enable Tx queues. */
4642         for (ac = 0; ac < WME_NUM_AC; ac++) {
4643                 error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4644                     iwm_mvm_ac_to_tx_fifo[ac]);
4645                 if (error)
4646                         goto error;
4647         }
4648
4649         if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4650                 device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4651                 goto error;
4652         }
4653
4654         return 0;
4655
4656  error:
4657         iwm_stop_device(sc);
4658         return error;
4659 }
4660
4661 /* Allow multicast from our BSSID. */
4662 static int
4663 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4664 {
4665         struct ieee80211_node *ni = vap->iv_bss;
4666         struct iwm_mcast_filter_cmd *cmd;
4667         size_t size;
4668         int error;
4669
4670         size = roundup(sizeof(*cmd), 4);
4671         cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4672         if (cmd == NULL)
4673                 return ENOMEM;
4674         cmd->filter_own = 1;
4675         cmd->port_id = 0;
4676         cmd->count = 0;
4677         cmd->pass_all = 1;
4678         IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4679
4680         error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4681             IWM_CMD_SYNC, size, cmd);
4682         free(cmd, M_DEVBUF);
4683
4684         return (error);
4685 }
4686
4687 /*
4688  * ifnet interfaces
4689  */
4690
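/* Bring the hardware up and arm the watchdog timer. */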
4691 static void
4692 iwm_init(struct iwm_softc *sc)
4693 {
4694         int error;
4695
4696         if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4697                 return;
4698         }
4699         sc->sc_generation++;
4700         sc->sc_flags &= ~IWM_FLAG_STOPPED;
4701
4702         if ((error = iwm_init_hw(sc)) != 0) {
4703                 printf("iwm_init_hw failed %d\n", error);
4704                 iwm_stop(sc);
4705                 return;
4706         }
4707
4708         /*
4709          * Ok, firmware loaded and we are jogging
4710          */
4711         sc->sc_flags |= IWM_FLAG_HW_INITED;
4712         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4713 }
4714
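/*
 * net80211 ic_transmit method: enqueue the frame on the driver send
 * queue and kick the transmit path.
 */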
4715 static int
4716 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4717 {
4718         struct iwm_softc *sc;
4719         int error;
4720
4721         sc = ic->ic_softc;
4722
4723         IWM_LOCK(sc);
4724         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4725                 IWM_UNLOCK(sc);
4726                 return (ENXIO);
4727         }
4728         error = mbufq_enqueue(&sc->sc_snd, m);
4729         if (error) {
4730                 IWM_UNLOCK(sc);
4731                 return (error);
4732         }
4733         iwm_start(sc);
4734         IWM_UNLOCK(sc);
4735         return (0);
4736 }
4737
4738 /*
4739  * Dequeue packets from sendq and call send.
4740  */
4741 static void
4742 iwm_start(struct iwm_softc *sc)
4743 {
4744         struct ieee80211_node *ni;
4745         struct mbuf *m;
4746         int ac = 0;
4747
4748         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4749         while (sc->qfullmsk == 0 &&
4750                 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4751                 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4752                 if (iwm_tx(sc, m, ni, ac) != 0) {
4753                         if_inc_counter(ni->ni_vap->iv_ifp,
4754                             IFCOUNTER_OERRORS, 1);
4755                         ieee80211_free_node(ni);
4756                         continue;
4757                 }
4758                 sc->sc_tx_timer = 15;
4759         }
4760         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4761 }
4762
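/* Stop the device and mark the hardware as no longer initialized. */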
4763 static void
4764 iwm_stop(struct iwm_softc *sc)
4765 {
4766
4767         sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4768         sc->sc_flags |= IWM_FLAG_STOPPED;
4769         sc->sc_generation++;
4770         iwm_led_blink_stop(sc);
4771         sc->sc_tx_timer = 0;
4772         iwm_stop_device(sc);
4773 }
4774
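/*
 * Per-second watchdog: if a transmission has been pending for too long,
 * dump diagnostics and restart the 802.11 stack.
 */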
4775 static void
4776 iwm_watchdog(void *arg)
4777 {
4778         struct iwm_softc *sc = arg;
4779         struct ieee80211com *ic = &sc->sc_ic;
4780
4781         if (sc->sc_tx_timer > 0) {
4782                 if (--sc->sc_tx_timer == 0) {
4783                         device_printf(sc->sc_dev, "device timeout\n");
4784 #ifdef IWM_DEBUG
4785                         iwm_nic_error(sc);
4786 #endif
4787                         ieee80211_restart_all(ic);
4788                         counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4789                         return;
4790                 }
4791         }
4792         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4793 }
4794
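/*
 * net80211 ic_parent method: bring the device up or down to match the
 * number of running vaps.
 */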
4795 static void
4796 iwm_parent(struct ieee80211com *ic)
4797 {
4798         struct iwm_softc *sc = ic->ic_softc;
4799         int startall = 0;
4800
4801         IWM_LOCK(sc);
4802         if (ic->ic_nrunning > 0) {
4803                 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4804                         iwm_init(sc);
4805                         startall = 1;
4806                 }
4807         } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4808                 iwm_stop(sc);
4809         IWM_UNLOCK(sc);
4810         if (startall)
4811                 ieee80211_start_all(ic);
4812 }
4813
4814 /*
4815  * The interrupt side of things
4816  */
4817
4818 /*
4819  * error dumping routines are from iwlwifi/mvm/utils.c
4820  */
4821
4822 /*
4823  * Note: This structure is read from the device with IO accesses,
4824  * and the reading already does the endian conversion. As it is
4825  * read with uint32_t-sized accesses, any members with a different size
4826  * need to be ordered correctly though!
4827  */
4828 struct iwm_error_event_table {
4829         uint32_t valid;         /* (nonzero) valid, (0) log is empty */
4830         uint32_t error_id;              /* type of error */
4831         uint32_t trm_hw_status0;        /* TRM HW status */
4832         uint32_t trm_hw_status1;        /* TRM HW status */
4833         uint32_t blink2;                /* branch link */
4834         uint32_t ilink1;                /* interrupt link */
4835         uint32_t ilink2;                /* interrupt link */
4836         uint32_t data1;         /* error-specific data */
4837         uint32_t data2;         /* error-specific data */
4838         uint32_t data3;         /* error-specific data */
4839         uint32_t bcon_time;             /* beacon timer */
4840         uint32_t tsf_low;               /* network timestamp function timer */
4841         uint32_t tsf_hi;                /* network timestamp function timer */
4842         uint32_t gp1;           /* GP1 timer register */
4843         uint32_t gp2;           /* GP2 timer register */
4844         uint32_t fw_rev_type;   /* firmware revision type */
4845         uint32_t major;         /* uCode version major */
4846         uint32_t minor;         /* uCode version minor */
4847         uint32_t hw_ver;                /* HW Silicon version */
4848         uint32_t brd_ver;               /* HW board version */
4849         uint32_t log_pc;                /* log program counter */
4850         uint32_t frame_ptr;             /* frame pointer */
4851         uint32_t stack_ptr;             /* stack pointer */
4852         uint32_t hcmd;          /* last host command header */
4853         uint32_t isr0;          /* isr status register LMPM_NIC_ISR0:
4854                                  * rxtx_flag */
4855         uint32_t isr1;          /* isr status register LMPM_NIC_ISR1:
4856                                  * host_flag */
4857         uint32_t isr2;          /* isr status register LMPM_NIC_ISR2:
4858                                  * enc_flag */
4859         uint32_t isr3;          /* isr status register LMPM_NIC_ISR3:
4860                                  * time_flag */
4861         uint32_t isr4;          /* isr status register LMPM_NIC_ISR4:
4862                                  * wico interrupt */
4863         uint32_t last_cmd_id;   /* last HCMD id handled by the firmware */
4864         uint32_t wait_event;            /* wait event() caller address */
4865         uint32_t l2p_control;   /* L2pControlField */
4866         uint32_t l2p_duration;  /* L2pDurationField */
4867         uint32_t l2p_mhvalid;   /* L2pMhValidBits */
4868         uint32_t l2p_addr_match;        /* L2pAddrMatchStat */
4869         uint32_t lmpm_pmg_sel;  /* indicates which clocks are turned on
4870                                  * (LMPM_PMG_SEL) */
4871         uint32_t u_timestamp;   /* date and time of the firmware
4872                                  * compilation */
4873         uint32_t flow_handler;  /* FH read/write pointers, RX credit */
4874 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
4875
4876 /*
4877  * UMAC error struct - relevant starting from family 8000 chip.
4878  * Note: This structure is read from the device with IO accesses,
4879  * and the reading already does the endian conversion. As it is
4880  * read with u32-sized accesses, any members with a different size
4881  * need to be ordered correctly though!
4882  */
4883 struct iwm_umac_error_event_table {
4884         uint32_t valid;         /* (nonzero) valid, (0) log is empty */
4885         uint32_t error_id;      /* type of error */
4886         uint32_t blink1;        /* branch link */
4887         uint32_t blink2;        /* branch link */
4888         uint32_t ilink1;        /* interrupt link */
4889         uint32_t ilink2;        /* interrupt link */
4890         uint32_t data1;         /* error-specific data */
4891         uint32_t data2;         /* error-specific data */
4892         uint32_t data3;         /* error-specific data */
4893         uint32_t umac_major;
4894         uint32_t umac_minor;
4895         uint32_t frame_pointer; /* core register 27*/
4896         uint32_t stack_pointer; /* core register 28 */
4897         uint32_t cmd_header;    /* latest host cmd sent to UMAC */
4898         uint32_t nic_isr_pref;  /* ISR status register */
4899 } __packed;
4900
4901 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
4902 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
4903
4904 #ifdef IWM_DEBUG
4905 struct {
4906         const char *name;
4907         uint8_t num;
4908 } advanced_lookup[] = {
4909         { "NMI_INTERRUPT_WDG", 0x34 },
4910         { "SYSASSERT", 0x35 },
4911         { "UCODE_VERSION_MISMATCH", 0x37 },
4912         { "BAD_COMMAND", 0x38 },
4913         { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
4914         { "FATAL_ERROR", 0x3D },
4915         { "NMI_TRM_HW_ERR", 0x46 },
4916         { "NMI_INTERRUPT_TRM", 0x4C },
4917         { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
4918         { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
4919         { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
4920         { "NMI_INTERRUPT_HOST", 0x66 },
4921         { "NMI_INTERRUPT_ACTION_PT", 0x7C },
4922         { "NMI_INTERRUPT_UNKNOWN", 0x84 },
4923         { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
4924         { "ADVANCED_SYSASSERT", 0 },
4925 };
4926
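/* Translate a firmware error id into a human-readable name. */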
4927 static const char *
4928 iwm_desc_lookup(uint32_t num)
4929 {
4930         int i;
4931
4932         for (i = 0; i < nitems(advanced_lookup) - 1; i++)
4933                 if (advanced_lookup[i].num == num)
4934                         return advanced_lookup[i].name;
4935
4936         /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
4937         return advanced_lookup[i].name;
4938 }
4939
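/* Dump the UMAC error event table (relevant starting from family 8000). */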
4940 static void
4941 iwm_nic_umac_error(struct iwm_softc *sc)
4942 {
4943         struct iwm_umac_error_event_table table;
4944         uint32_t base;
4945
4946         base = sc->sc_uc.uc_umac_error_event_table;
4947
4948         if (base < 0x800000) {
4949                 device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
4950                     base);
4951                 return;
4952         }
4953
4954         if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
4955                 device_printf(sc->sc_dev, "reading errlog failed\n");
4956                 return;
4957         }
4958
4959         if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
4960                 device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
4961                 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
4962                     sc->sc_flags, table.valid);
4963         }
4964
4965         device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
4966                 iwm_desc_lookup(table.error_id));
4967         device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
4968         device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
4969         device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
4970             table.ilink1);
4971         device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
4972             table.ilink2);
4973         device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
4974         device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
4975         device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
4976         device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
4977         device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
4978         device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
4979             table.frame_pointer);
4980         device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
4981             table.stack_pointer);
4982         device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
4983         device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
4984             table.nic_isr_pref);
4985 }
4986
4987 /*
4988  * Support for dumping the error log seemed like a good idea ...
4989  * but it's mostly hex junk and the only sensible thing is the
4990  * hw/ucode revision (which we know anyway).  Since it's here,
4991  * I'll just leave it in, just in case e.g. the Intel guys want to
4992  * help us decipher some "ADVANCED_SYSASSERT" later.
4993  */
4994 static void
4995 iwm_nic_error(struct iwm_softc *sc)
4996 {
4997         struct iwm_error_event_table table;
4998         uint32_t base;
4999
5000         device_printf(sc->sc_dev, "dumping device error log\n");
5001         base = sc->sc_uc.uc_error_event_table;
5002         if (base < 0x800000) {
5003                 device_printf(sc->sc_dev,
5004                     "Invalid error log pointer 0x%08x\n", base);
5005                 return;
5006         }
5007
5008         if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5009                 device_printf(sc->sc_dev, "reading errlog failed\n");
5010                 return;
5011         }
5012
5013         if (!table.valid) {
5014                 device_printf(sc->sc_dev, "errlog not found, skipping\n");
5015                 return;
5016         }
5017
5018         if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5019                 device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5020                 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5021                     sc->sc_flags, table.valid);
5022         }
5023
5024         device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5025             iwm_desc_lookup(table.error_id));
5026         device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5027             table.trm_hw_status0);
5028         device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5029             table.trm_hw_status1);
5030         device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5031         device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5032         device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5033         device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5034         device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5035         device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5036         device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5037         device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5038         device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5039         device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5040         device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5041         device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5042             table.fw_rev_type);
5043         device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5044         device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5045         device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5046         device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5047         device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5048         device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5049         device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5050         device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5051         device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5052         device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5053         device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5054         device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5055         device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5056         device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5057         device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5058         device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5059         device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5060         device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5061         device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5062
5063         if (sc->sc_uc.uc_umac_error_event_table)
5064                 iwm_nic_umac_error(sc);
5065 }
5066 #endif
5067
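/*
 * Helpers for the notification handler below: sync the Rx DMA map for
 * reading and point at the payload that follows the packet header.
 */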
5068 #define SYNC_RESP_STRUCT(_var_, _pkt_)                                  \
5069 do {                                                                    \
5070         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
5071         _var_ = (void *)((_pkt_)+1);                                    \
5072 } while (/*CONSTCOND*/0)
5073
5074 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)                              \
5075 do {                                                                    \
5076         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
5077         _ptr_ = (void *)((_pkt_)+1);                                    \
5078 } while (/*CONSTCOND*/0)
5079
5080 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
5081
5082 /*
5083  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5084  * Basic structure from if_iwn
5085  */
5086 static void
5087 iwm_notif_intr(struct iwm_softc *sc)
5088 {
5089         struct ieee80211com *ic = &sc->sc_ic;
5090         uint16_t hw;
5091
5092         bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5093             BUS_DMASYNC_POSTREAD);
5094
5095         hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5096
5097         /*
5098          * Process responses
5099          */
5100         while (sc->rxq.cur != hw) {
5101                 struct iwm_rx_ring *ring = &sc->rxq;
5102                 struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
5103                 struct iwm_rx_packet *pkt;
5104                 struct iwm_cmd_response *cresp;
5105                 int qid, idx, code;
5106
5107                 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
5108                     BUS_DMASYNC_POSTREAD);
5109                 pkt = mtod(data->m, struct iwm_rx_packet *);
5110
5111                 qid = pkt->hdr.qid & ~0x80;
5112                 idx = pkt->hdr.idx;
5113
5114                 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5115                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5116                     "rx packet qid=%d idx=%d type=%x %d %d\n",
5117                     pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, sc->rxq.cur, hw);
5118
5119                 /*
5120                  * We randomly get these from the firmware, no idea why.
5121                  * They at least seem harmless, so just ignore them for now.
5122                  */
5123                 if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
5124                     || pkt->len_n_flags == htole32(0x55550000))) {
5125                         ADVANCE_RXQ(sc);
5126                         continue;
5127                 }
5128
5129                 switch (code) {
5130                 case IWM_REPLY_RX_PHY_CMD:
5131                         iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
5132                         break;
5133
5134                 case IWM_REPLY_RX_MPDU_CMD:
5135                         iwm_mvm_rx_rx_mpdu(sc, pkt, data);
5136                         break;
5137
5138                 case IWM_TX_CMD:
5139                         iwm_mvm_rx_tx_cmd(sc, pkt, data);
5140                         break;
5141
5142                 case IWM_MISSED_BEACONS_NOTIFICATION: {
5143                         struct iwm_missed_beacons_notif *resp;
5144                         int missed;
5145
5146                         /* XXX look at mac_id to determine interface ID */
5147                         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5148
5149                         SYNC_RESP_STRUCT(resp, pkt);
5150                         missed = le32toh(resp->consec_missed_beacons);
5151
5152                         IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5153                             "%s: MISSED_BEACON: mac_id=%d, "
5154                             "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5155                             "num_rx=%d\n",
5156                             __func__,
5157                             le32toh(resp->mac_id),
5158                             le32toh(resp->consec_missed_beacons_since_last_rx),
5159                             le32toh(resp->consec_missed_beacons),
5160                             le32toh(resp->num_expected_beacons),
5161                             le32toh(resp->num_recvd_beacons));
5162
5163                         /* Be paranoid */
5164                         if (vap == NULL)
5165                                 break;
5166
5167                         /* XXX no net80211 locking? */
5168                         if (vap->iv_state == IEEE80211_S_RUN &&
5169                             (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5170                                 if (missed > vap->iv_bmissthreshold) {
5171                                         /* XXX bad locking; turn into task */
5172                                         IWM_UNLOCK(sc);
5173                                         ieee80211_beacon_miss(ic);
5174                                         IWM_LOCK(sc);
5175                                 }
5176                         }
5177
5178                         break; }
5179
5180                 case IWM_MFUART_LOAD_NOTIFICATION:
5181                         break;
5182
5183                 case IWM_MVM_ALIVE: {
5184                         struct iwm_mvm_alive_resp_v1 *resp1;
5185                         struct iwm_mvm_alive_resp_v2 *resp2;
5186                         struct iwm_mvm_alive_resp_v3 *resp3;
5187
5188                         if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
5189                                 SYNC_RESP_STRUCT(resp1, pkt);
5190                                 sc->sc_uc.uc_error_event_table
5191                                     = le32toh(resp1->error_event_table_ptr);
5192                                 sc->sc_uc.uc_log_event_table
5193                                     = le32toh(resp1->log_event_table_ptr);
5194                                 sc->sched_base = le32toh(resp1->scd_base_ptr);
5195                                 if (resp1->status == IWM_ALIVE_STATUS_OK)
5196                                         sc->sc_uc.uc_ok = 1;
5197                                 else
5198                                         sc->sc_uc.uc_ok = 0;
5199                         }
5200
5201                         if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
5202                                 SYNC_RESP_STRUCT(resp2, pkt);
5203                                 sc->sc_uc.uc_error_event_table
5204                                     = le32toh(resp2->error_event_table_ptr);
5205                                 sc->sc_uc.uc_log_event_table
5206                                     = le32toh(resp2->log_event_table_ptr);
5207                                 sc->sched_base = le32toh(resp2->scd_base_ptr);
5208                                 sc->sc_uc.uc_umac_error_event_table
5209                                     = le32toh(resp2->error_info_addr);
5210                                 if (resp2->status == IWM_ALIVE_STATUS_OK)
5211                                         sc->sc_uc.uc_ok = 1;
5212                                 else
5213                                         sc->sc_uc.uc_ok = 0;
5214                         }
5215
5216                         if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
5217                                 SYNC_RESP_STRUCT(resp3, pkt);
5218                                 sc->sc_uc.uc_error_event_table
5219                                     = le32toh(resp3->error_event_table_ptr);
5220                                 sc->sc_uc.uc_log_event_table
5221                                     = le32toh(resp3->log_event_table_ptr);
5222                                 sc->sched_base = le32toh(resp3->scd_base_ptr);
5223                                 sc->sc_uc.uc_umac_error_event_table
5224                                     = le32toh(resp3->error_info_addr);
5225                                 if (resp3->status == IWM_ALIVE_STATUS_OK)
5226                                         sc->sc_uc.uc_ok = 1;
5227                                 else
5228                                         sc->sc_uc.uc_ok = 0;
5229                         }
5230
5231                         sc->sc_uc.uc_intr = 1;
5232                         wakeup(&sc->sc_uc);
5233                         break; }
5234
5235                 case IWM_CALIB_RES_NOTIF_PHY_DB: {
5236                         struct iwm_calib_res_notif_phy_db *phy_db_notif;
5237                         SYNC_RESP_STRUCT(phy_db_notif, pkt);
5238
5239                         iwm_phy_db_set_section(sc, phy_db_notif);
5240
5241                         break; }
5242
5243                 case IWM_STATISTICS_NOTIFICATION: {
5244                         struct iwm_notif_statistics *stats;
5245                         SYNC_RESP_STRUCT(stats, pkt);
5246                         memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5247                         sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
5248                         break; }
5249
5250                 case IWM_NVM_ACCESS_CMD:
5251                 case IWM_MCC_UPDATE_CMD:
5252                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
5253                                 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
5254                                     BUS_DMASYNC_POSTREAD);
5255                                 memcpy(sc->sc_cmd_resp,
5256                                     pkt, sizeof(sc->sc_cmd_resp));
5257                         }
5258                         break;
5259
5260                 case IWM_MCC_CHUB_UPDATE_CMD: {
5261                         struct iwm_mcc_chub_notif *notif;
5262                         SYNC_RESP_STRUCT(notif, pkt);
5263
5264                         sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5265                         sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5266                         sc->sc_fw_mcc[2] = '\0';
5267                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
5268                             "fw source %d sent CC '%s'\n",
5269                             notif->source_id, sc->sc_fw_mcc);
5270                         break; }
5271
5272                 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5273                         break;
5274
5275                 case IWM_PHY_CONFIGURATION_CMD:
5276                 case IWM_TX_ANT_CONFIGURATION_CMD:
5277                 case IWM_ADD_STA:
5278                 case IWM_MAC_CONTEXT_CMD:
5279                 case IWM_REPLY_SF_CFG_CMD:
5280                 case IWM_POWER_TABLE_CMD:
5281                 case IWM_PHY_CONTEXT_CMD:
5282                 case IWM_BINDING_CONTEXT_CMD:
5283                 case IWM_TIME_EVENT_CMD:
5284                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5285                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5286                 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5287                 case IWM_REPLY_BEACON_FILTERING_CMD:
5288                 case IWM_MAC_PM_POWER_TABLE:
5289                 case IWM_TIME_QUOTA_CMD:
5290                 case IWM_REMOVE_STA:
5291                 case IWM_TXPATH_FLUSH:
5292                 case IWM_LQ_CMD:
5293                 case IWM_BT_CONFIG:
5294                 case IWM_REPLY_THERMAL_MNG_BACKOFF:
5295                         SYNC_RESP_STRUCT(cresp, pkt);
5296                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
5297                                 memcpy(sc->sc_cmd_resp,
5298                                     pkt, sizeof(*pkt)+sizeof(*cresp));
5299                         }
5300                         break;
5301
5302                 /* ignore */
5303                 case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
5304                         break;
5305
5306                 case IWM_INIT_COMPLETE_NOTIF:
5307                         sc->sc_init_complete = 1;
5308                         wakeup(&sc->sc_init_complete);
5309                         break;
5310
5311                 case IWM_SCAN_OFFLOAD_COMPLETE: {
5312                         struct iwm_periodic_scan_complete *notif;
5313                         SYNC_RESP_STRUCT(notif, pkt);
5314                         break;
5315                 }
5316
5317                 case IWM_SCAN_ITERATION_COMPLETE: {
5318                         struct iwm_lmac_scan_complete_notif *notif;
5319                         SYNC_RESP_STRUCT(notif, pkt);
5320                         ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5321                         break;
5322                 }
5323
5324                 case IWM_SCAN_COMPLETE_UMAC: {
5325                         struct iwm_umac_scan_complete *notif;
5326                         SYNC_RESP_STRUCT(notif, pkt);
5327
5328                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
5329                             "UMAC scan complete, status=0x%x\n",
5330                             notif->status);
5331 #if 0   /* XXX This would be a duplicate scan end call */
5332                         taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
5333 #endif
5334                         break;
5335                 }
5336
5337                 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5338                         struct iwm_umac_scan_iter_complete_notif *notif;
5339                         SYNC_RESP_STRUCT(notif, pkt);
5340
5341                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5342                             "complete, status=0x%x, %d channels scanned\n",
5343                             notif->status, notif->scanned_channels);
5344                         ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5345                         break;
5346                 }
5347
5348                 case IWM_REPLY_ERROR: {
5349                         struct iwm_error_resp *resp;
5350                         SYNC_RESP_STRUCT(resp, pkt);
5351
5352                         device_printf(sc->sc_dev,
5353                             "firmware error 0x%x, cmd 0x%x\n",
5354                             le32toh(resp->error_type),
5355                             resp->cmd_id);
5356                         break;
5357                 }
5358
5359                 case IWM_TIME_EVENT_NOTIFICATION: {
5360                         struct iwm_time_event_notif *notif;
5361                         SYNC_RESP_STRUCT(notif, pkt);
5362
5363                         IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5364                             "TE notif status = 0x%x action = 0x%x\n",
5365                             notif->status, notif->action);
5366                         break;
5367                 }
5368
5369                 case IWM_MCAST_FILTER_CMD:
5370                         break;
5371
5372                 case IWM_SCD_QUEUE_CFG: {
5373                         struct iwm_scd_txq_cfg_rsp *rsp;
5374                         SYNC_RESP_STRUCT(rsp, pkt);
5375
5376                         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5377                             "queue cfg token=0x%x sta_id=%d "
5378                             "tid=%d scd_queue=%d\n",
5379                             rsp->token, rsp->sta_id, rsp->tid,
5380                             rsp->scd_queue);
5381                         break;
5382                 }
5383
5384                 default:
5385                         device_printf(sc->sc_dev,
5386                             "frame %d/%d %x UNHANDLED (this should "
5387                             "not happen)\n", qid, idx,
5388                             pkt->len_n_flags);
5389                         break;
5390                 }
5391
5392                 /*
5393                  * Why test bit 0x80?  The Linux driver:
5394                  *
5395                  * There is one exception:  uCode sets bit 15 when it
5396                  * originates the response/notification, i.e. when the
5397                  * response/notification is not a direct response to a
5398                  * command sent by the driver.  For example, uCode issues
5399                  * IWM_REPLY_RX when it sends a received frame to the driver;
5400                  * it is not a direct response to any driver command.
5401                  *
5402                  * Ok, so since when is 7 == 15?  Well, the Linux driver
5403                  * uses a slightly different format for pkt->hdr, and "qid"
5404                  * is actually the upper byte of a two-byte field.
5405                  */
5406                 if (!(pkt->hdr.qid & (1 << 7))) {
5407                         iwm_cmd_done(sc, pkt);
5408                 }
5409
5410                 ADVANCE_RXQ(sc);
5411         }
5412
5413         IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
5414             IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
5415
5416         /*
5417          * Tell the firmware what we have processed.
5418          * Seems like the hardware gets upset unless we align
5419          * the write by 8??
5420          */
5421         hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5422         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
5423 }
5424
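/*
 * Main interrupt handler: gather the interrupt cause (via the ICT table
 * when enabled), handle firmware/hardware errors, firmware chunk loads,
 * RF kill and periodic interrupts, and process Rx notifications.
 */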
5425 static void
5426 iwm_intr(void *arg)
5427 {
5428         struct iwm_softc *sc = arg;
5429         int handled = 0;
5430         int r1, r2, rv = 0;
5431         int isperiodic = 0;
5432
5433         IWM_LOCK(sc);
5434         IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5435
5436         if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5437                 uint32_t *ict = sc->ict_dma.vaddr;
5438                 int tmp;
5439
5440                 tmp = htole32(ict[sc->ict_cur]);
5441                 if (!tmp)
5442                         goto out_ena;
5443
5444                 /*
5445                  * ok, there was something.  keep plowing until we have all.
5446                  */
5447                 r1 = r2 = 0;
5448                 while (tmp) {
5449                         r1 |= tmp;
5450                         ict[sc->ict_cur] = 0;
5451                         sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5452                         tmp = htole32(ict[sc->ict_cur]);
5453                 }
5454
5455                 /* this is where the fun begins.  don't ask */
5456                 if (r1 == 0xffffffff)
5457                         r1 = 0;
5458
5459                 /* i am not expected to understand this */
5460                 if (r1 & 0xc0000)
5461                         r1 |= 0x8000;
5462                 r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5463         } else {
5464                 r1 = IWM_READ(sc, IWM_CSR_INT);
5465                 /* "hardware gone" (where, fishing?) */
5466                 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5467                         goto out;
5468                 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5469         }
5470         if (r1 == 0 && r2 == 0) {
5471                 goto out_ena;
5472         }
5473
5474         IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5475
5476         /* ignored */
5477         handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
5478
5479         if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5480                 int i;
5481                 struct ieee80211com *ic = &sc->sc_ic;
5482                 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5483
5484 #ifdef IWM_DEBUG
5485                 iwm_nic_error(sc);
5486 #endif
5487                 /* Dump driver status (TX and RX rings) while we're here. */
5488                 device_printf(sc->sc_dev, "driver status:\n");
5489                 for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5490                         struct iwm_tx_ring *ring = &sc->txq[i];
5491                         device_printf(sc->sc_dev,
5492                             "  tx ring %2d: qid=%-2d cur=%-3d "
5493                             "queued=%-3d\n",
5494                             i, ring->qid, ring->cur, ring->queued);
5495                 }
5496                 device_printf(sc->sc_dev,
5497                     "  rx ring: cur=%d\n", sc->rxq.cur);
5498                 device_printf(sc->sc_dev,
5499                     "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5500
5501                 /* Don't stop the device; just do a VAP restart */
5502                 IWM_UNLOCK(sc);
5503
5504                 if (vap == NULL) {
5505                         printf("%s: null vap\n", __func__);
5506                         return;
5507                 }
5508
5509                 device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5510                     "restarting\n", __func__, vap->iv_state);
5511
5512                 /* XXX TODO: turn this into a callout/taskqueue */
5513                 ieee80211_restart_all(ic);
5514                 return;
5515         }
5516
5517         if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5518                 handled |= IWM_CSR_INT_BIT_HW_ERR;
5519                 device_printf(sc->sc_dev, "hardware error, stopping device\n");
5520                 iwm_stop(sc);
5521                 rv = 1;
5522                 goto out;
5523         }
5524
5525         /* firmware chunk loaded */
5526         if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5527                 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5528                 handled |= IWM_CSR_INT_BIT_FH_TX;
5529                 sc->sc_fw_chunk_done = 1;
5530                 wakeup(&sc->sc_fw);
5531         }
5532
5533         if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5534                 handled |= IWM_CSR_INT_BIT_RF_KILL;
5535                 if (iwm_check_rfkill(sc)) {
5536                         device_printf(sc->sc_dev,
5537                             "%s: rfkill switch, disabling interface\n",
5538                             __func__);
5539                         iwm_stop(sc);
5540                 }
5541         }
5542
5543         /*
5544          * The Linux driver uses periodic interrupts to avoid races.
5545          * We cargo-cult like it's going out of fashion.
5546          */
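        /*
         * Concretely: the RX_PERIODIC bit is acked below and the periodic
         * interrupt is switched off once no FH/SW RX interrupt is pending;
         * it is re-armed further down whenever a real RX interrupt has just
         * been serviced, which appears to be how the race mentioned above
         * is avoided.
         */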
5547         if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5548                 handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5549                 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5550                 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5551                         IWM_WRITE_1(sc,
5552                             IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5553                 isperiodic = 1;
5554         }
5555
5556         if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5557                 handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5558                 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5559
5560                 iwm_notif_intr(sc);
5561
5562                 /* enable periodic interrupt, see above */
5563                 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) && !isperiodic)
5564                         IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5565                             IWM_CSR_INT_PERIODIC_ENA);
5566         }
5567
5568         if (__predict_false(r1 & ~handled))
5569                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5570                     "%s: unhandled interrupts: %x\n", __func__, r1);
5571         rv = 1;
5572
5573  out_ena:
5574         iwm_restore_interrupts(sc);
5575  out:
5576         IWM_UNLOCK(sc);
5577         return;
5578 }
5579
5580 /*
5581  * Autoconf glue-sniffing
5582  */
5583 #define PCI_VENDOR_INTEL                0x8086
5584 #define PCI_PRODUCT_INTEL_WL_3160_1     0x08b3
5585 #define PCI_PRODUCT_INTEL_WL_3160_2     0x08b4
5586 #define PCI_PRODUCT_INTEL_WL_3165_1     0x3165
5587 #define PCI_PRODUCT_INTEL_WL_3165_2     0x3166
5588 #define PCI_PRODUCT_INTEL_WL_7260_1     0x08b1
5589 #define PCI_PRODUCT_INTEL_WL_7260_2     0x08b2
5590 #define PCI_PRODUCT_INTEL_WL_7265_1     0x095a
5591 #define PCI_PRODUCT_INTEL_WL_7265_2     0x095b
5592 #define PCI_PRODUCT_INTEL_WL_8260_1     0x24f3
5593 #define PCI_PRODUCT_INTEL_WL_8260_2     0x24f4
5594
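/*
 * Supported PCI device IDs and the corresponding adapter names reported
 * by iwm_probe().
 */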
5595 static const struct iwm_devices {
5596         uint16_t        device;
5597         const char      *name;
5598 } iwm_devices[] = {
5599         { PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
5600         { PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
5601         { PCI_PRODUCT_INTEL_WL_3165_1, "Intel Dual Band Wireless AC 3165" },
5602         { PCI_PRODUCT_INTEL_WL_3165_2, "Intel Dual Band Wireless AC 3165" },
5603         { PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
5604         { PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
5605         { PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
5606         { PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
5607         { PCI_PRODUCT_INTEL_WL_8260_1, "Intel Dual Band Wireless AC 8260" },
5608         { PCI_PRODUCT_INTEL_WL_8260_2, "Intel Dual Band Wireless AC 8260" },
5609 };
5610
5611 static int
5612 iwm_probe(device_t dev)
5613 {
5614         int i;
5615
5616         for (i = 0; i < nitems(iwm_devices); i++) {
5617                 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5618                     pci_get_device(dev) == iwm_devices[i].device) {
5619                         device_set_desc(dev, iwm_devices[i].name);
5620                         return (BUS_PROBE_DEFAULT);
5621                 }
5622         }
5623
5624         return (ENXIO);
5625 }
5626
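/*
 * Select per-device parameters from the PCI device ID: firmware image
 * name, device family, firmware DMA segment size, and whether the host
 * interrupt operation mode workaround is required.
 */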
5627 static int
5628 iwm_dev_check(device_t dev)
5629 {
5630         struct iwm_softc *sc;
5631
5632         sc = device_get_softc(dev);
5633
5634         sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
5635         switch (pci_get_device(dev)) {
5636         case PCI_PRODUCT_INTEL_WL_3160_1:
5637         case PCI_PRODUCT_INTEL_WL_3160_2:
5638                 sc->sc_fwname = "iwm3160fw";
5639                 sc->host_interrupt_operation_mode = 1;
5640                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5641                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5642                 return (0);
5643         case PCI_PRODUCT_INTEL_WL_3165_1:
5644         case PCI_PRODUCT_INTEL_WL_3165_2:
5645                 sc->sc_fwname = "iwm7265fw";
5646                 sc->host_interrupt_operation_mode = 0;
5647                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5648                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5649                 return (0);
5650         case PCI_PRODUCT_INTEL_WL_7260_1:
5651         case PCI_PRODUCT_INTEL_WL_7260_2:
5652                 sc->sc_fwname = "iwm7260fw";
5653                 sc->host_interrupt_operation_mode = 1;
5654                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5655                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5656                 return (0);
5657         case PCI_PRODUCT_INTEL_WL_7265_1:
5658         case PCI_PRODUCT_INTEL_WL_7265_2:
5659                 sc->sc_fwname = "iwm7265fw";
5660                 sc->host_interrupt_operation_mode = 0;
5661                 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
5662                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5663                 return (0);
5664         case PCI_PRODUCT_INTEL_WL_8260_1:
5665         case PCI_PRODUCT_INTEL_WL_8260_2:
5666                 sc->sc_fwname = "iwm8000Cfw";
5667                 sc->host_interrupt_operation_mode = 0;
5668                 sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
5669                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
5670                 return (0);
5671         default:
5672                 device_printf(dev, "unknown adapter type\n");
5673                 return (ENXIO);
5674         }
5675 }
5676
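/*
 * Bus-level attach: clear the PCI retry timeout, enable bus mastering,
 * map the BAR(0) register window, and set up the interrupt (MSI if
 * available, otherwise a shared legacy interrupt) with iwm_intr as the
 * handler.
 */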
5677 static int
5678 iwm_pci_attach(device_t dev)
5679 {
5680         struct iwm_softc *sc;
5681         int count, error, rid;
5682         uint16_t reg;
5683
5684         sc = device_get_softc(dev);
5685
5686         /* Clear device-specific "PCI retry timeout" register (41h). */
5687         reg = pci_read_config(dev, 0x40, sizeof(reg));
5688         pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
5689
5690         /* Enable bus-mastering and hardware bug workaround. */
5691         pci_enable_busmaster(dev);
5692         reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5693         /* if !MSI */
5694         if (reg & PCIM_STATUS_INTxSTATE) {
5695                 reg &= ~PCIM_STATUS_INTxSTATE;
5696         }
5697         pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5698
5699         rid = PCIR_BAR(0);
5700         sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5701             RF_ACTIVE);
5702         if (sc->sc_mem == NULL) {
5703                 device_printf(sc->sc_dev, "can't map mem space\n");
5704                 return (ENXIO);
5705         }
5706         sc->sc_st = rman_get_bustag(sc->sc_mem);
5707         sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5708
5709         /* Install interrupt handler. */
5710         count = 1;
5711         rid = 0;
5712         if (pci_alloc_msi(dev, &count) == 0)
5713                 rid = 1;
5714         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5715             (rid != 0 ? 0 : RF_SHAREABLE));
5716         if (sc->sc_irq == NULL) {
5717                 device_printf(dev, "can't map interrupt\n");
5718                 return (ENXIO);
5719         }
5720         error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5721             NULL, iwm_intr, sc, &sc->sc_ih);
5722         if (error != 0) {
5723                 device_printf(dev, "can't establish interrupt\n");
5724                 return (ENXIO);
5725         }
5726         sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5727
5728         return (0);
5729 }
5730
5731 static void
5732 iwm_pci_detach(device_t dev)
5733 {
5734         struct iwm_softc *sc = device_get_softc(dev);
5735
5736         if (sc->sc_irq != NULL) {
5737                 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5738                 bus_release_resource(dev, SYS_RES_IRQ,
5739                     rman_get_rid(sc->sc_irq), sc->sc_irq);
5740                 pci_release_msi(dev);
5741         }
5742         if (sc->sc_mem != NULL)
5743                 bus_release_resource(dev, SYS_RES_MEMORY,
5744                     rman_get_rid(sc->sc_mem), sc->sc_mem);
5745 }
5746
5747
5748
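/*
 * Device attach: set up locks, callouts and tasks, attach the PCI
 * resources, identify the chip, and allocate the DMA areas (firmware,
 * keep-warm page, ICT table, TX scheduler, TX/RX rings).  The firmware
 * load and the net80211 attach are deferred to iwm_preinit(), which runs
 * from a config_intrhook once interrupts are available.
 */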
5749 static int
5750 iwm_attach(device_t dev)
5751 {
5752         struct iwm_softc *sc = device_get_softc(dev);
5753         struct ieee80211com *ic = &sc->sc_ic;
5754         int error;
5755         int txq_i, i;
5756
5757         sc->sc_dev = dev;
5758         IWM_LOCK_INIT(sc);
5759         mbufq_init(&sc->sc_snd, ifqmaxlen);
5760         callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5761         callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
5762         TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5763
5764         /* PCI attach */
5765         error = iwm_pci_attach(dev);
5766         if (error != 0)
5767                 goto fail;
5768
5769         sc->sc_wantresp = -1;
5770
5771         /* Check device type */
5772         error = iwm_dev_check(dev);
5773         if (error != 0)
5774                 goto fail;
5775
5776         /*
5777          * We now start fiddling with the hardware
5778          */
5779         /*
5780          * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
5781          * changed, and now the revision step also includes bit 0-1 (no more
5782          * "dash" value). To keep hw_rev backwards compatible - we'll store it
5783          * in the old format.
5784          */
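        /*
         * Worked example (assuming IWM_CSR_HW_REV_STEP() extracts bits 2-3,
         * like the Linux CSR_HW_REV_STEP macro this mirrors): a raw 8000
         * value with the step in bits 0-1, say 0x0402, is shifted left by
         * two so the step lands in bits 2-3, the macro pulls it out (0x2),
         * and it is stored back at bits 2-3 of sc_hw_rev while the 0xfff0
         * mask zeroes the old dash/step nibble, giving 0x0408.
         */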
5785         if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
5786                 sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
5787                                 (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
5788
5789         if (iwm_prepare_card_hw(sc) != 0) {
5790                 device_printf(dev, "could not initialize hardware\n");
5791                 goto fail;
5792         }
5793
5794         if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
5795                 int ret;
5796                 uint32_t hw_step;
5797
5798                 /*
5799                  * In order to recognize C step the driver should read the
5800                  * chip version id located at the AUX bus MISC address.
5801                  */
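                /*
                 * The sequence below asserts INIT_DONE, waits for the MAC
                 * clock to become ready, and then enables WFPM and reads the
                 * step out of IWM_AUX_MISC_REG through the periphery-register
                 * interface while holding the NIC lock.
                 */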
5802                 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
5803                             IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
5804                 DELAY(2);
5805
5806                 ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
5807                                    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5808                                    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5809                                    25000);
5810                 if (!ret) {
5811                         device_printf(sc->sc_dev,
5812                             "Failed to wake up the nic\n");
5813                         goto fail;
5814                 }
5815
5816                 if (iwm_nic_lock(sc)) {
5817                         hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
5818                         hw_step |= IWM_ENABLE_WFPM;
5819                         iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
5820                         hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
5821                         hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
5822                         if (hw_step == 0x3)
5823                                 sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
5824                                                 (IWM_SILICON_C_STEP << 2);
5825                         iwm_nic_unlock(sc);
5826                 } else {
5827                         device_printf(sc->sc_dev, "Failed to lock the nic\n");
5828                         goto fail;
5829                 }
5830         }
5831
5832         /* Allocate DMA memory for firmware transfers. */
5833         if ((error = iwm_alloc_fwmem(sc)) != 0) {
5834                 device_printf(dev, "could not allocate memory for firmware\n");
5835                 goto fail;
5836         }
5837
5838         /* Allocate "Keep Warm" page. */
5839         if ((error = iwm_alloc_kw(sc)) != 0) {
5840                 device_printf(dev, "could not allocate keep warm page\n");
5841                 goto fail;
5842         }
5843
5844         /* We use ICT interrupts */
5845         if ((error = iwm_alloc_ict(sc)) != 0) {
5846                 device_printf(dev, "could not allocate ICT table\n");
5847                 goto fail;
5848         }
5849
5850         /* Allocate TX scheduler "rings". */
5851         if ((error = iwm_alloc_sched(sc)) != 0) {
5852                 device_printf(dev, "could not allocate TX scheduler rings\n");
5853                 goto fail;
5854         }
5855
5856         /* Allocate TX rings */
5857         for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
5858                 if ((error = iwm_alloc_tx_ring(sc,
5859                     &sc->txq[txq_i], txq_i)) != 0) {
5860                         device_printf(dev,
5861                             "could not allocate TX ring %d\n",
5862                             txq_i);
5863                         goto fail;
5864                 }
5865         }
5866
5867         /* Allocate RX ring. */
5868         if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
5869                 device_printf(dev, "could not allocate RX ring\n");
5870                 goto fail;
5871         }
5872
5873         /* Clear pending interrupts. */
5874         IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
5875
5876         ic->ic_softc = sc;
5877         ic->ic_name = device_get_nameunit(sc->sc_dev);
5878         ic->ic_phytype = IEEE80211_T_OFDM;      /* not only, but not used */
5879         ic->ic_opmode = IEEE80211_M_STA;        /* default to BSS mode */
5880
5881         /* Set device capabilities. */
5882         ic->ic_caps =
5883             IEEE80211_C_STA |
5884             IEEE80211_C_WPA |           /* WPA/RSN */
5885             IEEE80211_C_WME |
5886             IEEE80211_C_SHSLOT |        /* short slot time supported */
5887             IEEE80211_C_SHPREAMBLE      /* short preamble supported */
5888 //          IEEE80211_C_BGSCAN          /* capable of bg scanning */
5889             ;
5890         for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
5891                 sc->sc_phyctxt[i].id = i;
5892                 sc->sc_phyctxt[i].color = 0;
5893                 sc->sc_phyctxt[i].ref = 0;
5894                 sc->sc_phyctxt[i].channel = NULL;
5895         }
5896
5897         /* Default noise floor */
5898         sc->sc_noise = -96;
5899
5900         /* Max RSSI */
5901         sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
5902
5903         sc->sc_preinit_hook.ich_func = iwm_preinit;
5904         sc->sc_preinit_hook.ich_arg = sc;
5905         if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
5906                 device_printf(dev, "config_intrhook_establish failed\n");
5907                 goto fail;
5908         }
5909
5910 #ifdef IWM_DEBUG
5911         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
5912             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
5913             CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
5914 #endif
5915
5916         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5917             "<-%s\n", __func__);
5918
5919         return 0;
5920
5921         /* Free allocated memory if something failed during attachment. */
5922 fail:
5923         iwm_detach_local(sc, 0);
5924
5925         return ENXIO;
5926 }
5927
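/*
 * A usable MAC address must be a unicast address (group bit clear) and
 * must not be all zeroes.
 */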
5928 static int
5929 iwm_is_valid_ether_addr(uint8_t *addr)
5930 {
5931         char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
5932
5933         if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
5934                 return (FALSE);
5935
5936         return (TRUE);
5937 }
5938
5939 static int
5940 iwm_update_edca(struct ieee80211com *ic)
5941 {
5942         struct iwm_softc *sc = ic->ic_softc;
5943
5944         device_printf(sc->sc_dev, "%s: called\n", __func__);
5945         return (0);
5946 }
5947
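/*
 * Deferred attach, run from the config_intrhook: bring the hardware up,
 * run the "init" firmware once to read the NVM (hardware revision, MAC
 * address, supported bands), stop the device again, and finish the
 * net80211 attach with the information gathered.
 */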
5948 static void
5949 iwm_preinit(void *arg)
5950 {
5951         struct iwm_softc *sc = arg;
5952         device_t dev = sc->sc_dev;
5953         struct ieee80211com *ic = &sc->sc_ic;
5954         int error;
5955
5956         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
5957             "->%s\n", __func__);
5958
5959         IWM_LOCK(sc);
5960         if ((error = iwm_start_hw(sc)) != 0) {
5961                 device_printf(dev, "could not initialize hardware\n");
5962                 IWM_UNLOCK(sc);
5963                 goto fail;
5964         }
5965
5966         error = iwm_run_init_mvm_ucode(sc, 1);
5967         iwm_stop_device(sc);
5968         if (error) {
5969                 IWM_UNLOCK(sc);
5970                 goto fail;
5971         }
5972         device_printf(dev,
5973             "hw rev 0x%x, fw ver %s, address %s\n",
5974             sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
5975             sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
5976
5977         /* not all hardware can do 5GHz band */
5978         if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
5979                 memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
5980                     sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
5981         IWM_UNLOCK(sc);
5982
5983         iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
5984             ic->ic_channels);
5985
5986         /*
5987          * At this point we've committed - if we fail to do setup,
5988          * we now also have to tear down the net80211 state.
5989          */
5990         ieee80211_ifattach(ic);
5991         ic->ic_vap_create = iwm_vap_create;
5992         ic->ic_vap_delete = iwm_vap_delete;
5993         ic->ic_raw_xmit = iwm_raw_xmit;
5994         ic->ic_node_alloc = iwm_node_alloc;
5995         ic->ic_scan_start = iwm_scan_start;
5996         ic->ic_scan_end = iwm_scan_end;
5997         ic->ic_update_mcast = iwm_update_mcast;
5998         ic->ic_getradiocaps = iwm_init_channel_map;
5999         ic->ic_set_channel = iwm_set_channel;
6000         ic->ic_scan_curchan = iwm_scan_curchan;
6001         ic->ic_scan_mindwell = iwm_scan_mindwell;
6002         ic->ic_wme.wme_update = iwm_update_edca;
6003         ic->ic_parent = iwm_parent;
6004         ic->ic_transmit = iwm_transmit;
6005         iwm_radiotap_attach(sc);
6006         if (bootverbose)
6007                 ieee80211_announce(ic);
6008
6009         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6010             "<-%s\n", __func__);
6011         config_intrhook_disestablish(&sc->sc_preinit_hook);
6012
6013         return;
6014 fail:
6015         config_intrhook_disestablish(&sc->sc_preinit_hook);
6016         iwm_detach_local(sc, 0);
6017 }
6018
6019 /*
6020  * Attach the interface to 802.11 radiotap.
6021  */
6022 static void
6023 iwm_radiotap_attach(struct iwm_softc *sc)
6024 {
6025         struct ieee80211com *ic = &sc->sc_ic;
6026
6027         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6028             "->%s begin\n", __func__);
6029         ieee80211_radiotap_attach(ic,
6030             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6031                 IWM_TX_RADIOTAP_PRESENT,
6032             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6033                 IWM_RX_RADIOTAP_PRESENT);
6034         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6035             "->%s end\n", __func__);
6036 }
6037
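/*
 * Create the VAP; only one at a time is supported.  The driver interposes
 * its own state-change handler, iwm_newstate(), and saves the net80211
 * one in iv_newstate so it can be chained to.
 */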
6038 static struct ieee80211vap *
6039 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6040     enum ieee80211_opmode opmode, int flags,
6041     const uint8_t bssid[IEEE80211_ADDR_LEN],
6042     const uint8_t mac[IEEE80211_ADDR_LEN])
6043 {
6044         struct iwm_vap *ivp;
6045         struct ieee80211vap *vap;
6046
6047         if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6048                 return NULL;
6049         ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6050         vap = &ivp->iv_vap;
6051         ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6052         vap->iv_bmissthreshold = 10;            /* override default */
6053         /* Override with driver methods. */
6054         ivp->iv_newstate = vap->iv_newstate;
6055         vap->iv_newstate = iwm_newstate;
6056
6057         ieee80211_ratectl_init(vap);
6058         /* Complete setup. */
6059         ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6060             mac);
6061         ic->ic_opmode = opmode;
6062
6063         return vap;
6064 }
6065
6066 static void
6067 iwm_vap_delete(struct ieee80211vap *vap)
6068 {
6069         struct iwm_vap *ivp = IWM_VAP(vap);
6070
6071         ieee80211_ratectl_deinit(vap);
6072         ieee80211_vap_detach(vap);
6073         free(ivp, M_80211_VAP);
6074 }
6075
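/*
 * Start a firmware-driven scan, using the UMAC scan API when the firmware
 * advertises IWM_UCODE_TLV_CAPA_UMAC_SCAN and the older LMAC scan command
 * otherwise.  On failure the net80211 scan is cancelled.
 */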
6076 static void
6077 iwm_scan_start(struct ieee80211com *ic)
6078 {
6079         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6080         struct iwm_softc *sc = ic->ic_softc;
6081         int error;
6082
6083         IWM_LOCK(sc);
6084         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6085                 error = iwm_mvm_umac_scan(sc);
6086         else
6087                 error = iwm_mvm_lmac_scan(sc);
6088         if (error != 0) {
6089                 device_printf(sc->sc_dev, "could not initiate scan\n");
6090                 IWM_UNLOCK(sc);
6091                 ieee80211_cancel_scan(vap);
6092         } else {
6093                 iwm_led_blink_start(sc);
6094                 IWM_UNLOCK(sc);
6095         }
6096 }
6097
6098 static void
6099 iwm_scan_end(struct ieee80211com *ic)
6100 {
6101         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6102         struct iwm_softc *sc = ic->ic_softc;
6103
6104         IWM_LOCK(sc);
6105         iwm_led_blink_stop(sc);
6106         if (vap->iv_state == IEEE80211_S_RUN)
6107                 iwm_mvm_led_enable(sc);
6108         IWM_UNLOCK(sc);
6109 }
6110
6111 static void
6112 iwm_update_mcast(struct ieee80211com *ic)
6113 {
6114 }
6115
6116 static void
6117 iwm_set_channel(struct ieee80211com *ic)
6118 {
6119 }
6120
6121 static void
6122 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6123 {
6124 }
6125
6126 static void
6127 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6128 {
6129         return;
6130 }
6131
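/*
 * Stop and, if any interface is running, restart the device.  The
 * IWM_FLAG_BUSY flag serializes concurrent callers; this is used by the
 * resume path.
 */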
6132 void
6133 iwm_init_task(void *arg1)
6134 {
6135         struct iwm_softc *sc = arg1;
6136
6137         IWM_LOCK(sc);
6138         while (sc->sc_flags & IWM_FLAG_BUSY)
6139                 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6140         sc->sc_flags |= IWM_FLAG_BUSY;
6141         iwm_stop(sc);
6142         if (sc->sc_ic.ic_nrunning > 0)
6143                 iwm_init(sc);
6144         sc->sc_flags &= ~IWM_FLAG_BUSY;
6145         wakeup(&sc->sc_flags);
6146         IWM_UNLOCK(sc);
6147 }
6148
6149 static int
6150 iwm_resume(device_t dev)
6151 {
6152         struct iwm_softc *sc = device_get_softc(dev);
6153         int do_reinit = 0;
6154         uint16_t reg;
6155
6156         /* Clear device-specific "PCI retry timeout" register (41h). */
6157         reg = pci_read_config(dev, 0x40, sizeof(reg));
6158         pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
6159         iwm_init_task(device_get_softc(dev));
6160
6161         IWM_LOCK(sc);
6162         if (sc->sc_flags & IWM_FLAG_SCANNING) {
6163                 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6164                 do_reinit = 1;
6165         }
6166         IWM_UNLOCK(sc);
6167
6168         if (do_reinit)
6169                 ieee80211_resume_all(&sc->sc_ic);
6170
6171         return 0;
6172 }
6173
6174 static int
6175 iwm_suspend(device_t dev)
6176 {
6177         int do_stop = 0;
6178         struct iwm_softc *sc = device_get_softc(dev);
6179
6180         do_stop = (sc->sc_ic.ic_nrunning > 0);
6181
6182         ieee80211_suspend_all(&sc->sc_ic);
6183
6184         if (do_stop) {
6185                 IWM_LOCK(sc);
6186                 iwm_stop(sc);
6187                 sc->sc_flags |= IWM_FLAG_SCANNING;
6188                 IWM_UNLOCK(sc);
6189         }
6190
6191         return (0);
6192 }
6193
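/*
 * Common teardown for detach and failed attach: drain tasks and callouts,
 * stop the device, optionally detach from net80211, and release the DMA
 * rings, firmware image and PCI resources.
 */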
6194 static int
6195 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6196 {
6197         struct iwm_fw_info *fw = &sc->sc_fw;
6198         device_t dev = sc->sc_dev;
6199         int i;
6200
6201         ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6202
6203         callout_drain(&sc->sc_led_blink_to);
6204         callout_drain(&sc->sc_watchdog_to);
6205         iwm_stop_device(sc);
6206         if (do_net80211) {
6207                 ieee80211_ifdetach(&sc->sc_ic);
6208         }
6209
6210         iwm_phy_db_free(sc);
6211
6212         /* Free descriptor rings */
6213         iwm_free_rx_ring(sc, &sc->rxq);
6214         for (i = 0; i < nitems(sc->txq); i++)
6215                 iwm_free_tx_ring(sc, &sc->txq[i]);
6216
6217         /* Free firmware */
6218         if (fw->fw_fp != NULL)
6219                 iwm_fw_info_free(fw);
6220
6221         /* Free scheduler */
6222         iwm_dma_contig_free(&sc->sched_dma);
6223         iwm_dma_contig_free(&sc->ict_dma);
6224         iwm_dma_contig_free(&sc->kw_dma);
6225         iwm_dma_contig_free(&sc->fw_dma);
6226
6227         /* Finished with the hardware - detach things */
6228         iwm_pci_detach(dev);
6229
6230         mbufq_drain(&sc->sc_snd);
6231         IWM_LOCK_DESTROY(sc);
6232
6233         return (0);
6234 }
6235
6236 static int
6237 iwm_detach(device_t dev)
6238 {
6239         struct iwm_softc *sc = device_get_softc(dev);
6240
6241         return (iwm_detach_local(sc, 1));
6242 }
6243
6244 static device_method_t iwm_pci_methods[] = {
6245         /* Device interface */
6246         DEVMETHOD(device_probe,         iwm_probe),
6247         DEVMETHOD(device_attach,        iwm_attach),
6248         DEVMETHOD(device_detach,        iwm_detach),
6249         DEVMETHOD(device_suspend,       iwm_suspend),
6250         DEVMETHOD(device_resume,        iwm_resume),
6251
6252         DEVMETHOD_END
6253 };
6254
6255 static driver_t iwm_pci_driver = {
6256         "iwm",
6257         iwm_pci_methods,
6258         sizeof (struct iwm_softc)
6259 };
6260
6261 static devclass_t iwm_devclass;
6262
6263 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6264 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6265 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6266 MODULE_DEPEND(iwm, wlan, 1, 1, 1);