]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/dev/iwm/if_iwm.c
[iwm] Deduplicate code in iwm_auth() from an if condition.
[FreeBSD/FreeBSD.git] / sys / dev / iwm / if_iwm.c
1 /*      $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $     */
2
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 #include <sys/cdefs.h>
106 __FBSDID("$FreeBSD$");
107
108 #include "opt_wlan.h"
109 #include "opt_iwm.h"
110
111 #include <sys/param.h>
112 #include <sys/bus.h>
113 #include <sys/conf.h>
114 #include <sys/endian.h>
115 #include <sys/firmware.h>
116 #include <sys/kernel.h>
117 #include <sys/malloc.h>
118 #include <sys/mbuf.h>
119 #include <sys/mutex.h>
120 #include <sys/module.h>
121 #include <sys/proc.h>
122 #include <sys/rman.h>
123 #include <sys/socket.h>
124 #include <sys/sockio.h>
125 #include <sys/sysctl.h>
126 #include <sys/linker.h>
127
128 #include <machine/bus.h>
129 #include <machine/endian.h>
130 #include <machine/resource.h>
131
132 #include <dev/pci/pcivar.h>
133 #include <dev/pci/pcireg.h>
134
135 #include <net/bpf.h>
136
137 #include <net/if.h>
138 #include <net/if_var.h>
139 #include <net/if_arp.h>
140 #include <net/if_dl.h>
141 #include <net/if_media.h>
142 #include <net/if_types.h>
143
144 #include <netinet/in.h>
145 #include <netinet/in_systm.h>
146 #include <netinet/if_ether.h>
147 #include <netinet/ip.h>
148
149 #include <net80211/ieee80211_var.h>
150 #include <net80211/ieee80211_regdomain.h>
151 #include <net80211/ieee80211_ratectl.h>
152 #include <net80211/ieee80211_radiotap.h>
153
154 #include <dev/iwm/if_iwmreg.h>
155 #include <dev/iwm/if_iwmvar.h>
156 #include <dev/iwm/if_iwm_config.h>
157 #include <dev/iwm/if_iwm_debug.h>
158 #include <dev/iwm/if_iwm_notif_wait.h>
159 #include <dev/iwm/if_iwm_util.h>
160 #include <dev/iwm/if_iwm_binding.h>
161 #include <dev/iwm/if_iwm_phy_db.h>
162 #include <dev/iwm/if_iwm_mac_ctxt.h>
163 #include <dev/iwm/if_iwm_phy_ctxt.h>
164 #include <dev/iwm/if_iwm_time_event.h>
165 #include <dev/iwm/if_iwm_power.h>
166 #include <dev/iwm/if_iwm_scan.h>
167
168 #include <dev/iwm/if_iwm_pcie_trans.h>
169 #include <dev/iwm/if_iwm_led.h>
170 #include <dev/iwm/if_iwm_fw.h>
171
172 /* From DragonflyBSD */
173 #define mtodoff(m, t, off)      ((t)((m)->m_data + (off)))
174
/*
 * Fixed channel list advertised in the NVM of pre-8000-family devices.
 * Indexed in lockstep with the per-channel flag words read from NVM;
 * the first IWM_NUM_2GHZ_CHANNELS entries are 2.4 GHz channels.
 */
const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
/* The NVM parsing code assumes the table fits the configured maximum. */
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");
185
/*
 * Fixed channel list for the 8000 family, which exposes a wider set of
 * 5 GHz channels than older devices.  Same indexing contract as
 * iwm_nvm_channels above.
 */
const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
/* The NVM parsing code assumes the table fits the configured maximum. */
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");
196
/* Number of 2.4 GHz entries at the head of the channel tables above. */
#define IWM_NUM_2GHZ_CHANNELS	14
/* Mask applied to the NVM word holding the hardware address count. */
#define IWM_N_HW_ADDR_MASK	0xF

/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
	uint8_t rate;	/* rate in 500 kb/s units (2 == 1 Mb/s, per PLCP name) */
	uint8_t plcp;	/* PLCP signal value programmed into tx commands */
} iwm_rates[] = {
	{   2,  IWM_RATE_1M_PLCP  },
	{   4,  IWM_RATE_2M_PLCP  },
	{  11,  IWM_RATE_5M_PLCP  },
	{  22,  IWM_RATE_11M_PLCP },
	{  12,  IWM_RATE_6M_PLCP  },
	{  18,  IWM_RATE_9M_PLCP  },
	{  24,  IWM_RATE_12M_PLCP },
	{  36,  IWM_RATE_18M_PLCP },
	{  48,  IWM_RATE_24M_PLCP },
	{  72,  IWM_RATE_36M_PLCP },
	{  96,  IWM_RATE_48M_PLCP },
	{ 108,  IWM_RATE_54M_PLCP },
};
/* Rate-index helpers: entries [0,4) are CCK, [4,max] are OFDM. */
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)

/* One NVM section as read from the device: raw bytes plus length. */
struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

/* Timeouts (in ticks) for waiting on ucode alive/calibration events. */
#define IWM_MVM_UCODE_ALIVE_TIMEOUT	hz
#define IWM_MVM_UCODE_CALIB_TIMEOUT	(2*hz)

/* Result of the ucode "alive" notification handshake. */
struct iwm_mvm_alive_data {
	int valid;		/* nonzero once a valid alive was received */
	uint32_t scd_base_addr;	/* scheduler base address reported by fw */
};
239
240 static int      iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
241 static int      iwm_firmware_store_section(struct iwm_softc *,
242                                            enum iwm_ucode_type,
243                                            const uint8_t *, size_t);
244 static int      iwm_set_default_calib(struct iwm_softc *, const void *);
245 static void     iwm_fw_info_free(struct iwm_fw_info *);
246 static int      iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
247 static int      iwm_alloc_fwmem(struct iwm_softc *);
248 static int      iwm_alloc_sched(struct iwm_softc *);
249 static int      iwm_alloc_kw(struct iwm_softc *);
250 static int      iwm_alloc_ict(struct iwm_softc *);
251 static int      iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
252 static void     iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
253 static void     iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
254 static int      iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
255                                   int);
256 static void     iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
257 static void     iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
258 static void     iwm_enable_interrupts(struct iwm_softc *);
259 static void     iwm_restore_interrupts(struct iwm_softc *);
260 static void     iwm_disable_interrupts(struct iwm_softc *);
261 static void     iwm_ict_reset(struct iwm_softc *);
262 static int      iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
263 static void     iwm_stop_device(struct iwm_softc *);
264 static void     iwm_mvm_nic_config(struct iwm_softc *);
265 static int      iwm_nic_rx_init(struct iwm_softc *);
266 static int      iwm_nic_tx_init(struct iwm_softc *);
267 static int      iwm_nic_init(struct iwm_softc *);
268 static int      iwm_enable_txq(struct iwm_softc *, int, int, int);
269 static int      iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
270 static int      iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
271                                    uint16_t, uint8_t *, uint16_t *);
272 static int      iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
273                                      uint16_t *, uint32_t);
274 static uint32_t iwm_eeprom_channel_flags(uint16_t);
275 static void     iwm_add_channel_band(struct iwm_softc *,
276                     struct ieee80211_channel[], int, int *, int, size_t,
277                     const uint8_t[]);
278 static void     iwm_init_channel_map(struct ieee80211com *, int, int *,
279                     struct ieee80211_channel[]);
280 static struct iwm_nvm_data *
281         iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
282                            const uint16_t *, const uint16_t *,
283                            const uint16_t *, const uint16_t *,
284                            const uint16_t *);
285 static void     iwm_free_nvm_data(struct iwm_nvm_data *);
286 static void     iwm_set_hw_address_family_8000(struct iwm_softc *,
287                                                struct iwm_nvm_data *,
288                                                const uint16_t *,
289                                                const uint16_t *);
290 static int      iwm_get_sku(const struct iwm_softc *, const uint16_t *,
291                             const uint16_t *);
292 static int      iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
293 static int      iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
294                                   const uint16_t *);
295 static int      iwm_get_n_hw_addrs(const struct iwm_softc *,
296                                    const uint16_t *);
297 static void     iwm_set_radio_cfg(const struct iwm_softc *,
298                                   struct iwm_nvm_data *, uint32_t);
299 static struct iwm_nvm_data *
300         iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
301 static int      iwm_nvm_init(struct iwm_softc *);
302 static int      iwm_pcie_load_section(struct iwm_softc *, uint8_t,
303                                       const struct iwm_fw_desc *);
304 static int      iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
305                                              bus_addr_t, uint32_t);
306 static int      iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
307                                                 const struct iwm_fw_sects *,
308                                                 int, int *);
309 static int      iwm_pcie_load_cpu_sections(struct iwm_softc *,
310                                            const struct iwm_fw_sects *,
311                                            int, int *);
312 static int      iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
313                                                const struct iwm_fw_sects *);
314 static int      iwm_pcie_load_given_ucode(struct iwm_softc *,
315                                           const struct iwm_fw_sects *);
316 static int      iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *);
317 static int      iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
318 static int      iwm_send_phy_cfg_cmd(struct iwm_softc *);
319 static int      iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
320                                               enum iwm_ucode_type);
321 static int      iwm_run_init_mvm_ucode(struct iwm_softc *, int);
322 static int      iwm_rx_addbuf(struct iwm_softc *, int, int);
323 static int      iwm_mvm_get_signal_strength(struct iwm_softc *,
324                                             struct iwm_rx_phy_info *);
325 static void     iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
326                                       struct iwm_rx_packet *);
327 static int      iwm_get_noise(struct iwm_softc *sc,
328                     const struct iwm_mvm_statistics_rx_non_phy *);
329 static boolean_t iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *,
330                                     uint32_t, boolean_t);
331 static int      iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
332                                          struct iwm_rx_packet *,
333                                          struct iwm_node *);
334 static void     iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
335 static void     iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
336 #if 0
337 static void     iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
338                                  uint16_t);
339 #endif
340 static const struct iwm_rate *
341         iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
342                         struct mbuf *, struct iwm_tx_cmd *);
343 static int      iwm_tx(struct iwm_softc *, struct mbuf *,
344                        struct ieee80211_node *, int);
345 static int      iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
346                              const struct ieee80211_bpf_params *);
347 static int      iwm_mvm_flush_tx_path(struct iwm_softc *sc,
348                                       uint32_t tfd_msk, uint32_t flags);
349 static int      iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
350                                                 struct iwm_mvm_add_sta_cmd *,
351                                                 int *);
352 static int      iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
353                                        int);
354 static int      iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
355 static int      iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
356 static int      iwm_mvm_add_int_sta_common(struct iwm_softc *,
357                                            struct iwm_int_sta *,
358                                            const uint8_t *, uint16_t, uint16_t);
359 static int      iwm_mvm_add_aux_sta(struct iwm_softc *);
360 static int      iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_vap *);
361 static int      iwm_auth(struct ieee80211vap *, struct iwm_softc *);
362 static int      iwm_release(struct iwm_softc *, struct iwm_node *);
363 static struct ieee80211_node *
364                 iwm_node_alloc(struct ieee80211vap *,
365                                const uint8_t[IEEE80211_ADDR_LEN]);
366 static void     iwm_setrates(struct iwm_softc *, struct iwm_node *);
367 static int      iwm_media_change(struct ifnet *);
368 static int      iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
369 static void     iwm_endscan_cb(void *, int);
370 static void     iwm_mvm_fill_sf_command(struct iwm_softc *,
371                                         struct iwm_sf_cfg_cmd *,
372                                         struct ieee80211_node *);
373 static int      iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
374 static int      iwm_send_bt_init_conf(struct iwm_softc *);
375 static int      iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
376 static void     iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
377 static int      iwm_init_hw(struct iwm_softc *);
378 static void     iwm_init(struct iwm_softc *);
379 static void     iwm_start(struct iwm_softc *);
380 static void     iwm_stop(struct iwm_softc *);
381 static void     iwm_watchdog(void *);
382 static void     iwm_parent(struct ieee80211com *);
383 #ifdef IWM_DEBUG
384 static const char *
385                 iwm_desc_lookup(uint32_t);
386 static void     iwm_nic_error(struct iwm_softc *);
387 static void     iwm_nic_umac_error(struct iwm_softc *);
388 #endif
389 static void     iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
390 static void     iwm_notif_intr(struct iwm_softc *);
391 static void     iwm_intr(void *);
392 static int      iwm_attach(device_t);
393 static int      iwm_is_valid_ether_addr(uint8_t *);
394 static void     iwm_preinit(void *);
395 static int      iwm_detach_local(struct iwm_softc *sc, int);
396 static void     iwm_init_task(void *);
397 static void     iwm_radiotap_attach(struct iwm_softc *);
398 static struct ieee80211vap *
399                 iwm_vap_create(struct ieee80211com *,
400                                const char [IFNAMSIZ], int,
401                                enum ieee80211_opmode, int,
402                                const uint8_t [IEEE80211_ADDR_LEN],
403                                const uint8_t [IEEE80211_ADDR_LEN]);
404 static void     iwm_vap_delete(struct ieee80211vap *);
405 static void     iwm_scan_start(struct ieee80211com *);
406 static void     iwm_scan_end(struct ieee80211com *);
407 static void     iwm_update_mcast(struct ieee80211com *);
408 static void     iwm_set_channel(struct ieee80211com *);
409 static void     iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
410 static void     iwm_scan_mindwell(struct ieee80211_scan_state *);
411 static int      iwm_detach(device_t);
412
413 /*
414  * Firmware parser.
415  */
416
417 static int
418 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
419 {
420         const struct iwm_fw_cscheme_list *l = (const void *)data;
421
422         if (dlen < sizeof(*l) ||
423             dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
424                 return EINVAL;
425
426         /* we don't actually store anything for now, always use s/w crypto */
427
428         return 0;
429 }
430
431 static int
432 iwm_firmware_store_section(struct iwm_softc *sc,
433     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
434 {
435         struct iwm_fw_sects *fws;
436         struct iwm_fw_desc *fwone;
437
438         if (type >= IWM_UCODE_TYPE_MAX)
439                 return EINVAL;
440         if (dlen < sizeof(uint32_t))
441                 return EINVAL;
442
443         fws = &sc->sc_fw.fw_sects[type];
444         if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
445                 return EINVAL;
446
447         fwone = &fws->fw_sect[fws->fw_count];
448
449         /* first 32bit are device load offset */
450         memcpy(&fwone->offset, data, sizeof(uint32_t));
451
452         /* rest is data */
453         fwone->data = data + sizeof(uint32_t);
454         fwone->len = dlen - sizeof(uint32_t);
455
456         fws->fw_count++;
457
458         return 0;
459 }
460
/* Scan-channel count assumed until the firmware TLVs say otherwise. */
#define IWM_DEFAULT_SCAN_CHANNELS 40

/* iwlwifi: iwl-drv.c */
/* Wire format of the IWM_UCODE_TLV default-calibration TLV. */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;		/* little-endian image type */
	struct iwm_tlv_calib_ctrl calib;
} __packed;
468
469 static int
470 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
471 {
472         const struct iwm_tlv_calib_data *def_calib = data;
473         uint32_t ucode_type = le32toh(def_calib->ucode_type);
474
475         if (ucode_type >= IWM_UCODE_TYPE_MAX) {
476                 device_printf(sc->sc_dev,
477                     "Wrong ucode_type %u for default "
478                     "calibration.\n", ucode_type);
479                 return EINVAL;
480         }
481
482         sc->sc_default_calib[ucode_type].flow_trigger =
483             def_calib->calib.flow_trigger;
484         sc->sc_default_calib[ucode_type].event_trigger =
485             def_calib->calib.event_trigger;
486
487         return 0;
488 }
489
490 static int
491 iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
492                         struct iwm_ucode_capabilities *capa)
493 {
494         const struct iwm_ucode_api *ucode_api = (const void *)data;
495         uint32_t api_index = le32toh(ucode_api->api_index);
496         uint32_t api_flags = le32toh(ucode_api->api_flags);
497         int i;
498
499         if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
500                 device_printf(sc->sc_dev,
501                     "api flags index %d larger than supported by driver\n",
502                     api_index);
503                 /* don't return an error so we can load FW that has more bits */
504                 return 0;
505         }
506
507         for (i = 0; i < 32; i++) {
508                 if (api_flags & (1U << i))
509                         setbit(capa->enabled_api, i + 32 * api_index);
510         }
511
512         return 0;
513 }
514
515 static int
516 iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
517                            struct iwm_ucode_capabilities *capa)
518 {
519         const struct iwm_ucode_capa *ucode_capa = (const void *)data;
520         uint32_t api_index = le32toh(ucode_capa->api_index);
521         uint32_t api_flags = le32toh(ucode_capa->api_capa);
522         int i;
523
524         if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
525                 device_printf(sc->sc_dev,
526                     "capa flags index %d larger than supported by driver\n",
527                     api_index);
528                 /* don't return an error so we can load FW that has more bits */
529                 return 0;
530         }
531
532         for (i = 0; i < 32; i++) {
533                 if (api_flags & (1U << i))
534                         setbit(capa->enabled_capa, i + 32 * api_index);
535         }
536
537         return 0;
538 }
539
/*
 * Release a loaded firmware image and clear the parsed section table.
 * fw->fw_status is deliberately left alone: iwm_read_firmware() uses it
 * to serialize loads, and the caller updates it separately.
 */
static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}
548
549 static int
550 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
551 {
552         struct iwm_fw_info *fw = &sc->sc_fw;
553         const struct iwm_tlv_ucode_header *uhdr;
554         const struct iwm_ucode_tlv *tlv;
555         struct iwm_ucode_capabilities *capa = &sc->ucode_capa;
556         enum iwm_ucode_tlv_type tlv_type;
557         const struct firmware *fwp;
558         const uint8_t *data;
559         uint32_t tlv_len;
560         uint32_t usniffer_img;
561         const uint8_t *tlv_data;
562         uint32_t paging_mem_size;
563         int num_of_cpus;
564         int error = 0;
565         size_t len;
566
567         if (fw->fw_status == IWM_FW_STATUS_DONE &&
568             ucode_type != IWM_UCODE_INIT)
569                 return 0;
570
571         while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
572                 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
573         fw->fw_status = IWM_FW_STATUS_INPROGRESS;
574
575         if (fw->fw_fp != NULL)
576                 iwm_fw_info_free(fw);
577
578         /*
579          * Load firmware into driver memory.
580          * fw_fp will be set.
581          */
582         IWM_UNLOCK(sc);
583         fwp = firmware_get(sc->cfg->fw_name);
584         IWM_LOCK(sc);
585         if (fwp == NULL) {
586                 device_printf(sc->sc_dev,
587                     "could not read firmware %s (error %d)\n",
588                     sc->cfg->fw_name, error);
589                 goto out;
590         }
591         fw->fw_fp = fwp;
592
593         /* (Re-)Initialize default values. */
594         capa->flags = 0;
595         capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
596         capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
597         memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
598         memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
599         memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
600
601         /*
602          * Parse firmware contents
603          */
604
605         uhdr = (const void *)fw->fw_fp->data;
606         if (*(const uint32_t *)fw->fw_fp->data != 0
607             || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
608                 device_printf(sc->sc_dev, "invalid firmware %s\n",
609                     sc->cfg->fw_name);
610                 error = EINVAL;
611                 goto out;
612         }
613
614         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
615             IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
616             IWM_UCODE_MINOR(le32toh(uhdr->ver)),
617             IWM_UCODE_API(le32toh(uhdr->ver)));
618         data = uhdr->data;
619         len = fw->fw_fp->datasize - sizeof(*uhdr);
620
621         while (len >= sizeof(*tlv)) {
622                 len -= sizeof(*tlv);
623                 tlv = (const void *)data;
624
625                 tlv_len = le32toh(tlv->length);
626                 tlv_type = le32toh(tlv->type);
627                 tlv_data = tlv->data;
628
629                 if (len < tlv_len) {
630                         device_printf(sc->sc_dev,
631                             "firmware too short: %zu bytes\n",
632                             len);
633                         error = EINVAL;
634                         goto parse_out;
635                 }
636                 len -= roundup2(tlv_len, 4);
637                 data += sizeof(tlv) + roundup2(tlv_len, 4);
638
639                 switch ((int)tlv_type) {
640                 case IWM_UCODE_TLV_PROBE_MAX_LEN:
641                         if (tlv_len != sizeof(uint32_t)) {
642                                 device_printf(sc->sc_dev,
643                                     "%s: PROBE_MAX_LEN (%d) != sizeof(uint32_t)\n",
644                                     __func__,
645                                     (int) tlv_len);
646                                 error = EINVAL;
647                                 goto parse_out;
648                         }
649                         capa->max_probe_length =
650                             le32_to_cpup((const uint32_t *)tlv_data);
651                         /* limit it to something sensible */
652                         if (capa->max_probe_length >
653                             IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
654                                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
655                                     "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
656                                     "ridiculous\n", __func__);
657                                 error = EINVAL;
658                                 goto parse_out;
659                         }
660                         break;
661                 case IWM_UCODE_TLV_PAN:
662                         if (tlv_len) {
663                                 device_printf(sc->sc_dev,
664                                     "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
665                                     __func__,
666                                     (int) tlv_len);
667                                 error = EINVAL;
668                                 goto parse_out;
669                         }
670                         capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
671                         break;
672                 case IWM_UCODE_TLV_FLAGS:
673                         if (tlv_len < sizeof(uint32_t)) {
674                                 device_printf(sc->sc_dev,
675                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
676                                     __func__,
677                                     (int) tlv_len);
678                                 error = EINVAL;
679                                 goto parse_out;
680                         }
681                         if (tlv_len % sizeof(uint32_t)) {
682                                 device_printf(sc->sc_dev,
683                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) %% sizeof(uint32_t)\n",
684                                     __func__,
685                                     (int) tlv_len);
686                                 error = EINVAL;
687                                 goto parse_out;
688                         }
689                         /*
690                          * Apparently there can be many flags, but Linux driver
691                          * parses only the first one, and so do we.
692                          *
693                          * XXX: why does this override IWM_UCODE_TLV_PAN?
694                          * Intentional or a bug?  Observations from
695                          * current firmware file:
696                          *  1) TLV_PAN is parsed first
697                          *  2) TLV_FLAGS contains TLV_FLAGS_PAN
698                          * ==> this resets TLV_PAN to itself... hnnnk
699                          */
700                         capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
701                         break;
702                 case IWM_UCODE_TLV_CSCHEME:
703                         if ((error = iwm_store_cscheme(sc,
704                             tlv_data, tlv_len)) != 0) {
705                                 device_printf(sc->sc_dev,
706                                     "%s: iwm_store_cscheme(): returned %d\n",
707                                     __func__,
708                                     error);
709                                 goto parse_out;
710                         }
711                         break;
712                 case IWM_UCODE_TLV_NUM_OF_CPU:
713                         if (tlv_len != sizeof(uint32_t)) {
714                                 device_printf(sc->sc_dev,
715                                     "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
716                                     __func__,
717                                     (int) tlv_len);
718                                 error = EINVAL;
719                                 goto parse_out;
720                         }
721                         num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
722                         if (num_of_cpus == 2) {
723                                 fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus =
724                                         TRUE;
725                                 fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus =
726                                         TRUE;
727                                 fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus =
728                                         TRUE;
729                         } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
730                                 device_printf(sc->sc_dev,
731                                     "%s: Driver supports only 1 or 2 CPUs\n",
732                                     __func__);
733                                 error = EINVAL;
734                                 goto parse_out;
735                         }
736                         break;
737                 case IWM_UCODE_TLV_SEC_RT:
738                         if ((error = iwm_firmware_store_section(sc,
739                             IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
740                                 device_printf(sc->sc_dev,
741                                     "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
742                                     __func__,
743                                     error);
744                                 goto parse_out;
745                         }
746                         break;
747                 case IWM_UCODE_TLV_SEC_INIT:
748                         if ((error = iwm_firmware_store_section(sc,
749                             IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
750                                 device_printf(sc->sc_dev,
751                                     "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
752                                     __func__,
753                                     error);
754                                 goto parse_out;
755                         }
756                         break;
757                 case IWM_UCODE_TLV_SEC_WOWLAN:
758                         if ((error = iwm_firmware_store_section(sc,
759                             IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
760                                 device_printf(sc->sc_dev,
761                                     "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
762                                     __func__,
763                                     error);
764                                 goto parse_out;
765                         }
766                         break;
767                 case IWM_UCODE_TLV_DEF_CALIB:
768                         if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
769                                 device_printf(sc->sc_dev,
770                                     "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
771                                     __func__,
772                                     (int) tlv_len,
773                                     (int) sizeof(struct iwm_tlv_calib_data));
774                                 error = EINVAL;
775                                 goto parse_out;
776                         }
777                         if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
778                                 device_printf(sc->sc_dev,
779                                     "%s: iwm_set_default_calib() failed: %d\n",
780                                     __func__,
781                                     error);
782                                 goto parse_out;
783                         }
784                         break;
785                 case IWM_UCODE_TLV_PHY_SKU:
786                         if (tlv_len != sizeof(uint32_t)) {
787                                 error = EINVAL;
788                                 device_printf(sc->sc_dev,
789                                     "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
790                                     __func__,
791                                     (int) tlv_len);
792                                 goto parse_out;
793                         }
794                         sc->sc_fw.phy_config =
795                             le32_to_cpup((const uint32_t *)tlv_data);
796                         sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
797                                                   IWM_FW_PHY_CFG_TX_CHAIN) >>
798                                                   IWM_FW_PHY_CFG_TX_CHAIN_POS;
799                         sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
800                                                   IWM_FW_PHY_CFG_RX_CHAIN) >>
801                                                   IWM_FW_PHY_CFG_RX_CHAIN_POS;
802                         break;
803
804                 case IWM_UCODE_TLV_API_CHANGES_SET: {
805                         if (tlv_len != sizeof(struct iwm_ucode_api)) {
806                                 error = EINVAL;
807                                 goto parse_out;
808                         }
809                         if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
810                                 error = EINVAL;
811                                 goto parse_out;
812                         }
813                         break;
814                 }
815
816                 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
817                         if (tlv_len != sizeof(struct iwm_ucode_capa)) {
818                                 error = EINVAL;
819                                 goto parse_out;
820                         }
821                         if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
822                                 error = EINVAL;
823                                 goto parse_out;
824                         }
825                         break;
826                 }
827
828                 case 48: /* undocumented TLV */
829                 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
830                 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
831                         /* ignore, not used by current driver */
832                         break;
833
834                 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
835                         if ((error = iwm_firmware_store_section(sc,
836                             IWM_UCODE_REGULAR_USNIFFER, tlv_data,
837                             tlv_len)) != 0)
838                                 goto parse_out;
839                         break;
840
841                 case IWM_UCODE_TLV_PAGING:
842                         if (tlv_len != sizeof(uint32_t)) {
843                                 error = EINVAL;
844                                 goto parse_out;
845                         }
846                         paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);
847
848                         IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
849                             "%s: Paging: paging enabled (size = %u bytes)\n",
850                             __func__, paging_mem_size);
851                         if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
852                                 device_printf(sc->sc_dev,
853                                         "%s: Paging: driver supports up to %u bytes for paging image\n",
854                                         __func__, IWM_MAX_PAGING_IMAGE_SIZE);
855                                 error = EINVAL;
856                                 goto out;
857                         }
858                         if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
859                                 device_printf(sc->sc_dev,
860                                     "%s: Paging: image isn't multiple %u\n",
861                                     __func__, IWM_FW_PAGING_SIZE);
862                                 error = EINVAL;
863                                 goto out;
864                         }
865
866                         sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
867                             paging_mem_size;
868                         usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
869                         sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
870                             paging_mem_size;
871                         break;
872
873                 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
874                         if (tlv_len != sizeof(uint32_t)) {
875                                 error = EINVAL;
876                                 goto parse_out;
877                         }
878                         capa->n_scan_channels =
879                             le32_to_cpup((const uint32_t *)tlv_data);
880                         break;
881
882                 case IWM_UCODE_TLV_FW_VERSION:
883                         if (tlv_len != sizeof(uint32_t) * 3) {
884                                 error = EINVAL;
885                                 goto parse_out;
886                         }
887                         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
888                             "%d.%d.%d",
889                             le32toh(((const uint32_t *)tlv_data)[0]),
890                             le32toh(((const uint32_t *)tlv_data)[1]),
891                             le32toh(((const uint32_t *)tlv_data)[2]));
892                         break;
893
894                 case IWM_UCODE_TLV_FW_MEM_SEG:
895                         break;
896
897                 default:
898                         device_printf(sc->sc_dev,
899                             "%s: unknown firmware section %d, abort\n",
900                             __func__, tlv_type);
901                         error = EINVAL;
902                         goto parse_out;
903                 }
904         }
905
906         KASSERT(error == 0, ("unhandled error"));
907
908  parse_out:
909         if (error) {
910                 device_printf(sc->sc_dev, "firmware parse error %d, "
911                     "section type %d\n", error, tlv_type);
912         }
913
914  out:
915         if (error) {
916                 fw->fw_status = IWM_FW_STATUS_NONE;
917                 if (fw->fw_fp != NULL)
918                         iwm_fw_info_free(fw);
919         } else
920                 fw->fw_status = IWM_FW_STATUS_DONE;
921         wakeup(&sc->sc_fw);
922
923         return error;
924 }
925
926 /*
927  * DMA resource routines
928  */
929
930 /* fwmem is used to load firmware onto the card */
931 static int
932 iwm_alloc_fwmem(struct iwm_softc *sc)
933 {
934         /* Must be aligned on a 16-byte boundary. */
935         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
936             IWM_FH_MEM_TB_MAX_LENGTH, 16);
937 }
938
939 /* tx scheduler rings.  not used? */
940 static int
941 iwm_alloc_sched(struct iwm_softc *sc)
942 {
943         /* TX scheduler rings must be aligned on a 1KB boundary. */
944         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
945             nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
946 }
947
948 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
949 static int
950 iwm_alloc_kw(struct iwm_softc *sc)
951 {
952         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
953 }
954
955 /* interrupt cause table */
956 static int
957 iwm_alloc_ict(struct iwm_softc *sc)
958 {
959         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
960             IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
961 }
962
/*
 * Allocate all RX ring resources: the DMA descriptor array, the shared
 * status area, a buffer DMA tag, one DMA map per ring slot plus a spare
 * map for iwm_rx_addbuf(), and an initial receive mbuf in every slot.
 * On any failure, everything acquired so far is released via
 * iwm_free_rx_ring() and the bus error code is returned; returns 0 on
 * success.
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
        bus_size_t size;
        int i, error;

        ring->cur = 0;

        /* Allocate RX descriptors (256-byte aligned). */
        /* Each descriptor is a 32-bit DMA address (physaddr >> 8). */
        size = IWM_RX_RING_COUNT * sizeof(uint32_t);
        error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "could not allocate RX ring DMA memory\n");
                goto fail;
        }
        ring->desc = ring->desc_dma.vaddr;

        /* Allocate RX status area (16-byte aligned). */
        error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
            sizeof(*ring->stat), 16);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "could not allocate RX status DMA memory\n");
                goto fail;
        }
        ring->stat = ring->stat_dma.vaddr;

        /* Create RX buffer DMA tag. */
        /* Single segment per buffer; device is limited to 32-bit DMA. */
        error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
            IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "%s: could not create RX buf DMA tag, error %d\n",
                    __func__, error);
                goto fail;
        }

        /* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
        error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "%s: could not create RX buf DMA map, error %d\n",
                    __func__, error);
                goto fail;
        }
        /*
         * Allocate and map RX buffers.
         */
        for (i = 0; i < IWM_RX_RING_COUNT; i++) {
                struct iwm_rx_data *data = &ring->data[i];
                error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
                if (error != 0) {
                        device_printf(sc->sc_dev,
                            "%s: could not create RX buf DMA map, error %d\n",
                            __func__, error);
                        goto fail;
                }
                data->m = NULL;

                /* iwm_rx_addbuf() fills slot i with a fresh mapped mbuf. */
                if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
                        goto fail;
                }
        }
        return 0;

fail:   iwm_free_rx_ring(sc, ring);
        return error;
}
1033
1034 static void
1035 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1036 {
1037         /* Reset the ring state */
1038         ring->cur = 0;
1039
1040         /*
1041          * The hw rx ring index in shared memory must also be cleared,
1042          * otherwise the discrepancy can cause reprocessing chaos.
1043          */
1044         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1045 }
1046
1047 static void
1048 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1049 {
1050         int i;
1051
1052         iwm_dma_contig_free(&ring->desc_dma);
1053         iwm_dma_contig_free(&ring->stat_dma);
1054
1055         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1056                 struct iwm_rx_data *data = &ring->data[i];
1057
1058                 if (data->m != NULL) {
1059                         bus_dmamap_sync(ring->data_dmat, data->map,
1060                             BUS_DMASYNC_POSTREAD);
1061                         bus_dmamap_unload(ring->data_dmat, data->map);
1062                         m_freem(data->m);
1063                         data->m = NULL;
1064                 }
1065                 if (data->map != NULL) {
1066                         bus_dmamap_destroy(ring->data_dmat, data->map);
1067                         data->map = NULL;
1068                 }
1069         }
1070         if (ring->spare_map != NULL) {
1071                 bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1072                 ring->spare_map = NULL;
1073         }
1074         if (ring->data_dmat != NULL) {
1075                 bus_dma_tag_destroy(ring->data_dmat);
1076                 ring->data_dmat = NULL;
1077         }
1078 }
1079
/*
 * Allocate the resources for one TX ring: the DMA descriptor array,
 * and — for rings up to and including the command queue — the device
 * command buffer, a data DMA tag and one map per slot.  Per-slot
 * command/scratch physical addresses are precomputed so the TX path
 * never has to derive them.  On failure, iwm_free_tx_ring() releases
 * whatever was acquired and the error is returned; returns 0 on
 * success.
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
        bus_addr_t paddr;
        bus_size_t size;
        size_t maxsize;
        int nsegments;
        int i, error;

        ring->qid = qid;
        ring->queued = 0;
        ring->cur = 0;

        /* Allocate TX descriptors (256-byte aligned). */
        size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
        error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "could not allocate TX ring DMA memory\n");
                goto fail;
        }
        ring->desc = ring->desc_dma.vaddr;

        /*
         * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
         * to allocate commands space for other rings.
         */
        if (qid > IWM_MVM_CMD_QUEUE)
                return 0;

        size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
        error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "could not allocate TX cmd DMA memory\n");
                goto fail;
        }
        ring->cmd = ring->cmd_dma.vaddr;

        /* FW commands may require more mapped space than packets. */
        if (qid == IWM_MVM_CMD_QUEUE) {
                maxsize = IWM_RBUF_SIZE;
                nsegments = 1;
        } else {
                maxsize = MCLBYTES;
                nsegments = IWM_MAX_SCATTER - 2;
        }

        error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
            nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
        if (error != 0) {
                device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
                goto fail;
        }

        /* Walk the command buffer, assigning each slot its phys addrs. */
        paddr = ring->cmd_dma.paddr;
        for (i = 0; i < IWM_TX_RING_COUNT; i++) {
                struct iwm_tx_data *data = &ring->data[i];

                data->cmd_paddr = paddr;
                data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
                    + offsetof(struct iwm_tx_cmd, scratch);
                paddr += sizeof(struct iwm_device_cmd);

                error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
                if (error != 0) {
                        device_printf(sc->sc_dev,
                            "could not create TX buf DMA map\n");
                        goto fail;
                }
        }
        /* The walk must land exactly at the end of the command area. */
        KASSERT(paddr == ring->cmd_dma.paddr + size,
            ("invalid physical address"));
        return 0;

fail:   iwm_free_tx_ring(sc, ring);
        return error;
}
1159
/*
 * Return a TX ring to its initial state without freeing its DMA
 * resources: drop all queued mbufs, zero the hardware descriptors,
 * and clear this ring's bit in the queue-full mask.
 */
static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
        int i;

        for (i = 0; i < IWM_TX_RING_COUNT; i++) {
                struct iwm_tx_data *data = &ring->data[i];

                if (data->m != NULL) {
                        bus_dmamap_sync(ring->data_dmat, data->map,
                            BUS_DMASYNC_POSTWRITE);
                        bus_dmamap_unload(ring->data_dmat, data->map);
                        m_freem(data->m);
                        data->m = NULL;
                }
        }
        /* Clear TX descriptors. */
        memset(ring->desc, 0, ring->desc_dma.size);
        /* Push the zeroed descriptors out before the device looks again. */
        bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
            BUS_DMASYNC_PREWRITE);
        sc->qfullmsk &= ~(1 << ring->qid);
        ring->queued = 0;
        ring->cur = 0;

        /* The command queue may still hold a "NIC awake" request; drop it. */
        if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
                iwm_pcie_clear_cmd_in_flight(sc);
}
1187
1188 static void
1189 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1190 {
1191         int i;
1192
1193         iwm_dma_contig_free(&ring->desc_dma);
1194         iwm_dma_contig_free(&ring->cmd_dma);
1195
1196         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1197                 struct iwm_tx_data *data = &ring->data[i];
1198
1199                 if (data->m != NULL) {
1200                         bus_dmamap_sync(ring->data_dmat, data->map,
1201                             BUS_DMASYNC_POSTWRITE);
1202                         bus_dmamap_unload(ring->data_dmat, data->map);
1203                         m_freem(data->m);
1204                         data->m = NULL;
1205                 }
1206                 if (data->map != NULL) {
1207                         bus_dmamap_destroy(ring->data_dmat, data->map);
1208                         data->map = NULL;
1209                 }
1210         }
1211         if (ring->data_dmat != NULL) {
1212                 bus_dma_tag_destroy(ring->data_dmat);
1213                 ring->data_dmat = NULL;
1214         }
1215 }
1216
1217 /*
1218  * High-level hardware frobbing routines
1219  */
1220
1221 static void
1222 iwm_enable_interrupts(struct iwm_softc *sc)
1223 {
1224         sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1225         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1226 }
1227
1228 static void
1229 iwm_restore_interrupts(struct iwm_softc *sc)
1230 {
1231         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1232 }
1233
/*
 * Mask all interrupt sources and acknowledge anything already pending
 * in both the main CSR and the flow-handler status registers, so no
 * stale cause fires when interrupts are later re-enabled.
 */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
        /* disable interrupts */
        IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

        /* acknowledge all interrupts */
        IWM_WRITE(sc, IWM_CSR_INT, ~0);
        IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}
1244
/*
 * (Re)initialize the interrupt cause table: clear it, point the device
 * at its physical address, and switch the driver to ICT interrupt mode
 * with interrupts re-enabled on exit.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
        /* Quiesce interrupts while the table is being rewritten. */
        iwm_disable_interrupts(sc);

        /* Reset ICT table. */
        memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
        sc->ict_cur = 0;

        /* Set physical address of ICT table (4KB aligned). */
        IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
            IWM_CSR_DRAM_INT_TBL_ENABLE
            | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
            | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
            | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

        /* Switch to ICT interrupt mode in driver. */
        sc->sc_flags |= IWM_FLAG_USE_ICT;

        /* Re-enable interrupts. */
        /* Ack anything that became pending while we were masked. */
        IWM_WRITE(sc, IWM_CSR_INT, ~0);
        iwm_enable_interrupts(sc);
}
1268
1269 /* iwlwifi pcie/trans.c */
1270
/*
 * Since this .. hard-resets things, it's time to actually
 * mark the first vap (if any) as having no mac context.
 * It's annoying, but since the driver is potentially being
 * stop/start'ed whilst active (thanks openbsd port!) we
 * have to correctly track this.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
        struct ieee80211com *ic = &sc->sc_ic;
        struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
        int chnl, qid;
        uint32_t mask = 0;

        /* tell the device to stop sending interrupts */
        iwm_disable_interrupts(sc);

        /*
         * FreeBSD-local: mark the first vap as not-uploaded,
         * so the next transition through auth/assoc
         * will correctly populate the MAC context.
         */
        if (vap) {
                struct iwm_vap *iv = IWM_VAP(vap);
                iv->phy_ctxt = NULL;
                iv->is_uploaded = 0;
        }

        /* device going down, Stop using ICT table */
        sc->sc_flags &= ~IWM_FLAG_USE_ICT;

        /* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

        if (iwm_nic_lock(sc)) {
                /* Deactivate the TX scheduler before touching the channels. */
                iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

                /* Stop each Tx DMA channel */
                for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
                        IWM_WRITE(sc,
                            IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
                        mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
                }

                /* Wait for DMA channels to be idle */
                /* A timeout here is logged but not fatal; teardown continues. */
                if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
                    5000)) {
                        device_printf(sc->sc_dev,
                            "Failing on timeout while stopping DMA channel: [0x%08x]\n",
                            IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
                }
                iwm_nic_unlock(sc);
        }
        iwm_pcie_rx_stop(sc);

        /* Stop RX ring. */
        iwm_reset_rx_ring(sc, &sc->rxq);

        /* Reset all TX rings. */
        for (qid = 0; qid < nitems(sc->txq); qid++)
                iwm_reset_tx_ring(sc, &sc->txq[qid]);

        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
                /* Power-down device's busmaster DMA clocks */
                if (iwm_nic_lock(sc)) {
                        iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
                            IWM_APMG_CLK_VAL_DMA_CLK_RQT);
                        iwm_nic_unlock(sc);
                }
                DELAY(5);
        }

        /* Make sure (redundant) we've released our request to stay awake */
        IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
            IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

        /* Stop the device, and put it in low power state */
        iwm_apm_stop(sc);

        /* Upon stop, the APM issues an interrupt if HW RF kill is set.
         * Clean again the interrupt here
         */
        iwm_disable_interrupts(sc);
        /* stop and reset the on-board processor */
        IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);

        /*
         * Even if we stop the HW, we still want the RF kill
         * interrupt
         */
        iwm_enable_rfkill_int(sc);
        iwm_check_rfkill(sc);
}
1364
1365 /* iwlwifi: mvm/ops.c */
/*
 * Program the hardware-interface config register from the firmware's
 * PHY configuration: MAC step/dash taken from the HW revision, radio
 * type/step/dash extracted from the firmware phy_config word.
 */
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
        uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
        uint32_t reg_val = 0;
        uint32_t phy_config = iwm_mvm_get_phy_config(sc);

        radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
            IWM_FW_PHY_CFG_RADIO_TYPE_POS;
        radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
            IWM_FW_PHY_CFG_RADIO_STEP_POS;
        radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
            IWM_FW_PHY_CFG_RADIO_DASH_POS;

        /* SKU control */
        reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
            IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
        reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
            IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

        /* radio configuration */
        reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
        reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
        reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

        IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

        IWM_DPRINTF(sc, IWM_DEBUG_RESET,
            "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
            radio_cfg_step, radio_cfg_dash);

        /*
         * W/A : NIC is stuck in a reset state after Early PCIe power off
         * (PCIe power is lost before PERST# is asserted), causing ME FW
         * to lose ownership and not being able to obtain it back.
         */
        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
                iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
                    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
                    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
        }
}
1408
/*
 * Program the device's RX engine: clear the shared status area, stop
 * any running RX DMA, reset the ring pointers, hand the hardware the
 * physical addresses of the descriptor ring and status area, and
 * enable RX DMA with the desired buffer-size/timeout configuration.
 * Returns EBUSY if the NIC lock cannot be taken, 0 on success.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
        /*
         * Initialize RX ring.  This is from the iwn driver.
         */
        memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

        /* Stop Rx DMA */
        iwm_pcie_rx_stop(sc);

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* reset and flush pointers */
        IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
        IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
        IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
        IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

        /* Set physical address of RX ring (256-byte aligned). */
        IWM_WRITE(sc,
            IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

        /* Set physical address of RX status (16-byte aligned). */
        IWM_WRITE(sc,
            IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

        /* Enable Rx DMA
         * XXX 5000 HW isn't supported by the iwm(4) driver.
         * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
         *      the credit mechanism in 5000 HW RX FIFO
         * Direct rx interrupts to hosts
         * Rx buffer size 4 or 8k or 12k
         * RB timeout 0x10
         * 256 RBDs
         */
        IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
            IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
            IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |  /* HW bug */
            IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
            IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
            (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
            IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

        IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

        /* W/A for interrupt coalescing bug in 7260 and 3160 */
        if (sc->cfg->host_interrupt_operation_mode)
                IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

        /*
         * Thus sayeth el jefe (iwlwifi) via a comment:
         *
         * This value should initially be 0 (before preparing any
         * RBs), should be 8 after preparing the first 8 RBs (for example)
         */
        IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

        iwm_nic_unlock(sc);

        return 0;
}
1472
/*
 * Initialize the TX side of the NIC: deactivate the TX scheduler,
 * program the "keep warm" page address, and hand each TX ring's
 * descriptor base address to the flow handler, then let the scheduler
 * auto-activate.
 *
 * Returns 0 on success or EBUSY if the NIC lock cannot be taken.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
        int qid;

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* Deactivate TX scheduler. */
        iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

        /* Set physical address of "keep warm" page (16-byte aligned). */
        IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

        /* Initialize TX rings. */
        for (qid = 0; qid < nitems(sc->txq); qid++) {
                struct iwm_tx_ring *txq = &sc->txq[qid];

                /* Set physical address of TX ring (256-byte aligned). */
                IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
                    txq->desc_dma.paddr >> 8);
                IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
                    "%s: loading ring %d descriptors (%p) at %lx\n",
                    __func__,
                    qid, txq->desc,
                    (unsigned long) (txq->desc_dma.paddr >> 8));
        }

        /* Let the scheduler activate queues on its own. */
        iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

        iwm_nic_unlock(sc);

        return 0;
}
1507
1508 static int
1509 iwm_nic_init(struct iwm_softc *sc)
1510 {
1511         int error;
1512
1513         iwm_apm_init(sc);
1514         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1515                 iwm_set_pwr(sc);
1516
1517         iwm_mvm_nic_config(sc);
1518
1519         if ((error = iwm_nic_rx_init(sc)) != 0)
1520                 return error;
1521
1522         /*
1523          * Ditto for TX, from iwn
1524          */
1525         if ((error = iwm_nic_tx_init(sc)) != 0)
1526                 return error;
1527
1528         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1529             "%s: shadow registers enabled\n", __func__);
1530         IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1531
1532         return 0;
1533 }
1534
/*
 * Activate TX queue 'qid' and attach it to hardware FIFO 'fifo'.
 *
 * The command queue (IWM_MVM_CMD_QUEUE) is programmed directly through
 * the scheduler's PRPH registers and SRAM context; every other queue
 * is configured by sending an IWM_SCD_QUEUE_CFG command to the
 * firmware.  'sta_id' is only used in the firmware-command path.
 *
 * Returns 0 on success, EBUSY if the NIC lock can't be taken, or the
 * error from iwm_mvm_send_cmd_pdu().
 */
static int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
        if (!iwm_nic_lock(sc)) {
                device_printf(sc->sc_dev,
                    "%s: cannot enable txq %d\n",
                    __func__,
                    qid);
                return EBUSY;
        }

        /* Reset the queue's write pointer. */
        IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

        if (qid == IWM_MVM_CMD_QUEUE) {
                /* unactivate before configuration */
                iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
                    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
                    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

                iwm_nic_unlock(sc);

                /* Take the queue out of aggregation mode. */
                iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

                /* iwm_clear_bits_prph() dropped the lock; retake it. */
                if (!iwm_nic_lock(sc)) {
                        device_printf(sc->sc_dev,
                            "%s: cannot enable txq %d\n", __func__, qid);
                        return EBUSY;
                }
                iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
                iwm_nic_unlock(sc);

                /* Clear this queue's scheduler context in SRAM. */
                iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
                /* Set scheduler window size and frame limit. */
                iwm_write_mem32(sc,
                    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
                    sizeof(uint32_t),
                    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
                    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
                    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
                    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

                if (!iwm_nic_lock(sc)) {
                        device_printf(sc->sc_dev,
                            "%s: cannot enable txq %d\n", __func__, qid);
                        return EBUSY;
                }
                /* Activate the queue and bind it to its FIFO. */
                iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
                    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
                    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
                    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
                    IWM_SCD_QUEUE_STTS_REG_MSK);
        } else {
                struct iwm_scd_txq_cfg_cmd cmd;
                int error;

                /* Firmware does the programming; no NIC lock needed. */
                iwm_nic_unlock(sc);

                memset(&cmd, 0, sizeof(cmd));
                cmd.scd_queue = qid;
                cmd.enable = 1;
                cmd.sta_id = sta_id;
                cmd.tx_fifo = fifo;
                cmd.aggregate = 0;
                cmd.window = IWM_FRAME_LIMIT;

                error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
                    sizeof(cmd), &cmd);
                if (error) {
                        device_printf(sc->sc_dev,
                            "cannot enable txq %d\n", qid);
                        return error;
                }

                if (!iwm_nic_lock(sc))
                        return EBUSY;
        }

        /*
         * NOTE(review): ORing in 'qid' (rather than '1 << qid') looks
         * suspicious for what appears to be a per-queue enable register;
         * this matches the original OpenBSD code, but verify against the
         * Linux iwlwifi SCD programming before changing it.
         */
        iwm_write_prph(sc, IWM_SCD_EN_CTRL,
            iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

        iwm_nic_unlock(sc);

        IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
            __func__, qid, fifo);

        return 0;
}
1622
/*
 * Post-"alive" PCIe transport setup: reset the ICT table, sanity-check
 * the scheduler SRAM base address against the one the alive message
 * reported (scd_base_addr, 0 to skip the check), clear the scheduler
 * context/status/translation area, point the scheduler at our DMA
 * memory, enable the command queue and the FH TX DMA channels, and
 * re-enable L1-Active where applicable.
 *
 * Returns 0 on success, EBUSY if the NIC lock or a memory write fails,
 * or an error from iwm_enable_txq().
 */
static int
iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
{
        int error, chnl;

        /* Number of 32-bit words in the scheduler context area. */
        int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
            IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);

        if (!iwm_nic_lock(sc))
                return EBUSY;

        iwm_ict_reset(sc);

        sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
        /* Mismatch is logged but not fatal; the PRPH value is kept. */
        if (scd_base_addr != 0 &&
            scd_base_addr != sc->scd_base_addr) {
                device_printf(sc->sc_dev,
                    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
                    __func__, sc->scd_base_addr, scd_base_addr);
        }

        iwm_nic_unlock(sc);

        /* reset context data, TX status and translation data */
        error = iwm_write_mem(sc,
            sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
            NULL, clear_dwords);
        if (error)
                return EBUSY;

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* Set physical address of TX scheduler rings (1KB aligned). */
        iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

        iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

        iwm_nic_unlock(sc);

        /* enable command channel */
        error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
        if (error)
                return error;

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* Activate all TX FIFOs in the scheduler. */
        iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

        /* Enable DMA channels. */
        for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
                IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
                    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
                    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
        }

        IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
            IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

        iwm_nic_unlock(sc);

        /* Enable L1-Active */
        if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
                iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
                    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
        }

        /* error is 0 here: all failing paths returned above. */
        return error;
}
1693
1694 /*
1695  * NVM read access and content parsing.  We do not support
1696  * external NVM or writing NVM.
1697  * iwlwifi/mvm/nvm.c
1698  */
1699
/* Default NVM size to read */
#define IWM_NVM_DEFAULT_CHUNK_SIZE      (2*1024)

/* op_code values for struct iwm_nvm_access_cmd. */
#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0

/* load nvm chunk response (status field of struct iwm_nvm_access_resp) */
enum {
        IWM_READ_NVM_CHUNK_SUCCEED = 0,
        IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
};
1711
/*
 * Read one chunk of an NVM section via the IWM_NVM_ACCESS_CMD firmware
 * command.
 *
 * @param section  NVM section id to read from
 * @param offset   byte offset within the section
 * @param length   number of bytes requested
 * @param data     destination buffer; the chunk lands at data + offset
 * @param len      out: number of bytes actually read (0 when reading
 *                 past the end of a 2K-multiple section)
 *
 * Returns 0 on success (including the benign past-the-end case), EIO
 * or EINVAL for bad firmware responses, or the error from
 * iwm_send_cmd().
 */
static int
iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
        uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
{
        struct iwm_nvm_access_cmd nvm_access_cmd = {
                .offset = htole16(offset),
                .length = htole16(length),
                .type = htole16(section),
                .op_code = IWM_NVM_READ_OPCODE,
        };
        struct iwm_nvm_access_resp *nvm_resp;
        struct iwm_rx_packet *pkt;
        struct iwm_host_cmd cmd = {
                .id = IWM_NVM_ACCESS_CMD,
                .flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
                .data = { &nvm_access_cmd, },
        };
        int ret, bytes_read, offset_read;
        uint8_t *resp_data;

        cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);

        ret = iwm_send_cmd(sc, &cmd);
        if (ret) {
                device_printf(sc->sc_dev,
                    "Could not send NVM_ACCESS command (error=%d)\n", ret);
                return ret;
        }

        pkt = cmd.resp_pkt;

        /* Extract NVM response */
        nvm_resp = (void *)pkt->data;
        ret = le16toh(nvm_resp->status);
        bytes_read = le16toh(nvm_resp->length);
        offset_read = le16toh(nvm_resp->offset);
        resp_data = nvm_resp->data;
        if (ret) {
                if ((offset != 0) &&
                    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
                        /*
                         * meaning of NOT_VALID_ADDRESS:
                         * driver try to read chunk from address that is
                         * multiple of 2K and got an error since addr is empty.
                         * meaning of (offset != 0): driver already
                         * read valid data from another chunk so this case
                         * is not an error.
                         */
                        IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
                                    "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
                                    offset);
                        *len = 0;
                        ret = 0;
                } else {
                        IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
                                    "NVM access command failed with status %d\n", ret);
                        ret = EIO;
                }
                goto exit;
        }

        /* Validate the response before trusting its payload. */
        if (offset_read != offset) {
                device_printf(sc->sc_dev,
                    "NVM ACCESS response with invalid offset %d\n",
                    offset_read);
                ret = EINVAL;
                goto exit;
        }

        if (bytes_read > length) {
                device_printf(sc->sc_dev,
                    "NVM ACCESS response with too much data "
                    "(%d bytes requested, %d bytes received)\n",
                    length, bytes_read);
                ret = EINVAL;
                goto exit;
        }

        /* Write data to NVM */
        memcpy(data + offset, resp_data, bytes_read);
        *len = bytes_read;

 exit:
        /* Always release the response buffer taken via IWM_CMD_WANT_SKB. */
        iwm_free_resp(sc, &cmd);
        return ret;
}
1798
1799 /*
1800  * Reads an NVM section completely.
1801  * NICs prior to 7000 family don't have a real NVM, but just read
1802  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1803  * by uCode, we need to manually check in this case that we don't
1804  * overflow and try to read more than the EEPROM size.
1805  * For 7000 family NICs, we supply the maximal size we can read, and
1806  * the uCode fills the response with as much data as we can,
1807  * without overflowing, so no check is needed.
1808  */
/*
 * Read NVM 'section' into 'data' chunk by chunk; *len receives the
 * total byte count.  'size_read' is the number of bytes already read
 * into earlier sections, used for the EEPROM-size overflow check.
 * Returns 0 on success or an errno from the bounds check / chunk read.
 */
static int
iwm_nvm_read_section(struct iwm_softc *sc,
        uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
{
        uint16_t seglen, length, offset = 0;
        int ret;

        /* Set nvm section read length */
        length = IWM_NVM_DEFAULT_CHUNK_SIZE;

        /* Prime the loop condition: pretend the last read was full-size. */
        seglen = length;

        /* Read the NVM until exhausted (reading less than requested) */
        while (seglen == length) {
                /* Check no memory assumptions fail and cause an overflow */
                if ((size_read + offset + length) >
                    sc->cfg->eeprom_size) {
                        device_printf(sc->sc_dev,
                            "EEPROM size is too small for NVM\n");
                        return ENOBUFS;
                }

                ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
                if (ret) {
                        IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
                                    "Cannot read NVM from section %d offset %d, length %d\n",
                                    section, offset, length);
                        return ret;
                }
                offset += seglen;
        }

        IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
                    "NVM section %d read completed\n", section);
        *len = offset;
        return 0;
}
1846
1847 /*
1848  * BEGIN IWM_NVM_PARSE
1849  */
1850
1851 /* iwlwifi/iwl-nvm-parse.c */
1852
/* NVM offsets (in words) definitions */
enum iwm_nvm_offsets {
        /* NVM HW-Section offset (in words) definitions */
        IWM_HW_ADDR = 0x15,

/* NVM SW-Section offset (in words) definitions */
        IWM_NVM_SW_SECTION = 0x1C0,
        IWM_NVM_VERSION = 0,
        IWM_RADIO_CFG = 1,
        IWM_SKU = 2,
        IWM_N_HW_ADDRS = 3,
        /* Channel table offset, relative to the SW section start. */
        IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

/* NVM calibration section offset (in words) definitions */
        IWM_NVM_CALIB_SECTION = 0x2B8,
        IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
1870
/* As above, but for the 8000 device family (all offsets in words). */
enum iwm_8000_nvm_offsets {
        /* NVM HW-Section offset (in words) definitions */
        IWM_HW_ADDR0_WFPM_8000 = 0x12,
        IWM_HW_ADDR1_WFPM_8000 = 0x16,
        IWM_HW_ADDR0_PCIE_8000 = 0x8A,
        IWM_HW_ADDR1_PCIE_8000 = 0x8E,
        IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,

        /* NVM SW-Section offset (in words) definitions */
        IWM_NVM_SW_SECTION_8000 = 0x1C0,
        IWM_NVM_VERSION_8000 = 0,
        IWM_RADIO_CFG_8000 = 0,
        IWM_SKU_8000 = 2,
        IWM_N_HW_ADDRS_8000 = 3,

        /* NVM REGULATORY -Section offset (in words) definitions */
        IWM_NVM_CHANNELS_8000 = 0,
        IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
        IWM_NVM_LAR_OFFSET_8000 = 0x507,
        IWM_NVM_LAR_ENABLED_8000 = 0x7,

        /* NVM calibration section offset (in words) definitions */
        IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
        IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
};
1896
/* SKU Capabilities (actual values from NVM definition) */
/* Bits of the SKU word returned by iwm_get_sku(). */
enum nvm_sku_bits {
        IWM_NVM_SKU_CAP_BAND_24GHZ      = (1 << 0),
        IWM_NVM_SKU_CAP_BAND_52GHZ      = (1 << 1),
        IWM_NVM_SKU_CAP_11N_ENABLE      = (1 << 2),
        IWM_NVM_SKU_CAP_11AC_ENABLE     = (1 << 3),
};
1904
/* radio config bits (actual values from NVM definition) */
#define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
#define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
#define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
#define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */

/* Family-8000 layout: wider fields, different bit positions. */
#define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)       (x & 0xF)
#define IWM_NVM_RF_CFG_DASH_MSK_8000(x)         ((x >> 4) & 0xF)
#define IWM_NVM_RF_CFG_STEP_MSK_8000(x)         ((x >> 8) & 0xF)
#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)         ((x >> 12) & 0xFFF)
#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)       ((x >> 24) & 0xF)
#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)       ((x >> 28) & 0xF)

/* NOTE(review): unit presumably dBm; the consumer is outside this chunk. */
#define DEFAULT_MAX_TX_POWER 16
1921
1922 /**
1923  * enum iwm_nvm_channel_flags - channel flags in NVM
1924  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1925  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1926  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1927  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1928  * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1929  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1930  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1931  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1932  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1933  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1934  */
/* See the block comment above; bits 2 and 5-6 are unused here. */
enum iwm_nvm_channel_flags {
        IWM_NVM_CHANNEL_VALID = (1 << 0),
        IWM_NVM_CHANNEL_IBSS = (1 << 1),
        IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
        IWM_NVM_CHANNEL_RADAR = (1 << 4),
        IWM_NVM_CHANNEL_DFS = (1 << 7),
        IWM_NVM_CHANNEL_WIDE = (1 << 8),
        IWM_NVM_CHANNEL_40MHZ = (1 << 9),
        IWM_NVM_CHANNEL_80MHZ = (1 << 10),
        IWM_NVM_CHANNEL_160MHZ = (1 << 11),
};
1946
1947 /*
1948  * Translate EEPROM flags to net80211.
1949  */
1950 static uint32_t
1951 iwm_eeprom_channel_flags(uint16_t ch_flags)
1952 {
1953         uint32_t nflags;
1954
1955         nflags = 0;
1956         if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1957                 nflags |= IEEE80211_CHAN_PASSIVE;
1958         if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1959                 nflags |= IEEE80211_CHAN_NOADHOC;
1960         if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1961                 nflags |= IEEE80211_CHAN_DFS;
1962                 /* Just in case. */
1963                 nflags |= IEEE80211_CHAN_NOADHOC;
1964         }
1965
1966         return (nflags);
1967 }
1968
1969 static void
1970 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1971     int maxchans, int *nchans, int ch_idx, size_t ch_num,
1972     const uint8_t bands[])
1973 {
1974         const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
1975         uint32_t nflags;
1976         uint16_t ch_flags;
1977         uint8_t ieee;
1978         int error;
1979
1980         for (; ch_idx < ch_num; ch_idx++) {
1981                 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1982                 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1983                         ieee = iwm_nvm_channels[ch_idx];
1984                 else
1985                         ieee = iwm_nvm_channels_8000[ch_idx];
1986
1987                 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1988                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1989                             "Ch. %d Flags %x [%sGHz] - No traffic\n",
1990                             ieee, ch_flags,
1991                             (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1992                             "5.2" : "2.4");
1993                         continue;
1994                 }
1995
1996                 nflags = iwm_eeprom_channel_flags(ch_flags);
1997                 error = ieee80211_add_channel(chans, maxchans, nchans,
1998                     ieee, 0, 0, nflags, bands);
1999                 if (error != 0)
2000                         break;
2001
2002                 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2003                     "Ch. %d Flags %x [%sGHz] - Added\n",
2004                     ieee, ch_flags,
2005                     (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2006                     "5.2" : "2.4");
2007         }
2008 }
2009
2010 static void
2011 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2012     struct ieee80211_channel chans[])
2013 {
2014         struct iwm_softc *sc = ic->ic_softc;
2015         struct iwm_nvm_data *data = sc->nvm_data;
2016         uint8_t bands[IEEE80211_MODE_BYTES];
2017         size_t ch_num;
2018
2019         memset(bands, 0, sizeof(bands));
2020         /* 1-13: 11b/g channels. */
2021         setbit(bands, IEEE80211_MODE_11B);
2022         setbit(bands, IEEE80211_MODE_11G);
2023         iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2024             IWM_NUM_2GHZ_CHANNELS - 1, bands);
2025
2026         /* 14: 11b channel only. */
2027         clrbit(bands, IEEE80211_MODE_11G);
2028         iwm_add_channel_band(sc, chans, maxchans, nchans,
2029             IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2030
2031         if (data->sku_cap_band_52GHz_enable) {
2032                 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2033                         ch_num = nitems(iwm_nvm_channels);
2034                 else
2035                         ch_num = nitems(iwm_nvm_channels_8000);
2036                 memset(bands, 0, sizeof(bands));
2037                 setbit(bands, IEEE80211_MODE_11A);
2038                 iwm_add_channel_band(sc, chans, maxchans, nchans,
2039                     IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2040         }
2041 }
2042
/*
 * Family-8000 MAC address discovery.  Preference order:
 *  1. the NVM MAC-override section (used as-is when it is neither the
 *     Intel reserved address, broadcast, multicast, nor invalid);
 *  2. the WFMP_MAC_ADDR_0/1 PRPH registers (bytes stored reversed
 *     within each 32-bit register);
 *  3. otherwise zero the address and complain.
 */
static void
iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
        const uint16_t *mac_override, const uint16_t *nvm_hw)
{
        const uint8_t *hw_addr;

        if (mac_override) {
                /* Intel's reserved placeholder address; never use it. */
                static const uint8_t reserved_mac[] = {
                        0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
                };

                hw_addr = (const uint8_t *)(mac_override +
                                 IWM_MAC_ADDRESS_OVERRIDE_8000);

                /*
                 * Store the MAC address from MAO section.
                 * No byte swapping is required in MAO section
                 */
                IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);

                /*
                 * Force the use of the OTP MAC address in case of reserved MAC
                 * address in the NVM, or if address is given but invalid.
                 */
                if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
                    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
                    iwm_is_valid_ether_addr(data->hw_addr) &&
                    !IEEE80211_IS_MULTICAST(data->hw_addr))
                        return;

                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                    "%s: mac address from nvm override section invalid\n",
                    __func__);
        }

        if (nvm_hw) {
                /* read the mac address from WFMP registers */
                uint32_t mac_addr0 =
                    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
                uint32_t mac_addr1 =
                    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

                /* The bytes within each register are reversed. */
                hw_addr = (const uint8_t *)&mac_addr0;
                data->hw_addr[0] = hw_addr[3];
                data->hw_addr[1] = hw_addr[2];
                data->hw_addr[2] = hw_addr[1];
                data->hw_addr[3] = hw_addr[0];

                hw_addr = (const uint8_t *)&mac_addr1;
                data->hw_addr[4] = hw_addr[1];
                data->hw_addr[5] = hw_addr[0];

                return;
        }

        /* Neither source available: leave an all-zero (invalid) address. */
        device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
        memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
2101
2102 static int
2103 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2104             const uint16_t *phy_sku)
2105 {
2106         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2107                 return le16_to_cpup(nvm_sw + IWM_SKU);
2108
2109         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2110 }
2111
2112 static int
2113 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2114 {
2115         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2116                 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2117         else
2118                 return le32_to_cpup((const uint32_t *)(nvm_sw +
2119                                                 IWM_NVM_VERSION_8000));
2120 }
2121
2122 static int
2123 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2124                   const uint16_t *phy_sku)
2125 {
2126         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2127                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2128
2129         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2130 }
2131
2132 static int
2133 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2134 {
2135         int n_hw_addr;
2136
2137         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2138                 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2139
2140         n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2141
2142         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2143 }
2144
2145 static void
2146 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2147                   uint32_t radio_cfg)
2148 {
2149         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2150                 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2151                 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2152                 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2153                 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2154                 return;
2155         }
2156
2157         /* set the radio configuration for family 8000 */
2158         data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2159         data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2160         data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2161         data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2162         data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2163         data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2164 }
2165
/*
 * Derive the device MAC address into data->hw_addr.
 *
 * Pre-8000 families read it straight from the HW section of the NVM;
 * family 8000 goes through iwm_set_hw_address_family_8000(), which can
 * use the MAC-override section or WFMP registers instead.
 *
 * Returns 0 on success or EINVAL if no valid address was found.
 */
static int
iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
                   const uint16_t *nvm_hw, const uint16_t *mac_override)
{
#ifdef notyet /* for FAMILY 9000 */
        if (cfg->mac_addr_from_csr) {
                iwm_set_hw_address_from_csr(sc, data);
        } else
#endif
        if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
                const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);

                /* The byte order is little endian 16 bit, meaning 214365 */
                data->hw_addr[0] = hw_addr[1];
                data->hw_addr[1] = hw_addr[0];
                data->hw_addr[2] = hw_addr[3];
                data->hw_addr[3] = hw_addr[2];
                data->hw_addr[4] = hw_addr[5];
                data->hw_addr[5] = hw_addr[4];
        } else {
                iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
        }

        /* Reject zero/group addresses regardless of where they came from. */
        if (!iwm_is_valid_ether_addr(data->hw_addr)) {
                device_printf(sc->sc_dev, "no valid mac address was found\n");
                return EINVAL;
        }

        return 0;
}
2196
2197 static struct iwm_nvm_data *
2198 iwm_parse_nvm_data(struct iwm_softc *sc,
2199                    const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2200                    const uint16_t *nvm_calib, const uint16_t *mac_override,
2201                    const uint16_t *phy_sku, const uint16_t *regulatory)
2202 {
2203         struct iwm_nvm_data *data;
2204         uint32_t sku, radio_cfg;
2205
2206         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2207                 data = malloc(sizeof(*data) +
2208                     IWM_NUM_CHANNELS * sizeof(uint16_t),
2209                     M_DEVBUF, M_NOWAIT | M_ZERO);
2210         } else {
2211                 data = malloc(sizeof(*data) +
2212                     IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2213                     M_DEVBUF, M_NOWAIT | M_ZERO);
2214         }
2215         if (!data)
2216                 return NULL;
2217
2218         data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2219
2220         radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2221         iwm_set_radio_cfg(sc, data, radio_cfg);
2222
2223         sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2224         data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2225         data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2226         data->sku_cap_11n_enable = 0;
2227
2228         data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2229
2230         /* If no valid mac address was found - bail out */
2231         if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2232                 free(data, M_DEVBUF);
2233                 return NULL;
2234         }
2235
2236         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2237                 memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2238                     IWM_NUM_CHANNELS * sizeof(uint16_t));
2239         } else {
2240                 memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2241                     IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2242         }
2243
2244         return data;
2245 }
2246
2247 static void
2248 iwm_free_nvm_data(struct iwm_nvm_data *data)
2249 {
2250         if (data != NULL)
2251                 free(data, M_DEVBUF);
2252 }
2253
2254 static struct iwm_nvm_data *
2255 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2256 {
2257         const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2258
2259         /* Checking for required sections */
2260         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2261                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2262                     !sections[sc->cfg->nvm_hw_section_num].data) {
2263                         device_printf(sc->sc_dev,
2264                             "Can't parse empty OTP/NVM sections\n");
2265                         return NULL;
2266                 }
2267         } else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2268                 /* SW and REGULATORY sections are mandatory */
2269                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2270                     !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2271                         device_printf(sc->sc_dev,
2272                             "Can't parse empty OTP/NVM sections\n");
2273                         return NULL;
2274                 }
2275                 /* MAC_OVERRIDE or at least HW section must exist */
2276                 if (!sections[sc->cfg->nvm_hw_section_num].data &&
2277                     !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2278                         device_printf(sc->sc_dev,
2279                             "Can't parse mac_address, empty sections\n");
2280                         return NULL;
2281                 }
2282
2283                 /* PHY_SKU section is mandatory in B0 */
2284                 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2285                         device_printf(sc->sc_dev,
2286                             "Can't parse phy_sku in B0, empty sections\n");
2287                         return NULL;
2288                 }
2289         } else {
2290                 panic("unknown device family %d\n", sc->cfg->device_family);
2291         }
2292
2293         hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2294         sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2295         calib = (const uint16_t *)
2296             sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2297         regulatory = (const uint16_t *)
2298             sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2299         mac_override = (const uint16_t *)
2300             sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2301         phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2302
2303         return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2304             phy_sku, regulatory);
2305 }
2306
2307 static int
2308 iwm_nvm_init(struct iwm_softc *sc)
2309 {
2310         struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2311         int i, ret, section;
2312         uint32_t size_read = 0;
2313         uint8_t *nvm_buffer, *temp;
2314         uint16_t len;
2315
2316         memset(nvm_sections, 0, sizeof(nvm_sections));
2317
2318         if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2319                 return EINVAL;
2320
2321         /* load NVM values from nic */
2322         /* Read From FW NVM */
2323         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2324
2325         nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2326         if (!nvm_buffer)
2327                 return ENOMEM;
2328         for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2329                 /* we override the constness for initial read */
2330                 ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2331                                            &len, size_read);
2332                 if (ret)
2333                         continue;
2334                 size_read += len;
2335                 temp = malloc(len, M_DEVBUF, M_NOWAIT);
2336                 if (!temp) {
2337                         ret = ENOMEM;
2338                         break;
2339                 }
2340                 memcpy(temp, nvm_buffer, len);
2341
2342                 nvm_sections[section].data = temp;
2343                 nvm_sections[section].length = len;
2344         }
2345         if (!size_read)
2346                 device_printf(sc->sc_dev, "OTP is blank\n");
2347         free(nvm_buffer, M_DEVBUF);
2348
2349         sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2350         if (!sc->nvm_data)
2351                 return EINVAL;
2352         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2353                     "nvm version = %x\n", sc->nvm_data->nvm_version);
2354
2355         for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2356                 if (nvm_sections[i].data != NULL)
2357                         free(nvm_sections[i].data, M_DEVBUF);
2358         }
2359
2360         return 0;
2361 }
2362
2363 static int
2364 iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2365         const struct iwm_fw_desc *section)
2366 {
2367         struct iwm_dma_info *dma = &sc->fw_dma;
2368         uint8_t *v_addr;
2369         bus_addr_t p_addr;
2370         uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2371         int ret = 0;
2372
2373         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2374                     "%s: [%d] uCode section being loaded...\n",
2375                     __func__, section_num);
2376
2377         v_addr = dma->vaddr;
2378         p_addr = dma->paddr;
2379
2380         for (offset = 0; offset < section->len; offset += chunk_sz) {
2381                 uint32_t copy_size, dst_addr;
2382                 int extended_addr = FALSE;
2383
2384                 copy_size = MIN(chunk_sz, section->len - offset);
2385                 dst_addr = section->offset + offset;
2386
2387                 if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2388                     dst_addr <= IWM_FW_MEM_EXTENDED_END)
2389                         extended_addr = TRUE;
2390
2391                 if (extended_addr)
2392                         iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2393                                           IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2394
2395                 memcpy(v_addr, (const uint8_t *)section->data + offset,
2396                     copy_size);
2397                 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2398                 ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
2399                                                    copy_size);
2400
2401                 if (extended_addr)
2402                         iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2403                                             IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2404
2405                 if (ret) {
2406                         device_printf(sc->sc_dev,
2407                             "%s: Could not load the [%d] uCode section\n",
2408                             __func__, section_num);
2409                         break;
2410                 }
2411         }
2412
2413         return ret;
2414 }
2415
/*
 * ucode
 */

/*
 * Program the service DMA channel to copy one firmware chunk — already
 * staged by the caller at phy_addr (sc->fw_dma) — into device memory at
 * dst_addr, then sleep until the transfer completes.
 *
 * Returns 0 on success, EBUSY if the NIC lock cannot be taken, or
 * ETIMEDOUT if no completion arrives within ~5 seconds.
 */
static int
iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
                             bus_addr_t phy_addr, uint32_t byte_cnt)
{
	int ret;

	/*
	 * Cleared here; set again elsewhere when the chunk finishes —
	 * presumably by the FH_TX interrupt path (TODO confirm), which
	 * also wakes the msleep() below.
	 */
	sc->sc_fw_chunk_done = 0;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Pause the channel while it is reprogrammed. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	/* Destination address in device SRAM. */
	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);

	/* Source: low 32 bits of the host DMA address... */
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	/* ...high address bits plus the byte count. */
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(phy_addr)
	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	/* Single transfer buffer, marked valid. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	/* Kick off the transfer. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* wait up to 5s for this segment to load */
	ret = 0;
	while (!sc->sc_fw_chunk_done) {
		ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
		if (ret)
			break;
	}

	if (ret != 0) {
		device_printf(sc->sc_dev,
		    "fw chunk addr 0x%x len %d failed to load\n",
		    dst_addr, byte_cnt);
		return ETIMEDOUT;
	}

	return 0;
}
2472
/*
 * Load the firmware sections belonging to one CPU on 8000-family
 * devices, reporting each loaded section back to the ucode via the
 * FH_UCODE_LOAD_STATUS register.
 *
 * *first_ucode_section is an in/out cursor into image->fw_sect[]: reset
 * to 0 for CPU 1, advanced past the separator for CPU 2, and updated to
 * the last index examined before returning.
 *
 * Returns 0 on success or the error from iwm_pcie_load_section().
 */
static int
iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	uint32_t val, last_read_idx = 0;

	/* CPU 1 status lives in the low half-word, CPU 2 in the high. */
	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->fw_sect[i].data ||
		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
				    "Break since Data not valid or Empty section, sec = %d\n",
				    i);
			break;
		}
		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
		if (ret)
			return ret;

		/* Notify the ucode of the loaded section number and status */
		if (iwm_nic_lock(sc)) {
			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
			/* OR in one more bit per loaded section. */
			val = val | (sec_num << shift_param);
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
			sec_num = (sec_num << 1) | 0x1;
			iwm_nic_unlock(sc);
		}
	}

	*first_ucode_section = last_read_idx;

	iwm_enable_interrupts(sc);

	/*
	 * Set all status bits for this CPU — presumably signals the ucode
	 * that section loading for it is complete (TODO confirm against
	 * iwlwifi).
	 */
	if (iwm_nic_lock(sc)) {
		if (cpu == 1)
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
		else
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
		iwm_nic_unlock(sc);
	}

	return 0;
}
2534
2535 static int
2536 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2537         const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2538 {
2539         int shift_param;
2540         int i, ret = 0;
2541         uint32_t last_read_idx = 0;
2542
2543         if (cpu == 1) {
2544                 shift_param = 0;
2545                 *first_ucode_section = 0;
2546         } else {
2547                 shift_param = 16;
2548                 (*first_ucode_section)++;
2549         }
2550
2551         for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2552                 last_read_idx = i;
2553
2554                 /*
2555                  * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2556                  * CPU1 to CPU2.
2557                  * PAGING_SEPARATOR_SECTION delimiter - separate between
2558                  * CPU2 non paged to CPU2 paging sec.
2559                  */
2560                 if (!image->fw_sect[i].data ||
2561                     image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2562                     image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2563                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2564                                     "Break since Data not valid or Empty section, sec = %d\n",
2565                                      i);
2566                         break;
2567                 }
2568
2569                 ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2570                 if (ret)
2571                         return ret;
2572         }
2573
2574         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2575                 iwm_set_bits_prph(sc,
2576                                   IWM_CSR_UCODE_LOAD_STATUS_ADDR,
2577                                   (IWM_LMPM_CPU_UCODE_LOADING_COMPLETED |
2578                                    IWM_LMPM_CPU_HDRS_LOADING_COMPLETED |
2579                                    IWM_LMPM_CPU_UCODE_LOADING_STARTED) <<
2580                                         shift_param);
2581
2582         *first_ucode_section = last_read_idx;
2583
2584         return 0;
2585
2586 }
2587
2588 static int
2589 iwm_pcie_load_given_ucode(struct iwm_softc *sc,
2590         const struct iwm_fw_sects *image)
2591 {
2592         int ret = 0;
2593         int first_ucode_section;
2594
2595         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2596                      image->is_dual_cpus ? "Dual" : "Single");
2597
2598         /* load to FW the binary non secured sections of CPU1 */
2599         ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2600         if (ret)
2601                 return ret;
2602
2603         if (image->is_dual_cpus) {
2604                 /* set CPU2 header address */
2605                 if (iwm_nic_lock(sc)) {
2606                         iwm_write_prph(sc,
2607                                        IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2608                                        IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2609                         iwm_nic_unlock(sc);
2610                 }
2611
2612                 /* load to FW the binary sections of CPU2 */
2613                 ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2614                                                  &first_ucode_section);
2615                 if (ret)
2616                         return ret;
2617         }
2618
2619         iwm_enable_interrupts(sc);
2620
2621         /* release CPU reset */
2622         IWM_WRITE(sc, IWM_CSR_RESET, 0);
2623
2624         return 0;
2625 }
2626
2627 int
2628 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2629         const struct iwm_fw_sects *image)
2630 {
2631         int ret = 0;
2632         int first_ucode_section;
2633
2634         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2635                     image->is_dual_cpus ? "Dual" : "Single");
2636
2637         /* configure the ucode to be ready to get the secured image */
2638         /* release CPU reset */
2639         if (iwm_nic_lock(sc)) {
2640                 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2641                     IWM_RELEASE_CPU_RESET_BIT);
2642                 iwm_nic_unlock(sc);
2643         }
2644
2645         /* load to FW the binary Secured sections of CPU1 */
2646         ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2647             &first_ucode_section);
2648         if (ret)
2649                 return ret;
2650
2651         /* load to FW the binary sections of CPU2 */
2652         return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2653             &first_ucode_section);
2654 }
2655
/* XXX Get rid of this definition */
/*
 * Mask all interrupts except FH_TX, the only one needed while firmware
 * chunks are being DMA'd to the device (see iwm_start_fw()).
 */
static inline void
iwm_enable_fw_load_int(struct iwm_softc *sc)
{
	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
2664
2665 /* XXX Add proper rfkill support code */
2666 static int
2667 iwm_start_fw(struct iwm_softc *sc,
2668         const struct iwm_fw_sects *fw)
2669 {
2670         int ret;
2671
2672         /* This may fail if AMT took ownership of the device */
2673         if (iwm_prepare_card_hw(sc)) {
2674                 device_printf(sc->sc_dev,
2675                     "%s: Exit HW not ready\n", __func__);
2676                 ret = EIO;
2677                 goto out;
2678         }
2679
2680         IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2681
2682         iwm_disable_interrupts(sc);
2683
2684         /* make sure rfkill handshake bits are cleared */
2685         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2686         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2687             IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2688
2689         /* clear (again), then enable host interrupts */
2690         IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2691
2692         ret = iwm_nic_init(sc);
2693         if (ret) {
2694                 device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2695                 goto out;
2696         }
2697
2698         /*
2699          * Now, we load the firmware and don't want to be interrupted, even
2700          * by the RF-Kill interrupt (hence mask all the interrupt besides the
2701          * FH_TX interrupt which is needed to load the firmware). If the
2702          * RF-Kill switch is toggled, we will find out after having loaded
2703          * the firmware and return the proper value to the caller.
2704          */
2705         iwm_enable_fw_load_int(sc);
2706
2707         /* really make sure rfkill handshake bits are cleared */
2708         /* maybe we should write a few times more?  just to make sure */
2709         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2710         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2711
2712         /* Load the given image to the HW */
2713         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2714                 ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2715         else
2716                 ret = iwm_pcie_load_given_ucode(sc, fw);
2717
2718         /* XXX re-check RF-Kill state */
2719
2720 out:
2721         return ret;
2722 }
2723
2724 static int
2725 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2726 {
2727         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2728                 .valid = htole32(valid_tx_ant),
2729         };
2730
2731         return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2732             IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2733 }
2734
2735 /* iwlwifi: mvm/fw.c */
2736 static int
2737 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2738 {
2739         struct iwm_phy_cfg_cmd phy_cfg_cmd;
2740         enum iwm_ucode_type ucode_type = sc->cur_ucode;
2741
2742         /* Set parameters */
2743         phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
2744         phy_cfg_cmd.calib_control.event_trigger =
2745             sc->sc_default_calib[ucode_type].event_trigger;
2746         phy_cfg_cmd.calib_control.flow_trigger =
2747             sc->sc_default_calib[ucode_type].flow_trigger;
2748
2749         IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2750             "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2751         return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2752             sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2753 }
2754
/*
 * Notification-wait callback for the ALIVE response.  The firmware has
 * shipped three layouts of this response over time; they are told apart
 * purely by payload size.  Extracts the error/log event table pointers
 * and the scheduler base address, and records whether the firmware
 * reported IWM_ALIVE_STATUS_OK in alive_data->valid.
 *
 * Always returns TRUE so the notification wait completes.
 */
static int
iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
{
	struct iwm_mvm_alive_data *alive_data = data;
	struct iwm_mvm_alive_resp_ver1 *palive1;
	struct iwm_mvm_alive_resp_ver2 *palive2;
	struct iwm_mvm_alive_resp *palive;

	/* Version 1: no UMAC information at all. */
	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
		palive1 = (void *)pkt->data;

		sc->support_umac_log = FALSE;
		sc->error_event_table =
			le32toh(palive1->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive1->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);

		alive_data->valid = le16toh(palive1->status) ==
				    IWM_ALIVE_STATUS_OK;
		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16toh(palive1->status), palive1->ver_type,
			     palive1->ver_subtype, palive1->flags);
	/* Version 2: adds the UMAC error-info address. */
	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
		palive2 = (void *)pkt->data;
		sc->error_event_table =
			le32toh(palive2->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive2->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
		sc->umac_error_event_table =
			le32toh(palive2->error_info_addr);

		alive_data->valid = le16toh(palive2->status) ==
				    IWM_ALIVE_STATUS_OK;
		/* A non-zero address means UMAC logging is available. */
		if (sc->umac_error_event_table)
			sc->support_umac_log = TRUE;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive2->status), palive2->ver_type,
			    palive2->ver_subtype, palive2->flags);

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			    palive2->umac_major, palive2->umac_minor);
	/* Version 3 (current struct): 32-bit UMAC version fields. */
	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		palive = (void *)pkt->data;

		sc->error_event_table =
			le32toh(palive->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
		sc->umac_error_event_table =
			le32toh(palive->error_info_addr);

		alive_data->valid = le16toh(palive->status) ==
				    IWM_ALIVE_STATUS_OK;
		if (sc->umac_error_event_table)
			sc->support_umac_log = TRUE;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive->status), palive->ver_type,
			    palive->ver_subtype, palive->flags);

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			    le32toh(palive->umac_major),
			    le32toh(palive->umac_minor));
	}
	/* NOTE(review): an unrecognized payload size is silently ignored,
	 * leaving alive_data->valid untouched — confirm this is intended. */

	return TRUE;
}
2831
2832 static int
2833 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2834         struct iwm_rx_packet *pkt, void *data)
2835 {
2836         struct iwm_phy_db *phy_db = data;
2837
2838         if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2839                 if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2840                         device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2841                             __func__, pkt->hdr.code);
2842                 }
2843                 return TRUE;
2844         }
2845
2846         if (iwm_phy_db_set_section(phy_db, pkt)) {
2847                 device_printf(sc->sc_dev,
2848                     "%s: iwm_phy_db_set_section failed\n", __func__);
2849         }
2850
2851         return FALSE;
2852 }
2853
/*
 * Read the requested firmware image, start it on the hardware, and
 * block (with the softc lock dropped) until the ALIVE notification
 * arrives.  On any failure sc->cur_ucode is rolled back to the
 * previously running ucode type.  On success the scheduler is
 * configured and, if the image uses paging, the paging mechanism is
 * set up; sc->ucode_loaded is then set TRUE.
 */
static int
iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
	enum iwm_ucode_type ucode_type)
{
	struct iwm_notification_wait alive_wait;
	struct iwm_mvm_alive_data alive_data;
	const struct iwm_fw_sects *fw;
	enum iwm_ucode_type old_type = sc->cur_ucode;
	int error;
	static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };

	if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
		device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
			error);
		return error;
	}
	fw = &sc->sc_fw.fw_sects[ucode_type];
	sc->cur_ucode = ucode_type;
	sc->ucode_loaded = FALSE;

	/* Register the ALIVE waiter before starting the firmware so the
	 * notification cannot be missed. */
	memset(&alive_data, 0, sizeof(alive_data));
	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
				   alive_cmd, nitems(alive_cmd),
				   iwm_alive_fn, &alive_data);

	error = iwm_start_fw(sc, fw);
	if (error) {
		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
		sc->cur_ucode = old_type;
		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
		return error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	IWM_UNLOCK(sc);
	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
				      IWM_MVM_UCODE_ALIVE_TIMEOUT);
	IWM_LOCK(sc);
	if (error) {
		/* On 8000-family parts, dump the secure-boot CPU status
		 * registers to aid diagnosis of the failed boot. */
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
			if (iwm_nic_lock(sc)) {
				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
				iwm_nic_unlock(sc);
			}
			device_printf(sc->sc_dev,
			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
			    a, b);
		}
		sc->cur_ucode = old_type;
		return error;
	}

	if (!alive_data.valid) {
		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
		    __func__);
		sc->cur_ucode = old_type;
		return EIO;
	}

	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);

	/*
	 * configure and operate fw paging mechanism.
	 * driver configures the paging flow only once, CPU2 paging image
	 * included in the IWM_UCODE_INIT image.
	 */
	if (fw->paging_mem_size) {
		error = iwm_save_fw_paging(sc, fw);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: failed to save the FW paging image\n",
			    __func__);
			return error;
		}

		error = iwm_send_paging_cmd(sc, fw);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: failed to send the paging cmd\n", __func__);
			iwm_free_fw_paging(sc);
			return error;
		}
	}

	/* error is 0 on every path that reaches here. */
	if (!error)
		sc->ucode_loaded = TRUE;
	return error;
}
2947
2948 /*
2949  * mvm misc bits
2950  */
2951
2952 /*
2953  * follows iwlwifi/fw.c
2954  */
/*
 * Boot the INIT ucode and either just read the NVM (justnvm != 0) or
 * run the full init-time calibration sequence, waiting for the
 * calibration-complete notification from the firmware.
 *
 * Note: the successful justnvm path deliberately jumps to the "error"
 * label too — that label only removes the calibration notification
 * waiter, which must happen on every early exit.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	struct iwm_notification_wait calib_wait;
	static const uint16_t init_complete[] = {
		IWM_INIT_COMPLETE_NOTIF,
		IWM_CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		device_printf(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	/* Register the calibration waiter before starting the firmware. */
	iwm_init_notification_wait(sc->sc_notif_wait,
				   &calib_wait,
				   init_complete,
				   nitems(init_complete),
				   iwm_wait_phy_db_entry,
				   sc->sc_phy_db);

	/* Will also start the device */
	ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
	if (ret) {
		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
		    ret);
		goto error;
	}

	if (justnvm) {
		/* Read nvm */
		ret = iwm_nvm_init(sc);
		if (ret) {
			device_printf(sc->sc_dev, "failed to read nvm\n");
			goto error;
		}
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
		/* Success path: still remove the calib_wait waiter. */
		goto error;
	}

	ret = iwm_send_bt_init_conf(sc);
	if (ret) {
		device_printf(sc->sc_dev,
		    "failed to send bt coex configuration: %d\n", ret);
		goto error;
	}

	/* Init Smart FIFO. */
	ret = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
	if (ret)
		goto error;

	/* Send TX valid antennas before triggering calibrations */
	ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
	if (ret) {
		device_printf(sc->sc_dev,
		    "failed to send antennas before calibration: %d\n", ret);
		goto error;
	}

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	ret = iwm_send_phy_cfg_cmd(sc);
	if (ret) {
		device_printf(sc->sc_dev,
		    "%s: Failed to run INIT calibrations: %d\n",
		    __func__, ret);
		goto error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware.
	 */
	IWM_UNLOCK(sc);
	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
	    IWM_MVM_UCODE_CALIB_TIMEOUT);
	IWM_LOCK(sc);

	/* iwm_wait_notification consumed the waiter; skip the removal. */
	goto out;

error:
	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
out:
	return ret;
}
3047
3048 /*
3049  * receive side
3050  */
3051
/*
 * (Re)stock one RX ring slot; called at init-time and at runtime.
 *
 * Allocates a jumbo cluster mbuf, DMA-maps it through the ring's spare
 * map, installs it at ring index 'idx' and points the hardware RX
 * descriptor at the new buffer.
 *
 * NOTE(review): the 'size' parameter is currently unused; the buffer is
 * always allocated with IWM_RBUF_SIZE.
 *
 * Returns 0 on success, ENOBUFS if no mbuf is available, or the busdma
 * error from the mapping attempt.
 */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	bus_dmamap_t dmamap;
	bus_dma_segment_t seg;
	int nsegs, error;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
	if (m == NULL)
		return ENOBUFS;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	/*
	 * Load into the spare map first: if the mapping fails, the
	 * currently installed buffer (data->map/data->m) is untouched.
	 */
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
	    &seg, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: can't map mbuf, error %d\n", __func__, error);
		m_freem(m);
		return error;
	}

	if (data->m != NULL)
		bus_dmamap_unload(ring->data_dmat, data->map);

	/* Swap ring->spare_map with data->map */
	dmamap = data->map;
	data->map = ring->spare_map;
	ring->spare_map = dmamap;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
	data->m = m;

	/* Update RX descriptor. */
	/*
	 * The descriptor holds the physical address shifted right by 8,
	 * hence the 256-byte alignment assertion.
	 */
	KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
	ring->desc[idx] = htole32(seg.ds_addr >> 8);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}
3096
3097 /* iwlwifi: mvm/rx.c */
3098 /*
3099  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3100  * values are reported by the fw as positive values - need to negate
3101  * to obtain their dBM.  Account for missing antennas by replacing 0
3102  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3103  */
3104 static int
3105 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3106 {
3107         int energy_a, energy_b, energy_c, max_energy;
3108         uint32_t val;
3109
3110         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3111         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3112             IWM_RX_INFO_ENERGY_ANT_A_POS;
3113         energy_a = energy_a ? -energy_a : -256;
3114         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3115             IWM_RX_INFO_ENERGY_ANT_B_POS;
3116         energy_b = energy_b ? -energy_b : -256;
3117         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3118             IWM_RX_INFO_ENERGY_ANT_C_POS;
3119         energy_c = energy_c ? -energy_c : -256;
3120         max_energy = MAX(energy_a, energy_b);
3121         max_energy = MAX(max_energy, energy_c);
3122
3123         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3124             "energy In A %d B %d C %d , and max %d\n",
3125             energy_a, energy_b, energy_c, max_energy);
3126
3127         return max_energy;
3128 }
3129
3130 static void
3131 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3132 {
3133         struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3134
3135         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3136
3137         memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3138 }
3139
3140 /*
3141  * Retrieve the average noise (in dBm) among receivers.
3142  */
3143 static int
3144 iwm_get_noise(struct iwm_softc *sc,
3145     const struct iwm_mvm_statistics_rx_non_phy *stats)
3146 {
3147         int i, total, nbant, noise;
3148
3149         total = nbant = noise = 0;
3150         for (i = 0; i < 3; i++) {
3151                 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3152                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3153                     __func__,
3154                     i,
3155                     noise);
3156
3157                 if (noise) {
3158                         total += noise;
3159                         nbant++;
3160                 }
3161         }
3162
3163         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3164             __func__, nbant, total);
3165 #if 0
3166         /* There should be at least one antenna but check anyway. */
3167         return (nbant == 0) ? -127 : (total / nbant) - 107;
3168 #else
3169         /* For now, just hard-code it to -96 to be safe */
3170         return (-96);
3171 #endif
3172 }
3173
3174 /*
3175  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3176  *
3177  * Handles the actual data of the Rx packet from the fw
3178  */
3179 static boolean_t
3180 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3181         boolean_t stolen)
3182 {
3183         struct ieee80211com *ic = &sc->sc_ic;
3184         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3185         struct ieee80211_frame *wh;
3186         struct ieee80211_node *ni;
3187         struct ieee80211_rx_stats rxs;
3188         struct iwm_rx_phy_info *phy_info;
3189         struct iwm_rx_mpdu_res_start *rx_res;
3190         struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3191         uint32_t len;
3192         uint32_t rx_pkt_status;
3193         int rssi;
3194
3195         phy_info = &sc->sc_last_phy_info;
3196         rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3197         wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3198         len = le16toh(rx_res->byte_count);
3199         rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3200
3201         if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3202                 device_printf(sc->sc_dev,
3203                     "dsp size out of range [0,20]: %d\n",
3204                     phy_info->cfg_phy_cnt);
3205                 goto fail;
3206         }
3207
3208         if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3209             !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3210                 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3211                     "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3212                 goto fail;
3213         }
3214
3215         rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3216
3217         /* Map it to relative value */
3218         rssi = rssi - sc->sc_noise;
3219
3220         /* replenish ring for the buffer we're going to feed to the sharks */
3221         if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3222                 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3223                     __func__);
3224                 goto fail;
3225         }
3226
3227         m->m_data = pkt->data + sizeof(*rx_res);
3228         m->m_pkthdr.len = m->m_len = len;
3229
3230         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3231             "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3232
3233         ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3234
3235         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3236             "%s: phy_info: channel=%d, flags=0x%08x\n",
3237             __func__,
3238             le16toh(phy_info->channel),
3239             le16toh(phy_info->phy_flags));
3240
3241         /*
3242          * Populate an RX state struct with the provided information.
3243          */
3244         bzero(&rxs, sizeof(rxs));
3245         rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3246         rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3247         rxs.c_ieee = le16toh(phy_info->channel);
3248         if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3249                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3250         } else {
3251                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3252         }
3253
3254         /* rssi is in 1/2db units */
3255         rxs.c_rssi = rssi * 2;
3256         rxs.c_nf = sc->sc_noise;
3257         if (ieee80211_add_rx_params(m, &rxs) == 0) {
3258                 if (ni)
3259                         ieee80211_free_node(ni);
3260                 goto fail;
3261         }
3262
3263         if (ieee80211_radiotap_active_vap(vap)) {
3264                 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3265
3266                 tap->wr_flags = 0;
3267                 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3268                         tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3269                 tap->wr_chan_freq = htole16(rxs.c_freq);
3270                 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3271                 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3272                 tap->wr_dbm_antsignal = (int8_t)rssi;
3273                 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3274                 tap->wr_tsft = phy_info->system_timestamp;
3275                 switch (phy_info->rate) {
3276                 /* CCK rates. */
3277                 case  10: tap->wr_rate =   2; break;
3278                 case  20: tap->wr_rate =   4; break;
3279                 case  55: tap->wr_rate =  11; break;
3280                 case 110: tap->wr_rate =  22; break;
3281                 /* OFDM rates. */
3282                 case 0xd: tap->wr_rate =  12; break;
3283                 case 0xf: tap->wr_rate =  18; break;
3284                 case 0x5: tap->wr_rate =  24; break;
3285                 case 0x7: tap->wr_rate =  36; break;
3286                 case 0x9: tap->wr_rate =  48; break;
3287                 case 0xb: tap->wr_rate =  72; break;
3288                 case 0x1: tap->wr_rate =  96; break;
3289                 case 0x3: tap->wr_rate = 108; break;
3290                 /* Unknown rate: should not happen. */
3291                 default:  tap->wr_rate =   0;
3292                 }
3293         }
3294
3295         IWM_UNLOCK(sc);
3296         if (ni != NULL) {
3297                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3298                 ieee80211_input_mimo(ni, m);
3299                 ieee80211_free_node(ni);
3300         } else {
3301                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3302                 ieee80211_input_mimo_all(ic, m);
3303         }
3304         IWM_LOCK(sc);
3305
3306         return TRUE;
3307
3308 fail:
3309         counter_u64_add(ic->ic_ierrors, 1);
3310         return FALSE;
3311 }
3312
3313 static int
3314 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3315         struct iwm_node *in)
3316 {
3317         struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3318         struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
3319         struct ieee80211_node *ni = &in->in_ni;
3320         int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3321
3322         KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3323
3324         /* Update rate control statistics. */
3325         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3326             __func__,
3327             (int) le16toh(tx_resp->status.status),
3328             (int) le16toh(tx_resp->status.sequence),
3329             tx_resp->frame_count,
3330             tx_resp->bt_kill_count,
3331             tx_resp->failure_rts,
3332             tx_resp->failure_frame,
3333             le32toh(tx_resp->initial_rate),
3334             (int) le16toh(tx_resp->wireless_media_time));
3335
3336         txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
3337                      IEEE80211_RATECTL_STATUS_LONG_RETRY;
3338         txs->short_retries = tx_resp->failure_rts;
3339         txs->long_retries = tx_resp->failure_frame;
3340         if (status != IWM_TX_STATUS_SUCCESS &&
3341             status != IWM_TX_STATUS_DIRECT_DONE) {
3342                 switch (status) {
3343                 case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
3344                         txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
3345                         break;
3346                 case IWM_TX_STATUS_FAIL_LONG_LIMIT:
3347                         txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
3348                         break;
3349                 case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
3350                         txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
3351                         break;
3352                 default:
3353                         txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
3354                         break;
3355                 }
3356         } else {
3357                 txs->status = IEEE80211_RATECTL_TX_SUCCESS;
3358         }
3359         ieee80211_ratectl_tx_complete(ni, txs);
3360
3361         return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
3362 }
3363
/*
 * Handle a TX command completion notification: run rate-control
 * accounting for the frame, unmap and release the transmitted mbuf,
 * and restart transmission if the ring drains below the low-water
 * mark.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;
	struct mbuf *m = txd->m;
	int status;

	KASSERT(txd->done == 0, ("txd not done"));
	KASSERT(txd->in != NULL, ("txd without node"));
	KASSERT(txd->m != NULL, ("txd without mbuf"));

	/* A completion arrived, so the TX watchdog can be reset. */
	sc->sc_tx_timer = 0;

	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "free txd %p, in %p\n", txd, txd->in);
	/* Mark the slot free before reporting completion to net80211. */
	txd->done = 1;
	txd->m = NULL;
	txd->in = NULL;

	ieee80211_tx_complete(&in->in_ni, m, status);

	/* Clear the queue-full bit and restart TX once all queues drain. */
	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0) {
			iwm_start(sc);
		}
	}
}
3403
3404 /*
3405  * transmit side
3406  */
3407
3408 /*
3409  * Process a "command done" firmware notification.  This is where we wakeup
3410  * processes waiting for a synchronous command completion.
3411  * from if_iwn
3412  */
3413 static void
3414 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3415 {
3416         struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3417         struct iwm_tx_data *data;
3418
3419         if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3420                 return; /* Not a command ack. */
3421         }
3422
3423         /* XXX wide commands? */
3424         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3425             "cmd notification type 0x%x qid %d idx %d\n",
3426             pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3427
3428         data = &ring->data[pkt->hdr.idx];
3429
3430         /* If the command was mapped in an mbuf, free it. */
3431         if (data->m != NULL) {
3432                 bus_dmamap_sync(ring->data_dmat, data->map,
3433                     BUS_DMASYNC_POSTWRITE);
3434                 bus_dmamap_unload(ring->data_dmat, data->map);
3435                 m_freem(data->m);
3436                 data->m = NULL;
3437         }
3438         wakeup(&ring->desc[pkt->hdr.idx]);
3439
3440         if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3441                 device_printf(sc->sc_dev,
3442                     "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3443                     __func__, pkt->hdr.idx, ring->queued, ring->cur);
3444                 /* XXX call iwm_force_nmi() */
3445         }
3446
3447         KASSERT(ring->queued > 0, ("ring->queued is empty?"));
3448         ring->queued--;
3449         if (ring->queued == 0)
3450                 iwm_pcie_clear_cmd_in_flight(sc);
3451 }
3452
#if 0
/*
 * necessary only for block ack mode
 *
 * Currently compiled out: writes the per-frame byte count for queue
 * 'qid', slot 'idx' into the scheduler byte-count table so the
 * hardware TX scheduler knows the frame length.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	/* Some firmware expects the count in dwords rather than bytes. */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	/* Entry format: station id in the top 4 bits, length below. */
	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * I really wonder what this is ?!?  Presumably a duplicate
	 * (BC_DUP) mirror area past the main table — TODO confirm
	 * against the iwlwifi reference.
	 */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
}
#endif
3485
3486 /*
3487  * Take an 802.11 (non-n) rate, find the relevant rate
3488  * table entry.  return the index into in_ridx[].
3489  *
3490  * The caller then uses that index back into in_ridx
3491  * to figure out the rate index programmed /into/
3492  * the firmware for this given node.
3493  */
3494 static int
3495 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3496     uint8_t rate)
3497 {
3498         int i;
3499         uint8_t r;
3500
3501         for (i = 0; i < nitems(in->in_ridx); i++) {
3502                 r = iwm_rates[in->in_ridx[i]].rate;
3503                 if (rate == r)
3504                         return (i);
3505         }
3506
3507         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3508             "%s: couldn't find an entry for rate=%d\n",
3509             __func__,
3510             rate);
3511
3512         /* XXX Return the first */
3513         /* XXX TODO: have it return the /lowest/ */
3514         return (0);
3515 }
3516
3517 static int
3518 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3519 {
3520         int i;
3521
3522         for (i = 0; i < nitems(iwm_rates); i++) {
3523                 if (iwm_rates[i].rate == rate)
3524                         return (i);
3525         }
3526         /* XXX error? */
3527         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3528             "%s: couldn't find an entry for rate=%d\n",
3529             __func__,
3530             rate);
3531         return (0);
3532 }
3533
3534 /*
3535  * Fill in the rate related information for a transmit command.
3536  */
3537 static const struct iwm_rate *
3538 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3539         struct mbuf *m, struct iwm_tx_cmd *tx)
3540 {
3541         struct ieee80211_node *ni = &in->in_ni;
3542         struct ieee80211_frame *wh;
3543         const struct ieee80211_txparam *tp = ni->ni_txparms;
3544         const struct iwm_rate *rinfo;
3545         int type;
3546         int ridx, rate_flags;
3547
3548         wh = mtod(m, struct ieee80211_frame *);
3549         type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3550
3551         tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3552         tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3553
3554         if (type == IEEE80211_FC0_TYPE_MGT ||
3555             type == IEEE80211_FC0_TYPE_CTL ||
3556             (m->m_flags & M_EAPOL) != 0) {
3557                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3558                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3559                     "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3560         } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3561                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3562                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3563                     "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3564         } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3565                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3566                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3567                     "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3568         } else {
3569                 int i;
3570
3571                 /* for data frames, use RS table */
3572                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3573                 /* XXX pass pktlen */
3574                 (void) ieee80211_ratectl_rate(ni, NULL, 0);
3575                 i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3576                 ridx = in->in_ridx[i];
3577
3578                 /* This is the index into the programmed table */
3579                 tx->initial_rate_index = i;
3580                 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3581
3582                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3583                     "%s: start with i=%d, txrate %d\n",
3584                     __func__, i, iwm_rates[ridx].rate);
3585         }
3586
3587         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3588             "%s: frame type=%d txrate %d\n",
3589                 __func__, type, iwm_rates[ridx].rate);
3590
3591         rinfo = &iwm_rates[ridx];
3592
3593         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3594             __func__, ridx,
3595             rinfo->rate,
3596             !! (IWM_RIDX_IS_CCK(ridx))
3597             );
3598
3599         /* XXX TODO: hard-coded TX antenna? */
3600         rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3601         if (IWM_RIDX_IS_CCK(ridx))
3602                 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3603         tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3604
3605         return rinfo;
3606 }
3607
#define TB0_SIZE 16
/*
 * Queue one frame for transmission on TX ring 'ac'.
 *
 * Builds the iwm_tx_cmd (rate info, flags, copied 802.11 header) in
 * the ring's command buffer, optionally software-encrypts the frame,
 * DMA-maps the payload, fills the TFD scatter/gather entries and kicks
 * the hardware write pointer.
 *
 * Returns 0 on success or an errno; the mbuf is consumed on error.
 * Called with the driver lock held.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_node *in = IWM_NODE(ni);
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
	int nsegs;
	uint8_t tid, type;
	int i, totlen, error, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	tid = 0;
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	rinfo = iwm_tx_fill_cmd(sc, in, m, tx);

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX && do software encryption. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rinfo->rate;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		ieee80211_radiotap_tx(vap, m);
	}


	totlen = m->m_pkthdr.len;

	flags = 0;
	/* Unicast frames are expected to be acknowledged. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* RTS/CTS protection for long unicast data frames. */
	if (type == IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	/* Non-data and multicast frames go through the auxiliary station. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	/* Power-management frame timeout depends on the mgmt subtype. */
	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
		} else {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
		}
	} else {
		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		if (error != EFBIG) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			m_freem(m);
			return (ENOBUFS);
		}
		m = m1;

		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending txd %p, in %p\n", data, data->in);
	KASSERT(data->in != NULL, ("node is NULL"));

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
	    ring->qid, ring->cur, totlen, nsegs,
	    le32toh(tx->tx_flags),
	    le32toh(tx->rate_n_flags),
	    tx->initial_rate_index
	    );

	/* Fill TX descriptor. */
	/* TBs 0 and 1 cover the command header + tx cmd + 802.11 header. */
	desc->num_tbs = 2 + nsegs;

	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	for (i = 0; i < nsegs; i++) {
		seg = &segs[i];
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	bus_dmamap_sync(ring->data_dmat, data->map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
3825
3826 static int
3827 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3828     const struct ieee80211_bpf_params *params)
3829 {
3830         struct ieee80211com *ic = ni->ni_ic;
3831         struct iwm_softc *sc = ic->ic_softc;
3832         int error = 0;
3833
3834         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3835             "->%s begin\n", __func__);
3836
3837         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3838                 m_freem(m);
3839                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3840                     "<-%s not RUNNING\n", __func__);
3841                 return (ENETDOWN);
3842         }
3843
3844         IWM_LOCK(sc);
3845         /* XXX fix this */
3846         if (params == NULL) {
3847                 error = iwm_tx(sc, m, ni, 0);
3848         } else {
3849                 error = iwm_tx(sc, m, ni, 0);
3850         }
3851         sc->sc_tx_timer = 5;
3852         IWM_UNLOCK(sc);
3853
3854         return (error);
3855 }
3856
3857 /*
3858  * mvm/tx.c
3859  */
3860
3861 /*
3862  * Note that there are transports that buffer frames before they reach
3863  * the firmware. This means that after flush_tx_path is called, the
3864  * queue might not be empty. The race-free way to handle this is to:
3865  * 1) set the station as draining
3866  * 2) flush the Tx path
3867  * 3) wait for the transport queues to be empty
3868  */
3869 int
3870 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3871 {
3872         int ret;
3873         struct iwm_tx_path_flush_cmd flush_cmd = {
3874                 .queues_ctl = htole32(tfd_msk),
3875                 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3876         };
3877
3878         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3879             sizeof(flush_cmd), &flush_cmd);
3880         if (ret)
3881                 device_printf(sc->sc_dev,
3882                     "Flushing tx queue failed: %d\n", ret);
3883         return ret;
3884 }
3885
3886 /*
3887  * BEGIN mvm/sta.c
3888  */
3889
3890 static int
3891 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3892         struct iwm_mvm_add_sta_cmd *cmd, int *status)
3893 {
3894         return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
3895             cmd, status);
3896 }
3897
3898 /* send station add/update command to firmware */
3899 static int
3900 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3901 {
3902         struct iwm_vap *ivp = IWM_VAP(in->in_ni.ni_vap);
3903         struct iwm_mvm_add_sta_cmd add_sta_cmd;
3904         int ret;
3905         uint32_t status;
3906
3907         memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3908
3909         add_sta_cmd.sta_id = IWM_STATION_ID;
3910         add_sta_cmd.mac_id_n_color
3911             = htole32(IWM_FW_CMD_ID_AND_COLOR(ivp->id, ivp->color));
3912         if (!update) {
3913                 int ac;
3914                 for (ac = 0; ac < WME_NUM_AC; ac++) {
3915                         add_sta_cmd.tfd_queue_msk |=
3916                             htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
3917                 }
3918                 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3919         }
3920         add_sta_cmd.add_modify = update ? 1 : 0;
3921         add_sta_cmd.station_flags_msk
3922             |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3923         add_sta_cmd.tid_disable_tx = htole16(0xffff);
3924         if (update)
3925                 add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
3926
3927         status = IWM_ADD_STA_SUCCESS;
3928         ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3929         if (ret)
3930                 return ret;
3931
3932         switch (status & IWM_ADD_STA_STATUS_MASK) {
3933         case IWM_ADD_STA_SUCCESS:
3934                 break;
3935         default:
3936                 ret = EIO;
3937                 device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3938                 break;
3939         }
3940
3941         return ret;
3942 }
3943
static int
iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
{
        /* update == 0: install a brand-new station entry in the firmware. */
        const int update = 0;

        return iwm_mvm_sta_send_to_fw(sc, in, update);
}
3949
static int
iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
{
        /* update == 1: modify the already-installed station entry. */
        const int update = 1;

        return iwm_mvm_sta_send_to_fw(sc, in, update);
}
3955
3956 static int
3957 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3958         const uint8_t *addr, uint16_t mac_id, uint16_t color)
3959 {
3960         struct iwm_mvm_add_sta_cmd cmd;
3961         int ret;
3962         uint32_t status;
3963
3964         memset(&cmd, 0, sizeof(cmd));
3965         cmd.sta_id = sta->sta_id;
3966         cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3967
3968         cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3969         cmd.tid_disable_tx = htole16(0xffff);
3970
3971         if (addr)
3972                 IEEE80211_ADDR_COPY(cmd.addr, addr);
3973
3974         ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3975         if (ret)
3976                 return ret;
3977
3978         switch (status & IWM_ADD_STA_STATUS_MASK) {
3979         case IWM_ADD_STA_SUCCESS:
3980                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3981                     "%s: Internal station added.\n", __func__);
3982                 return 0;
3983         default:
3984                 device_printf(sc->sc_dev,
3985                     "%s: Add internal station failed, status=0x%x\n",
3986                     __func__, status);
3987                 ret = EIO;
3988                 break;
3989         }
3990         return ret;
3991 }
3992
3993 static int
3994 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3995 {
3996         int ret;
3997
3998         sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
3999         sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
4000
4001         ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
4002         if (ret)
4003                 return ret;
4004
4005         ret = iwm_mvm_add_int_sta_common(sc,
4006             &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
4007
4008         if (ret)
4009                 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
4010         return ret;
4011 }
4012
4013 /*
4014  * END mvm/sta.c
4015  */
4016
4017 /*
4018  * BEGIN mvm/quota.c
4019  */
4020
4021 static int
4022 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
4023 {
4024         struct iwm_time_quota_cmd cmd;
4025         int i, idx, ret, num_active_macs, quota, quota_rem;
4026         int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
4027         int n_ifs[IWM_MAX_BINDINGS] = {0, };
4028         uint16_t id;
4029
4030         memset(&cmd, 0, sizeof(cmd));
4031
4032         /* currently, PHY ID == binding ID */
4033         if (ivp) {
4034                 id = ivp->phy_ctxt->id;
4035                 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
4036                 colors[id] = ivp->phy_ctxt->color;
4037
4038                 if (1)
4039                         n_ifs[id] = 1;
4040         }
4041
4042         /*
4043          * The FW's scheduling session consists of
4044          * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
4045          * equally between all the bindings that require quota
4046          */
4047         num_active_macs = 0;
4048         for (i = 0; i < IWM_MAX_BINDINGS; i++) {
4049                 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
4050                 num_active_macs += n_ifs[i];
4051         }
4052
4053         quota = 0;
4054         quota_rem = 0;
4055         if (num_active_macs) {
4056                 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
4057                 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
4058         }
4059
4060         for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
4061                 if (colors[i] < 0)
4062                         continue;
4063
4064                 cmd.quotas[idx].id_and_color =
4065                         htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
4066
4067                 if (n_ifs[i] <= 0) {
4068                         cmd.quotas[idx].quota = htole32(0);
4069                         cmd.quotas[idx].max_duration = htole32(0);
4070                 } else {
4071                         cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
4072                         cmd.quotas[idx].max_duration = htole32(0);
4073                 }
4074                 idx++;
4075         }
4076
4077         /* Give the remainder of the session to the first binding */
4078         cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
4079
4080         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
4081             sizeof(cmd), &cmd);
4082         if (ret)
4083                 device_printf(sc->sc_dev,
4084                     "%s: Failed to send quota: %d\n", __func__, ret);
4085         return ret;
4086 }
4087
4088 /*
4089  * END mvm/quota.c
4090  */
4091
4092 /*
4093  * ieee80211 routines
4094  */
4095
4096 /*
4097  * Change to AUTH state in 80211 state machine.  Roughly matches what
4098  * Linux does in bss_info_changed().
4099  */
4100 static int
4101 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
4102 {
4103         struct ieee80211_node *ni;
4104         struct iwm_node *in;
4105         struct iwm_vap *iv = IWM_VAP(vap);
4106         uint32_t duration;
4107         int error;
4108
4109         /*
4110          * XXX i have a feeling that the vap node is being
4111          * freed from underneath us. Grr.
4112          */
4113         ni = ieee80211_ref_node(vap->iv_bss);
4114         in = IWM_NODE(ni);
4115         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4116             "%s: called; vap=%p, bss ni=%p\n",
4117             __func__,
4118             vap,
4119             ni);
4120
4121         in->in_assoc = 0;
4122
4123         error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
4124         if (error != 0)
4125                 return error;
4126
4127         error = iwm_allow_mcast(vap, sc);
4128         if (error) {
4129                 device_printf(sc->sc_dev,
4130                     "%s: failed to set multicast\n", __func__);
4131                 goto out;
4132         }
4133
4134         /*
4135          * This is where it deviates from what Linux does.
4136          *
4137          * Linux iwlwifi doesn't reset the nic each time, nor does it
4138          * call ctxt_add() here.  Instead, it adds it during vap creation,
4139          * and always does a mac_ctx_changed().
4140          *
4141          * The openbsd port doesn't attempt to do that - it reset things
4142          * at odd states and does the add here.
4143          *
4144          * So, until the state handling is fixed (ie, we never reset
4145          * the NIC except for a firmware failure, which should drag
4146          * the NIC back to IDLE, re-setup and re-add all the mac/phy
4147          * contexts that are required), let's do a dirty hack here.
4148          */
4149         if (iv->is_uploaded) {
4150                 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4151                         device_printf(sc->sc_dev,
4152                             "%s: failed to update MAC\n", __func__);
4153                         goto out;
4154                 }
4155         } else {
4156                 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4157                         device_printf(sc->sc_dev,
4158                             "%s: failed to add MAC\n", __func__);
4159                         goto out;
4160                 }
4161                 if ((error = iwm_mvm_power_update_mac(sc)) != 0) {
4162                         device_printf(sc->sc_dev,
4163                             "%s: failed to update power management\n",
4164                             __func__);
4165                         goto out;
4166                 }
4167         }
4168
4169         if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4170             in->in_ni.ni_chan, 1, 1)) != 0) {
4171                 device_printf(sc->sc_dev,
4172                     "%s: failed update phy ctxt\n", __func__);
4173                 goto out;
4174         }
4175         iv->phy_ctxt = &sc->sc_phyctxt[0];
4176
4177         if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
4178                 device_printf(sc->sc_dev,
4179                     "%s: binding update cmd\n", __func__);
4180                 goto out;
4181         }
4182         if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4183                 device_printf(sc->sc_dev,
4184                     "%s: failed to add sta\n", __func__);
4185                 goto out;
4186         }
4187
4188         /*
4189          * Prevent the FW from wandering off channel during association
4190          * by "protecting" the session with a time event.
4191          */
4192         /* XXX duration is in units of TU, not MS */
4193         duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4194         iwm_mvm_protect_session(sc, iv, duration, 500 /* XXX magic number */);
4195         DELAY(100);
4196
4197         error = 0;
4198 out:
4199         ieee80211_free_node(ni);
4200         return (error);
4201 }
4202
4203 static int
4204 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
4205 {
4206         uint32_t tfd_msk;
4207
4208         /*
4209          * Ok, so *technically* the proper set of calls for going
4210          * from RUN back to SCAN is:
4211          *
4212          * iwm_mvm_power_mac_disable(sc, in);
4213          * iwm_mvm_mac_ctxt_changed(sc, vap);
4214          * iwm_mvm_rm_sta(sc, in);
4215          * iwm_mvm_update_quotas(sc, NULL);
4216          * iwm_mvm_mac_ctxt_changed(sc, in);
4217          * iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4218          * iwm_mvm_mac_ctxt_remove(sc, in);
4219          *
4220          * However, that freezes the device not matter which permutations
4221          * and modifications are attempted.  Obviously, this driver is missing
4222          * something since it works in the Linux driver, but figuring out what
4223          * is missing is a little more complicated.  Now, since we're going
4224          * back to nothing anyway, we'll just do a complete device reset.
4225          * Up your's, device!
4226          */
4227         /*
4228          * Just using 0xf for the queues mask is fine as long as we only
4229          * get here from RUN state.
4230          */
4231         tfd_msk = 0xf;
4232         mbufq_drain(&sc->sc_snd);
4233         iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
4234         /*
4235          * We seem to get away with just synchronously sending the
4236          * IWM_TXPATH_FLUSH command.
4237          */
4238 //      iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
4239         iwm_stop_device(sc);
4240         iwm_init_hw(sc);
4241         if (in)
4242                 in->in_assoc = 0;
4243         return 0;
4244
4245 #if 0
4246         int error;
4247
4248         iwm_mvm_power_mac_disable(sc, in);
4249
4250         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4251                 device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
4252                 return error;
4253         }
4254
4255         if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
4256                 device_printf(sc->sc_dev, "sta remove fail %d\n", error);
4257                 return error;
4258         }
4259         error = iwm_mvm_rm_sta(sc, in);
4260         in->in_assoc = 0;
4261         iwm_mvm_update_quotas(sc, NULL);
4262         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4263                 device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
4264                 return error;
4265         }
4266         iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4267
4268         iwm_mvm_mac_ctxt_remove(sc, in);
4269
4270         return error;
4271 #endif
4272 }
4273
4274 static struct ieee80211_node *
4275 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4276 {
4277         return malloc(sizeof (struct iwm_node), M_80211_NODE,
4278             M_NOWAIT | M_ZERO);
4279 }
4280
4281 uint8_t
4282 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4283 {
4284         int i;
4285         uint8_t rval;
4286
4287         for (i = 0; i < rs->rs_nrates; i++) {
4288                 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4289                 if (rval == iwm_rates[ridx].rate)
4290                         return rs->rs_rates[i];
4291         }
4292
4293         return 0;
4294 }
4295
/*
 * Build the firmware link-quality (rate selection) command for a node:
 * map the node's negotiated 802.11 rates to hardware rate indices, then
 * fill in->in_lq's rate table from the highest rate down.
 */
static void
iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
{
        struct ieee80211_node *ni = &in->in_ni;
        struct iwm_lq_cmd *lq = &in->in_lq;
        int nrates = ni->ni_rates.rs_nrates;
        int i, ridx, tab = 0;
//      int txant = 0;

        /* Bail out on degenerate rate sets; lq is left untouched. */
        if (nrates > nitems(lq->rs_table)) {
                device_printf(sc->sc_dev,
                    "%s: node supports %d rates, driver handles "
                    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
                return;
        }
        if (nrates == 0) {
                device_printf(sc->sc_dev,
                    "%s: node supports 0 rates, odd!\n", __func__);
                return;
        }

        /*
         * XXX .. and most of iwm_node is not initialised explicitly;
         * it's all just 0x0 passed to the firmware.
         */

        /* first figure out which rates we should support */
        /* XXX TODO: this isn't 11n aware /at all/ */
        memset(&in->in_ridx, -1, sizeof(in->in_ridx));
        IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
            "%s: nrates=%d\n", __func__, nrates);

        /*
         * Loop over nrates and populate in_ridx from the highest
         * rate to the lowest rate.  Remember, in_ridx[] has
         * IEEE80211_RATE_MAXSIZE entries!
         */
        for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
                int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;

                /* Map 802.11 rate to HW rate index. */
                for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
                        if (iwm_rates[ridx].rate == rate)
                                break;
                if (ridx > IWM_RIDX_MAX) {
                        device_printf(sc->sc_dev,
                            "%s: WARNING: device rate for %d not found!\n",
                            __func__, rate);
                } else {
                        IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
                            "%s: rate: i: %d, rate=%d, ridx=%d\n",
                            __func__,
                            i,
                            rate,
                            ridx);
                        in->in_ridx[i] = ridx;
                }
        }

        /* then construct a lq_cmd based on those */
        memset(lq, 0, sizeof(*lq));
        lq->sta_id = IWM_STATION_ID;

        /* For HT, always enable RTS/CTS to avoid excessive retries. */
        if (ni->ni_flags & IEEE80211_NODE_HT)
                lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

        /*
         * are these used? (we don't do SISO or MIMO)
         * need to set them to non-zero, though, or we get an error.
         */
        lq->single_stream_ant_msk = 1;
        lq->dual_stream_ant_msk = 1;

        /*
         * Build the actual rate selection table.
         * The lowest bits are the rates.  Additionally,
         * CCK needs bit 9 to be set.  The rest of the bits
         * we add to the table select the tx antenna
         * Note that we add the rates in the highest rate first
         * (opposite of ni_rates).
         */
        /*
         * XXX TODO: this should be looping over the min of nrates
         * and LQ_MAX_RETRY_NUM.  Sigh.
         */
        for (i = 0; i < nrates; i++) {
                int nextant;

#if 0
                if (txant == 0)
                        txant = iwm_mvm_get_valid_tx_ant(sc);
                nextant = 1<<(ffs(txant)-1);
                txant &= ~nextant;
#else
                nextant = iwm_mvm_get_valid_tx_ant(sc);
#endif
                /*
                 * Map the rate id into a rate index into
                 * our hardware table containing the
                 * configuration to use for this rate.
                 */
                /*
                 * NOTE(review): in_ridx[] was memset to -1 above and an
                 * entry stays -1 when the device rate lookup failed; such
                 * a value would index iwm_rates[] out of bounds here —
                 * confirm all negotiated rates have device table entries.
                 */
                ridx = in->in_ridx[i];
                tab = iwm_rates[ridx].plcp;
                tab |= nextant << IWM_RATE_MCS_ANT_POS;
                if (IWM_RIDX_IS_CCK(ridx))
                        tab |= IWM_RATE_MCS_CCK_MSK;
                IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
                    "station rate i=%d, rate=%d, hw=%x\n",
                    i, iwm_rates[ridx].rate, tab);
                lq->rs_table[i] = htole32(tab);
        }
        /* then fill the rest with the lowest possible rate */
        for (i = nrates; i < nitems(lq->rs_table); i++) {
                KASSERT(tab != 0, ("invalid tab"));
                lq->rs_table[i] = htole32(tab);
        }
}
4414
4415 static int
4416 iwm_media_change(struct ifnet *ifp)
4417 {
4418         struct ieee80211vap *vap = ifp->if_softc;
4419         struct ieee80211com *ic = vap->iv_ic;
4420         struct iwm_softc *sc = ic->ic_softc;
4421         int error;
4422
4423         error = ieee80211_media_change(ifp);
4424         if (error != ENETRESET)
4425                 return error;
4426
4427         IWM_LOCK(sc);
4428         if (ic->ic_nrunning > 0) {
4429                 iwm_stop(sc);
4430                 iwm_init(sc);
4431         }
4432         IWM_UNLOCK(sc);
4433         return error;
4434 }
4435
4436
/*
 * net80211 state-change hook.  Drops the com lock and takes the driver
 * lock (and reverses that around any call back into net80211) to keep
 * lock ordering consistent; finally chains to the saved iv_newstate.
 */
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
        struct iwm_vap *ivp = IWM_VAP(vap);
        struct ieee80211com *ic = vap->iv_ic;
        struct iwm_softc *sc = ic->ic_softc;
        struct iwm_node *in;
        int error;

        IWM_DPRINTF(sc, IWM_DEBUG_STATE,
            "switching state %s -> %s\n",
            ieee80211_state_name[vap->iv_state],
            ieee80211_state_name[nstate]);
        /* Swap the com lock for the driver lock (lock-order dance). */
        IEEE80211_UNLOCK(ic);
        IWM_LOCK(sc);

        if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
                iwm_led_blink_stop(sc);

        /* disable beacon filtering if we're hopping out of RUN */
        if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
                iwm_mvm_disable_beacon_filter(sc);

                if (((in = IWM_NODE(vap->iv_bss)) != NULL))
                        in->in_assoc = 0;

                if (nstate == IEEE80211_S_INIT) {
                        /*
                         * Run net80211's transition first, then reset the
                         * device via iwm_release() under the driver lock.
                         */
                        IWM_UNLOCK(sc);
                        IEEE80211_LOCK(ic);
                        error = ivp->iv_newstate(vap, nstate, arg);
                        IEEE80211_UNLOCK(ic);
                        IWM_LOCK(sc);
                        iwm_release(sc, NULL);
                        IWM_UNLOCK(sc);
                        IEEE80211_LOCK(ic);
                        return error;
                }

                /*
                 * It's impossible to directly go RUN->SCAN. If we iwm_release()
                 * above then the card will be completely reinitialized,
                 * so the driver must do everything necessary to bring the card
                 * from INIT to SCAN.
                 *
                 * Additionally, upon receiving deauth frame from AP,
                 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
                 * state. This will also fail with this driver, so bring the FSM
                 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
                 *
                 * XXX TODO: fix this for FreeBSD!
                 */
                if (nstate == IEEE80211_S_SCAN ||
                    nstate == IEEE80211_S_AUTH ||
                    nstate == IEEE80211_S_ASSOC) {
                        IWM_DPRINTF(sc, IWM_DEBUG_STATE,
                            "Force transition to INIT; MGT=%d\n", arg);
                        IWM_UNLOCK(sc);
                        IEEE80211_LOCK(ic);
                        /* Always pass arg as -1 since we can't Tx right now. */
                        /*
                         * XXX arg is just ignored anyway when transitioning
                         *     to IEEE80211_S_INIT.
                         */
                        vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
                        IWM_DPRINTF(sc, IWM_DEBUG_STATE,
                            "Going INIT->SCAN\n");
                        nstate = IEEE80211_S_SCAN;
                        IEEE80211_UNLOCK(ic);
                        IWM_LOCK(sc);
                }
        }

        switch (nstate) {
        case IEEE80211_S_INIT:
                break;

        case IEEE80211_S_AUTH:
                if ((error = iwm_auth(vap, sc)) != 0) {
                        device_printf(sc->sc_dev,
                            "%s: could not move to auth state: %d\n",
                            __func__, error);
                }
                break;

        case IEEE80211_S_ASSOC:
                /*
                 * EBS may be disabled due to previous failures reported by FW.
                 * Reset EBS status here assuming environment has been changed.
                 */
                sc->last_ebs_successful = TRUE;
                break;

        case IEEE80211_S_RUN:
        {
                struct iwm_host_cmd cmd = {
                        .id = IWM_LQ_CMD,
                        .len = { sizeof(in->in_lq), },
                        .flags = IWM_CMD_SYNC,
                };

                in = IWM_NODE(vap->iv_bss);
                /* Update the association state, now we have it all */
                /* (eg associd comes in at this point */
                error = iwm_mvm_update_sta(sc, in);
                if (error != 0) {
                        device_printf(sc->sc_dev,
                            "%s: failed to update STA\n", __func__);
                        IWM_UNLOCK(sc);
                        IEEE80211_LOCK(ic);
                        return error;
                }
                in->in_assoc = 1;
                error = iwm_mvm_mac_ctxt_changed(sc, vap);
                if (error != 0) {
                        device_printf(sc->sc_dev,
                            "%s: failed to update MAC: %d\n", __func__, error);
                }

                /* Post-association housekeeping: filtering, power, rates. */
                iwm_mvm_enable_beacon_filter(sc, in);
                iwm_mvm_power_update_mac(sc);
                iwm_mvm_update_quotas(sc, ivp);
                iwm_setrates(sc, in);

                /* Push the freshly built link-quality table to firmware. */
                cmd.data[0] = &in->in_lq;
                if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
                        device_printf(sc->sc_dev,
                            "%s: IWM_LQ_CMD failed\n", __func__);
                }

                iwm_mvm_led_enable(sc);
                break;
        }

        default:
                break;
        }
        IWM_UNLOCK(sc);
        IEEE80211_LOCK(ic);

        return (ivp->iv_newstate(vap, nstate, arg));
}
4578
4579 void
4580 iwm_endscan_cb(void *arg, int pending)
4581 {
4582         struct iwm_softc *sc = arg;
4583         struct ieee80211com *ic = &sc->sc_ic;
4584
4585         IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4586             "%s: scan ended\n",
4587             __func__);
4588
4589         ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4590 }
4591
4592 /*
4593  * Aging and idle timeouts for the different possible scenarios
4594  * in default configuration
4595  */
static const uint32_t
iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
        {       /* single unicast: { aging timer, idle timer } */
                htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
                htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
        },
        {       /* aggregated unicast */
                htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
                htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
        },
        {       /* multicast */
                htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
                htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
        },
        {       /* block ack */
                htole32(IWM_SF_BA_AGING_TIMER_DEF),
                htole32(IWM_SF_BA_IDLE_TIMER_DEF)
        },
        {       /* TX re-attempt */
                htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
                htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
        },
};
4619
4620 /*
4621  * Aging and idle timeouts for the different possible scenarios
4622  * in single BSS MAC configuration.
4623  */
static const uint32_t
iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
        {       /* single unicast: { aging timer, idle timer } */
                htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
                htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
        },
        {       /* aggregated unicast */
                htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
                htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
        },
        {       /* multicast */
                htole32(IWM_SF_MCAST_AGING_TIMER),
                htole32(IWM_SF_MCAST_IDLE_TIMER)
        },
        {       /* block ack */
                htole32(IWM_SF_BA_AGING_TIMER),
                htole32(IWM_SF_BA_IDLE_TIMER)
        },
        {       /* TX re-attempt */
                htole32(IWM_SF_TX_RE_AGING_TIMER),
                htole32(IWM_SF_TX_RE_IDLE_TIMER)
        },
};
4647
/*
 * Fill in the watermark and timeout fields of a Smart Fifo configuration
 * command.  ni == NULL means unassociated defaults; otherwise the peer's
 * HT capability selects the full-on watermark and timeout table.
 */
static void
iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
    struct ieee80211_node *ni)
{
        int i, j, watermark;

        /* Long-delay (scan) watermark is fixed. */
        sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);

        /*
         * If we are in association flow - check antenna configuration
         * capabilities of the AP station, and choose the watermark accordingly.
         */
        if (ni) {
                if (ni->ni_flags & IEEE80211_NODE_HT) {
#ifdef notyet
                        if (ni->ni_rxmcs[2] != 0)
                                watermark = IWM_SF_W_MARK_MIMO3;
                        else if (ni->ni_rxmcs[1] != 0)
                                watermark = IWM_SF_W_MARK_MIMO2;
                        else
#endif
                                watermark = IWM_SF_W_MARK_SISO;
                } else {
                        watermark = IWM_SF_W_MARK_LEGACY;
                }
        /* default watermark value for unassociated mode. */
        } else {
                watermark = IWM_SF_W_MARK_MIMO2;
        }
        sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);

        /* All scenarios share the same long-delay aging timeout. */
        for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
                for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
                        sf_cmd->long_delay_timeouts[i][j] =
                                        htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
                }
        }

        /* Full-on timeouts: associated table vs. unassociated defaults. */
        if (ni) {
                memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
                       sizeof(iwm_sf_full_timeout));
        } else {
                memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
                       sizeof(iwm_sf_full_timeout_def));
        }
}
4694
4695 static int
4696 iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4697 {
4698         struct ieee80211com *ic = &sc->sc_ic;
4699         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4700         struct iwm_sf_cfg_cmd sf_cmd = {
4701                 .state = htole32(IWM_SF_FULL_ON),
4702         };
4703         int ret = 0;
4704
4705         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4706                 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4707
4708         switch (new_state) {
4709         case IWM_SF_UNINIT:
4710         case IWM_SF_INIT_OFF:
4711                 iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4712                 break;
4713         case IWM_SF_FULL_ON:
4714                 iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4715                 break;
4716         default:
4717                 IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4718                     "Invalid state: %d. not sending Smart Fifo cmd\n",
4719                           new_state);
4720                 return EINVAL;
4721         }
4722
4723         ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4724                                    sizeof(sf_cmd), &sf_cmd);
4725         return ret;
4726 }
4727
4728 static int
4729 iwm_send_bt_init_conf(struct iwm_softc *sc)
4730 {
4731         struct iwm_bt_coex_cmd bt_cmd;
4732
4733         bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4734         bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4735
4736         return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4737             &bt_cmd);
4738 }
4739
/*
 * Send an MCC (mobile country code) update command to configure the
 * firmware's regulatory domain, e.g. "ZZ" for the world domain.
 * alpha2 must point to a two-character country code.  Returns 0 on
 * success or an errno value from iwm_send_cmd().
 */
static int
iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
{
        struct iwm_mcc_update_cmd mcc_cmd;
        struct iwm_host_cmd hcmd = {
                .id = IWM_MCC_UPDATE_CMD,
                .flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
                .data = { &mcc_cmd },
        };
        int ret;
#ifdef IWM_DEBUG
        struct iwm_rx_packet *pkt;
        struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
        struct iwm_mcc_update_resp *mcc_resp;
        int n_channels;
        uint16_t mcc;
#endif
        /* Newer firmware replies with the v2 response layout. */
        int resp_v2 = fw_has_capa(&sc->ucode_capa,
            IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);

        memset(&mcc_cmd, 0, sizeof(mcc_cmd));
        /* Two ASCII letters packed into 16 bits, first letter high. */
        mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
        if (fw_has_api(&sc->ucode_capa, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
            fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
                mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
        else
                mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;

        /* Older firmware takes the shorter v1 command. */
        if (resp_v2)
                hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
        else
                hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);

        IWM_DPRINTF(sc, IWM_DEBUG_NODE,
            "send MCC update to FW with '%c%c' src = %d\n",
            alpha2[0], alpha2[1], mcc_cmd.source_id);

        ret = iwm_send_cmd(sc, &hcmd);
        if (ret)
                return ret;

#ifdef IWM_DEBUG
        pkt = hcmd.resp_pkt;

        /* Extract MCC response */
        if (resp_v2) {
                mcc_resp = (void *)pkt->data;
                /*
                 * NOTE(review): mcc is read without le16toh() here;
                 * harmless on little-endian hosts but worth confirming
                 * for big-endian ones (debug output only).
                 */
                mcc = mcc_resp->mcc;
                n_channels =  le32toh(mcc_resp->n_channels);
        } else {
                mcc_resp_v1 = (void *)pkt->data;
                mcc = mcc_resp_v1->mcc;
                n_channels =  le32toh(mcc_resp_v1->n_channels);
        }

        /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
        if (mcc == 0)
                mcc = 0x3030;  /* "00" - world */

        IWM_DPRINTF(sc, IWM_DEBUG_NODE,
            "regulatory domain '%c%c' (%d channels available)\n",
            mcc >> 8, mcc & 0xff, n_channels);
#endif
        /* IWM_CMD_WANT_SKB responses must be released by the caller. */
        iwm_free_resp(sc, &hcmd);

        return 0;
}
4807
4808 static void
4809 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4810 {
4811         struct iwm_host_cmd cmd = {
4812                 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4813                 .len = { sizeof(uint32_t), },
4814                 .data = { &backoff, },
4815         };
4816
4817         if (iwm_send_cmd(sc, &cmd) != 0) {
4818                 device_printf(sc->sc_dev,
4819                     "failed to change thermal tx backoff\n");
4820         }
4821 }
4822
/*
 * Bring the NIC fully up: start the hardware, run the INIT firmware
 * image, restart the hardware, load the regular runtime image, and
 * then push all one-time configuration (BT coex, antenna setup, PHY
 * calibration, auxiliary station, PHY contexts, thermal backoff,
 * device power, regulatory domain, scan config and Tx queues).
 * Returns 0 on success or an errno value; on any failure after the
 * runtime image started loading, the device is stopped again.
 */
static int
iwm_init_hw(struct iwm_softc *sc)
{
        struct ieee80211com *ic = &sc->sc_ic;
        int error, i, ac;

        if ((error = iwm_start_hw(sc)) != 0) {
                printf("iwm_start_hw: failed %d\n", error);
                return error;
        }

        if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
                printf("iwm_run_init_mvm_ucode: failed %d\n", error);
                return error;
        }

        /*
         * should stop and start HW since that INIT
         * image just loaded
         */
        iwm_stop_device(sc);
        sc->sc_ps_disabled = FALSE;
        if ((error = iwm_start_hw(sc)) != 0) {
                device_printf(sc->sc_dev, "could not initialize hardware\n");
                return error;
        }

        /* omstart, this time with the regular firmware */
        error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
        if (error) {
                device_printf(sc->sc_dev, "could not load firmware\n");
                goto error;
        }

        if ((error = iwm_send_bt_init_conf(sc)) != 0) {
                device_printf(sc->sc_dev, "bt init conf failed\n");
                goto error;
        }

        error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
        if (error != 0) {
                device_printf(sc->sc_dev, "antenna config failed\n");
                goto error;
        }

        /* Send phy db control command and then phy db calibration */
        if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
                goto error;

        if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
                device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
                goto error;
        }

        /* Add auxiliary station for scanning */
        if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
                device_printf(sc->sc_dev, "add_aux_sta failed\n");
                goto error;
        }

        for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
                /*
                 * The channel used here isn't relevant as it's
                 * going to be overwritten in the other flows.
                 * For now use the first channel we have.
                 */
                if ((error = iwm_mvm_phy_ctxt_add(sc,
                    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
                        goto error;
        }

        /* Initialize tx backoffs to the minimum. */
        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
                iwm_mvm_tt_tx_backoff(sc, 0);

        error = iwm_mvm_power_update_device(sc);
        if (error)
                goto error;

        /* "ZZ" requests the world regulatory domain as a safe default. */
        if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
                if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
                        goto error;
        }

        if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
                if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
                        goto error;
        }

        /* Enable Tx queues. */
        for (ac = 0; ac < WME_NUM_AC; ac++) {
                error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
                    iwm_mvm_ac_to_tx_fifo[ac]);
                if (error)
                        goto error;
        }

        if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
                device_printf(sc->sc_dev, "failed to disable beacon filter\n");
                goto error;
        }

        return 0;

 error:
        iwm_stop_device(sc);
        return error;
}
4931
4932 /* Allow multicast from our BSSID. */
4933 static int
4934 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4935 {
4936         struct ieee80211_node *ni = vap->iv_bss;
4937         struct iwm_mcast_filter_cmd *cmd;
4938         size_t size;
4939         int error;
4940
4941         size = roundup(sizeof(*cmd), 4);
4942         cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4943         if (cmd == NULL)
4944                 return ENOMEM;
4945         cmd->filter_own = 1;
4946         cmd->port_id = 0;
4947         cmd->count = 0;
4948         cmd->pass_all = 1;
4949         IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4950
4951         error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4952             IWM_CMD_SYNC, size, cmd);
4953         free(cmd, M_DEVBUF);
4954
4955         return (error);
4956 }
4957
4958 /*
4959  * ifnet interfaces
4960  */
4961
4962 static void
4963 iwm_init(struct iwm_softc *sc)
4964 {
4965         int error;
4966
4967         if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4968                 return;
4969         }
4970         sc->sc_generation++;
4971         sc->sc_flags &= ~IWM_FLAG_STOPPED;
4972
4973         if ((error = iwm_init_hw(sc)) != 0) {
4974                 printf("iwm_init_hw failed %d\n", error);
4975                 iwm_stop(sc);
4976                 return;
4977         }
4978
4979         /*
4980          * Ok, firmware loaded and we are jogging
4981          */
4982         sc->sc_flags |= IWM_FLAG_HW_INITED;
4983         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4984 }
4985
4986 static int
4987 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4988 {
4989         struct iwm_softc *sc;
4990         int error;
4991
4992         sc = ic->ic_softc;
4993
4994         IWM_LOCK(sc);
4995         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4996                 IWM_UNLOCK(sc);
4997                 return (ENXIO);
4998         }
4999         error = mbufq_enqueue(&sc->sc_snd, m);
5000         if (error) {
5001                 IWM_UNLOCK(sc);
5002                 return (error);
5003         }
5004         iwm_start(sc);
5005         IWM_UNLOCK(sc);
5006         return (0);
5007 }
5008
5009 /*
5010  * Dequeue packets from sendq and call send.
5011  */
5012 static void
5013 iwm_start(struct iwm_softc *sc)
5014 {
5015         struct ieee80211_node *ni;
5016         struct mbuf *m;
5017         int ac = 0;
5018
5019         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
5020         while (sc->qfullmsk == 0 &&
5021                 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
5022                 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
5023                 if (iwm_tx(sc, m, ni, ac) != 0) {
5024                         if_inc_counter(ni->ni_vap->iv_ifp,
5025                             IFCOUNTER_OERRORS, 1);
5026                         ieee80211_free_node(ni);
5027                         continue;
5028                 }
5029                 sc->sc_tx_timer = 15;
5030         }
5031         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
5032 }
5033
/*
 * Bring the device down: clear the initialized flag, mark us stopped,
 * bump the generation count (so stale callbacks can detect the
 * restart), cancel LED blinking and the Tx watchdog, and power the
 * hardware off.  NOTE(review): callers appear to hold the IWM lock
 * (see iwm_parent()) -- confirm for all call sites.
 */
static void
iwm_stop(struct iwm_softc *sc)
{

        sc->sc_flags &= ~IWM_FLAG_HW_INITED;
        sc->sc_flags |= IWM_FLAG_STOPPED;
        sc->sc_generation++;
        iwm_led_blink_stop(sc);
        sc->sc_tx_timer = 0;
        iwm_stop_device(sc);
        sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
}
5046
5047 static void
5048 iwm_watchdog(void *arg)
5049 {
5050         struct iwm_softc *sc = arg;
5051         struct ieee80211com *ic = &sc->sc_ic;
5052
5053         if (sc->sc_tx_timer > 0) {
5054                 if (--sc->sc_tx_timer == 0) {
5055                         device_printf(sc->sc_dev, "device timeout\n");
5056 #ifdef IWM_DEBUG
5057                         iwm_nic_error(sc);
5058 #endif
5059                         ieee80211_restart_all(ic);
5060                         counter_u64_add(sc->sc_ic.ic_oerrors, 1);
5061                         return;
5062                 }
5063         }
5064         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
5065 }
5066
5067 static void
5068 iwm_parent(struct ieee80211com *ic)
5069 {
5070         struct iwm_softc *sc = ic->ic_softc;
5071         int startall = 0;
5072
5073         IWM_LOCK(sc);
5074         if (ic->ic_nrunning > 0) {
5075                 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5076                         iwm_init(sc);
5077                         startall = 1;
5078                 }
5079         } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5080                 iwm_stop(sc);
5081         IWM_UNLOCK(sc);
5082         if (startall)
5083                 ieee80211_start_all(ic);
5084 }
5085
5086 /*
5087  * The interrupt side of things
5088  */
5089
5090 /*
5091  * error dumping routines are from iwlwifi/mvm/utils.c
5092  */
5093
/*
 * LMAC error event table, mirroring the layout the firmware keeps in
 * device SRAM (LOG_ERROR_TABLE_API_S_VER_3).
 * Note: This structure is read from the device with IO accesses,
 * and the reading already does the endian conversion. As it is
 * read with uint32_t-sized accesses, any members with a different size
 * need to be ordered correctly though!
 */
struct iwm_error_event_table {
        uint32_t valid;         /* (nonzero) valid, (0) log is empty */
        uint32_t error_id;              /* type of error */
        uint32_t trm_hw_status0;        /* TRM HW status */
        uint32_t trm_hw_status1;        /* TRM HW status */
        uint32_t blink2;                /* branch link */
        uint32_t ilink1;                /* interrupt link */
        uint32_t ilink2;                /* interrupt link */
        uint32_t data1;         /* error-specific data */
        uint32_t data2;         /* error-specific data */
        uint32_t data3;         /* error-specific data */
        uint32_t bcon_time;             /* beacon timer */
        uint32_t tsf_low;               /* network timestamp function timer */
        uint32_t tsf_hi;                /* network timestamp function timer */
        uint32_t gp1;           /* GP1 timer register */
        uint32_t gp2;           /* GP2 timer register */
        uint32_t fw_rev_type;   /* firmware revision type */
        uint32_t major;         /* uCode version major */
        uint32_t minor;         /* uCode version minor */
        uint32_t hw_ver;                /* HW Silicon version */
        uint32_t brd_ver;               /* HW board version */
        uint32_t log_pc;                /* log program counter */
        uint32_t frame_ptr;             /* frame pointer */
        uint32_t stack_ptr;             /* stack pointer */
        uint32_t hcmd;          /* last host command header */
        uint32_t isr0;          /* isr status register LMPM_NIC_ISR0:
                                 * rxtx_flag */
        uint32_t isr1;          /* isr status register LMPM_NIC_ISR1:
                                 * host_flag */
        uint32_t isr2;          /* isr status register LMPM_NIC_ISR2:
                                 * enc_flag */
        uint32_t isr3;          /* isr status register LMPM_NIC_ISR3:
                                 * time_flag */
        uint32_t isr4;          /* isr status register LMPM_NIC_ISR4:
                                 * wico interrupt */
        uint32_t last_cmd_id;   /* last HCMD id handled by the firmware */
        uint32_t wait_event;            /* wait event() caller address */
        uint32_t l2p_control;   /* L2pControlField */
        uint32_t l2p_duration;  /* L2pDurationField */
        uint32_t l2p_mhvalid;   /* L2pMhValidBits */
        uint32_t l2p_addr_match;        /* L2pAddrMatchStat */
        uint32_t lmpm_pmg_sel;  /* indicate which clocks are turned on
                                 * (LMPM_PMG_SEL) */
        uint32_t u_timestamp;   /* indicate when the date and time of the
                                 * compilation */
        uint32_t flow_handler;  /* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5147
/*
 * UMAC error struct - relevant starting from family 8000 chip.
 * Note: This structure is read from the device with IO accesses,
 * and the reading already does the endian conversion. As it is
 * read with u32-sized accesses, any members with a different size
 * need to be ordered correctly though!
 */
struct iwm_umac_error_event_table {
        uint32_t valid;         /* (nonzero) valid, (0) log is empty */
        uint32_t error_id;      /* type of error */
        uint32_t blink1;        /* branch link */
        uint32_t blink2;        /* branch link */
        uint32_t ilink1;        /* interrupt link */
        uint32_t ilink2;        /* interrupt link */
        uint32_t data1;         /* error-specific data */
        uint32_t data2;         /* error-specific data */
        uint32_t data3;         /* error-specific data */
        uint32_t umac_major;    /* UMAC firmware version: major */
        uint32_t umac_minor;    /* UMAC firmware version: minor */
        uint32_t frame_pointer; /* core register 27*/
        uint32_t stack_pointer; /* core register 28 */
        uint32_t cmd_header;    /* latest host cmd sent to UMAC */
        uint32_t nic_isr_pref;  /* ISR status register */
} __packed;
5172
5173 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
5174 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5175
5176 #ifdef IWM_DEBUG
/*
 * Map firmware SYSASSERT error IDs to human-readable names.  The last
 * entry ("ADVANCED_SYSASSERT", num 0) is the catch-all default that
 * iwm_desc_lookup() falls back to when no ID matches.  Declared
 * static: it is private to this file (the anonymous struct type could
 * not be referenced from another translation unit anyway).
 */
static struct {
        const char *name;
        uint8_t num;
} advanced_lookup[] = {
        { "NMI_INTERRUPT_WDG", 0x34 },
        { "SYSASSERT", 0x35 },
        { "UCODE_VERSION_MISMATCH", 0x37 },
        { "BAD_COMMAND", 0x38 },
        { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
        { "FATAL_ERROR", 0x3D },
        { "NMI_TRM_HW_ERR", 0x46 },
        { "NMI_INTERRUPT_TRM", 0x4C },
        { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
        { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
        { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
        { "NMI_INTERRUPT_HOST", 0x66 },
        { "NMI_INTERRUPT_ACTION_PT", 0x7C },
        { "NMI_INTERRUPT_UNKNOWN", 0x84 },
        { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
        { "ADVANCED_SYSASSERT", 0 },
};
5198
5199 static const char *
5200 iwm_desc_lookup(uint32_t num)
5201 {
5202         int i;
5203
5204         for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5205                 if (advanced_lookup[i].num == num)
5206                         return advanced_lookup[i].name;
5207
5208         /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5209         return advanced_lookup[i].name;
5210 }
5211
/*
 * Dump the UMAC error event table to the console.  Only meaningful on
 * family 8000+ devices; sc->umac_error_event_table holds the SRAM
 * address of the table (presumably reported by the firmware --
 * confirm where it is set).  Called from iwm_nic_error().
 */
static void
iwm_nic_umac_error(struct iwm_softc *sc)
{
        struct iwm_umac_error_event_table table;
        uint32_t base;

        base = sc->umac_error_event_table;

        /* Sanity check: the table must live in device SRAM. */
        if (base < 0x800000) {
                device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
                    base);
                return;
        }

        /* iwm_read_mem() takes its length in 32-bit words. */
        if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
                device_printf(sc->sc_dev, "reading errlog failed\n");
                return;
        }

        /*
         * NOTE(review): table.valid is treated as an entry count here,
         * not just a flag -- confirm against the firmware interface.
         */
        if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
                device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
                device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
                    sc->sc_flags, table.valid);
        }

        /* Dump every field of the UMAC error table. */
        device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
                iwm_desc_lookup(table.error_id));
        device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
        device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
        device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
            table.ilink1);
        device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
            table.ilink2);
        device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
        device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
        device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
        device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
        device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
        device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
            table.frame_pointer);
        device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
            table.stack_pointer);
        device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
        device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
            table.nic_isr_pref);
}
5258
/*
 * Support for dumping the error log seemed like a good idea ...
 * but it's mostly hex junk and the only sensible thing is the
 * hw/ucode revision (which we know anyway).  Since it's here,
 * I'll just leave it in, just in case e.g. the Intel guys want to
 * help us decipher some "ADVANCED_SYSASSERT" later.
 *
 * Reads the LMAC error event table from device SRAM at
 * sc->error_event_table and prints every field; chains to
 * iwm_nic_umac_error() when a UMAC table address is present.
 */
static void
iwm_nic_error(struct iwm_softc *sc)
{
        struct iwm_error_event_table table;
        uint32_t base;

        device_printf(sc->sc_dev, "dumping device error log\n");
        base = sc->error_event_table;
        /* Sanity check: the table must live in device SRAM. */
        if (base < 0x800000) {
                device_printf(sc->sc_dev,
                    "Invalid error log pointer 0x%08x\n", base);
                return;
        }

        /* iwm_read_mem() takes its length in 32-bit words. */
        if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
                device_printf(sc->sc_dev, "reading errlog failed\n");
                return;
        }

        if (!table.valid) {
                device_printf(sc->sc_dev, "errlog not found, skipping\n");
                return;
        }

        /*
         * NOTE(review): table.valid is treated as an entry count here,
         * not just a flag -- confirm against the firmware interface.
         */
        if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
                device_printf(sc->sc_dev, "Start Error Log Dump:\n");
                device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
                    sc->sc_flags, table.valid);
        }

        /* Dump every field of the LMAC error table. */
        device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
            iwm_desc_lookup(table.error_id));
        device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
            table.trm_hw_status0);
        device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
            table.trm_hw_status1);
        device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
        device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
        device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
        device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
        device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
        device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
        device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
        device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
        device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
        device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
        device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
        device_printf(sc->sc_dev, "%08X | uCode revision type\n",
            table.fw_rev_type);
        device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
        device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
        device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
        device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
        device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
        device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
        device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
        device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
        device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
        device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
        device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
        device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
        device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
        device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
        device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
        device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
        device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
        device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
        device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);

        /* Family 8000+ firmware additionally reports a UMAC error table. */
        if (sc->umac_error_event_table)
                iwm_nic_umac_error(sc);
}
5338 #endif
5339
5340 static void
5341 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5342 {
5343         struct ieee80211com *ic = &sc->sc_ic;
5344         struct iwm_cmd_response *cresp;
5345         struct mbuf *m1;
5346         uint32_t offset = 0;
5347         uint32_t maxoff = IWM_RBUF_SIZE;
5348         uint32_t nextoff;
5349         boolean_t stolen = FALSE;
5350
5351 #define HAVEROOM(a)     \
5352     ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
5353
5354         while (HAVEROOM(offset)) {
5355                 struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5356                     offset);
5357                 int qid, idx, code, len;
5358
5359                 qid = pkt->hdr.qid;
5360                 idx = pkt->hdr.idx;
5361
5362                 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5363
5364                 /*
5365                  * randomly get these from the firmware, no idea why.
5366                  * they at least seem harmless, so just ignore them for now
5367                  */
5368                 if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5369                     pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5370                         break;
5371                 }
5372
5373                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5374                     "rx packet qid=%d idx=%d type=%x\n",
5375                     qid & ~0x80, pkt->hdr.idx, code);
5376
5377                 len = le32toh(pkt->len_n_flags) & IWM_FH_RSCSR_FRAME_SIZE_MSK;
5378                 len += sizeof(uint32_t); /* account for status word */
5379                 nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
5380
5381                 iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5382
5383                 switch (code) {
5384                 case IWM_REPLY_RX_PHY_CMD:
5385                         iwm_mvm_rx_rx_phy_cmd(sc, pkt);
5386                         break;
5387
5388                 case IWM_REPLY_RX_MPDU_CMD: {
5389                         /*
5390                          * If this is the last frame in the RX buffer, we
5391                          * can directly feed the mbuf to the sharks here.
5392                          */
5393                         struct iwm_rx_packet *nextpkt = mtodoff(m,
5394                             struct iwm_rx_packet *, nextoff);
5395                         if (!HAVEROOM(nextoff) ||
5396                             (nextpkt->hdr.code == 0 &&
5397                              (nextpkt->hdr.qid & ~0x80) == 0 &&
5398                              nextpkt->hdr.idx == 0) ||
5399                             (nextpkt->len_n_flags ==
5400                              htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5401                                 if (iwm_mvm_rx_rx_mpdu(sc, m, offset, stolen)) {
5402                                         stolen = FALSE;
5403                                         /* Make sure we abort the loop */
5404                                         nextoff = maxoff;
5405                                 }
5406                                 break;
5407                         }
5408
5409                         /*
5410                          * Use m_copym instead of m_split, because that
5411                          * makes it easier to keep a valid rx buffer in
5412                          * the ring, when iwm_mvm_rx_rx_mpdu() fails.
5413                          *
5414                          * We need to start m_copym() at offset 0, to get the
5415                          * M_PKTHDR flag preserved.
5416                          */
5417                         m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
5418                         if (m1) {
5419                                 if (iwm_mvm_rx_rx_mpdu(sc, m1, offset, stolen))
5420                                         stolen = TRUE;
5421                                 else
5422                                         m_freem(m1);
5423                         }
5424                         break;
5425                 }
5426
5427                 case IWM_TX_CMD:
5428                         iwm_mvm_rx_tx_cmd(sc, pkt);
5429                         break;
5430
5431                 case IWM_MISSED_BEACONS_NOTIFICATION: {
5432                         struct iwm_missed_beacons_notif *resp;
5433                         int missed;
5434
5435                         /* XXX look at mac_id to determine interface ID */
5436                         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5437
5438                         resp = (void *)pkt->data;
5439                         missed = le32toh(resp->consec_missed_beacons);
5440
5441                         IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5442                             "%s: MISSED_BEACON: mac_id=%d, "
5443                             "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5444                             "num_rx=%d\n",
5445                             __func__,
5446                             le32toh(resp->mac_id),
5447                             le32toh(resp->consec_missed_beacons_since_last_rx),
5448                             le32toh(resp->consec_missed_beacons),
5449                             le32toh(resp->num_expected_beacons),
5450                             le32toh(resp->num_recvd_beacons));
5451
5452                         /* Be paranoid */
5453                         if (vap == NULL)
5454                                 break;
5455
5456                         /* XXX no net80211 locking? */
5457                         if (vap->iv_state == IEEE80211_S_RUN &&
5458                             (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5459                                 if (missed > vap->iv_bmissthreshold) {
5460                                         /* XXX bad locking; turn into task */
5461                                         IWM_UNLOCK(sc);
5462                                         ieee80211_beacon_miss(ic);
5463                                         IWM_LOCK(sc);
5464                                 }
5465                         }
5466
5467                         break;
5468                 }
5469
5470                 case IWM_MFUART_LOAD_NOTIFICATION:
5471                         break;
5472
5473                 case IWM_MVM_ALIVE:
5474                         break;
5475
5476                 case IWM_CALIB_RES_NOTIF_PHY_DB:
5477                         break;
5478
5479                 case IWM_STATISTICS_NOTIFICATION: {
5480                         struct iwm_notif_statistics *stats;
5481                         stats = (void *)pkt->data;
5482                         memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5483                         sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
5484                         break;
5485                 }
5486
5487                 case IWM_NVM_ACCESS_CMD:
5488                 case IWM_MCC_UPDATE_CMD:
5489                         if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5490                                 memcpy(sc->sc_cmd_resp,
5491                                     pkt, sizeof(sc->sc_cmd_resp));
5492                         }
5493                         break;
5494
5495                 case IWM_MCC_CHUB_UPDATE_CMD: {
5496                         struct iwm_mcc_chub_notif *notif;
5497                         notif = (void *)pkt->data;
5498
5499                         sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5500                         sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5501                         sc->sc_fw_mcc[2] = '\0';
5502                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
5503                             "fw source %d sent CC '%s'\n",
5504                             notif->source_id, sc->sc_fw_mcc);
5505                         break;
5506                 }
5507
5508                 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5509                 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5510                                  IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5511                         struct iwm_dts_measurement_notif_v1 *notif;
5512
5513                         if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5514                                 device_printf(sc->sc_dev,
5515                                     "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5516                                 break;
5517                         }
5518                         notif = (void *)pkt->data;
5519                         IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5520                             "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5521                             notif->temp);
5522                         break;
5523                 }
5524
5525                 case IWM_PHY_CONFIGURATION_CMD:
5526                 case IWM_TX_ANT_CONFIGURATION_CMD:
5527                 case IWM_ADD_STA:
5528                 case IWM_MAC_CONTEXT_CMD:
5529                 case IWM_REPLY_SF_CFG_CMD:
5530                 case IWM_POWER_TABLE_CMD:
5531                 case IWM_PHY_CONTEXT_CMD:
5532                 case IWM_BINDING_CONTEXT_CMD:
5533                 case IWM_TIME_EVENT_CMD:
5534                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5535                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5536                 case IWM_SCAN_ABORT_UMAC:
5537                 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5538                 case IWM_SCAN_OFFLOAD_ABORT_CMD:
5539                 case IWM_REPLY_BEACON_FILTERING_CMD:
5540                 case IWM_MAC_PM_POWER_TABLE:
5541                 case IWM_TIME_QUOTA_CMD:
5542                 case IWM_REMOVE_STA:
5543                 case IWM_TXPATH_FLUSH:
5544                 case IWM_LQ_CMD:
5545                 case IWM_FW_PAGING_BLOCK_CMD:
5546                 case IWM_BT_CONFIG:
5547                 case IWM_REPLY_THERMAL_MNG_BACKOFF:
5548                         cresp = (void *)pkt->data;
5549                         if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5550                                 memcpy(sc->sc_cmd_resp,
5551                                     pkt, sizeof(*pkt)+sizeof(*cresp));
5552                         }
5553                         break;
5554
5555                 /* ignore */
5556                 case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
5557                         break;
5558
5559                 case IWM_INIT_COMPLETE_NOTIF:
5560                         break;
5561
5562                 case IWM_SCAN_OFFLOAD_COMPLETE:
5563                         iwm_mvm_rx_lmac_scan_complete_notif(sc, pkt);
5564                         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5565                                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5566                                 ieee80211_runtask(ic, &sc->sc_es_task);
5567                         }
5568                         break;
5569
5570                 case IWM_SCAN_ITERATION_COMPLETE: {
5571                         struct iwm_lmac_scan_complete_notif *notif;
5572                         notif = (void *)pkt->data;
5573                         break;
5574                 }
5575
5576                 case IWM_SCAN_COMPLETE_UMAC:
5577                         iwm_mvm_rx_umac_scan_complete_notif(sc, pkt);
5578                         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5579                                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5580                                 ieee80211_runtask(ic, &sc->sc_es_task);
5581                         }
5582                         break;
5583
5584                 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5585                         struct iwm_umac_scan_iter_complete_notif *notif;
5586                         notif = (void *)pkt->data;
5587
5588                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5589                             "complete, status=0x%x, %d channels scanned\n",
5590                             notif->status, notif->scanned_channels);
5591                         break;
5592                 }
5593
5594                 case IWM_REPLY_ERROR: {
5595                         struct iwm_error_resp *resp;
5596                         resp = (void *)pkt->data;
5597
5598                         device_printf(sc->sc_dev,
5599                             "firmware error 0x%x, cmd 0x%x\n",
5600                             le32toh(resp->error_type),
5601                             resp->cmd_id);
5602                         break;
5603                 }
5604
5605                 case IWM_TIME_EVENT_NOTIFICATION: {
5606                         struct iwm_time_event_notif *notif;
5607                         notif = (void *)pkt->data;
5608
5609                         IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5610                             "TE notif status = 0x%x action = 0x%x\n",
5611                             notif->status, notif->action);
5612                         break;
5613                 }
5614
5615                 case IWM_MCAST_FILTER_CMD:
5616                         break;
5617
5618                 case IWM_SCD_QUEUE_CFG: {
5619                         struct iwm_scd_txq_cfg_rsp *rsp;
5620                         rsp = (void *)pkt->data;
5621
5622                         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5623                             "queue cfg token=0x%x sta_id=%d "
5624                             "tid=%d scd_queue=%d\n",
5625                             rsp->token, rsp->sta_id, rsp->tid,
5626                             rsp->scd_queue);
5627                         break;
5628                 }
5629
5630                 default:
5631                         device_printf(sc->sc_dev,
5632                             "frame %d/%d %x UNHANDLED (this should "
5633                             "not happen)\n", qid & ~0x80, idx,
5634                             pkt->len_n_flags);
5635                         break;
5636                 }
5637
5638                 /*
5639                  * Why test bit 0x80?  The Linux driver:
5640                  *
5641                  * There is one exception:  uCode sets bit 15 when it
5642                  * originates the response/notification, i.e. when the
5643                  * response/notification is not a direct response to a
5644                  * command sent by the driver.  For example, uCode issues
5645                  * IWM_REPLY_RX when it sends a received frame to the driver;
5646                  * it is not a direct response to any driver command.
5647                  *
5648                  * Ok, so since when is 7 == 15?  Well, the Linux driver
5649                  * uses a slightly different format for pkt->hdr, and "qid"
5650                  * is actually the upper byte of a two-byte field.
5651                  */
5652                 if (!(qid & (1 << 7)))
5653                         iwm_cmd_done(sc, pkt);
5654
5655                 offset = nextoff;
5656         }
5657         if (stolen)
5658                 m_freem(m);
5659 #undef HAVEROOM
5660 }
5661
5662 /*
5663  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5664  * Basic structure from if_iwn
5665  */
5666 static void
5667 iwm_notif_intr(struct iwm_softc *sc)
5668 {
5669         uint16_t hw;
5670
5671         bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5672             BUS_DMASYNC_POSTREAD);
5673
5674         hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5675
5676         /*
5677          * Process responses
5678          */
5679         while (sc->rxq.cur != hw) {
5680                 struct iwm_rx_ring *ring = &sc->rxq;
5681                 struct iwm_rx_data *data = &ring->data[ring->cur];
5682
5683                 bus_dmamap_sync(ring->data_dmat, data->map,
5684                     BUS_DMASYNC_POSTREAD);
5685
5686                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5687                     "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5688                 iwm_handle_rxb(sc, data->m);
5689
5690                 ring->cur = (ring->cur + 1) % IWM_RX_RING_COUNT;
5691         }
5692
5693         /*
5694          * Tell the firmware that it can reuse the ring entries that
5695          * we have just processed.
5696          * Seems like the hardware gets upset unless we align
5697          * the write by 8??
5698          */
5699         hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5700         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, rounddown2(hw, 8));
5701 }
5702
/*
 * Main interrupt service routine.  Decodes the cause bits (either from
 * the ICT table or from the INT CSR), handles fatal firmware/hardware
 * errors, rfkill, firmware-load completion, and RX notifications, then
 * re-enables interrupts.  Runs with IWM_LOCK held except across the
 * VAP-restart path, which must be called unlocked.
 */
static void
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	int handled = 0;
	/* NOTE(review): rv is assigned but never consumed below. */
	int r1, r2, rv = 0;
	int isperiodic = 0;

	IWM_LOCK(sc);
	/* Mask all interrupts while we figure out the cause. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		/*
		 * NOTE(review): htole32() is used to read a device-written
		 * value; semantically le32toh() — identical byte swap, so
		 * behavior matches on all platforms.
		 */
		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			ict[sc->ict_cur] = 0;	/* hand the slot back */
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;	/* spurious; just re-enable and leave */
	}

	/* Acknowledge the causes we are about to service. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* Safely ignore these bits for debug checks below */
	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
		int i;
		struct ieee80211com *ic = &sc->sc_ic;
		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

#ifdef IWM_DEBUG
		iwm_nic_error(sc);
#endif
		/* Dump driver status (TX and RX rings) while we're here. */
		device_printf(sc->sc_dev, "driver status:\n");
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			device_printf(sc->sc_dev,
			    "  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued);
		}
		device_printf(sc->sc_dev,
		    "  rx ring: cur=%d\n", sc->rxq.cur);
		device_printf(sc->sc_dev,
		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);

		/* Don't stop the device; just do a VAP restart */
		IWM_UNLOCK(sc);

		if (vap == NULL) {
			printf("%s: null vap\n", __func__);
			return;
		}

		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
		    "restarting\n", __func__, vap->iv_state);

		/* Must be called without the softc lock held. */
		ieee80211_restart_all(ic);
		return;
	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		device_printf(sc->sc_dev, "hardware error, stopping device\n");
		iwm_stop(sc);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);	/* unblock the firmware loader */
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		if (iwm_check_rfkill(sc)) {
			device_printf(sc->sc_dev,
			    "%s: rfkill switch, disabling interface\n",
			    __func__);
			iwm_stop(sc);
		}
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "%s: unhandled interrupts: %x\n", __func__, r1);
	rv = 1;

 out_ena:
	iwm_restore_interrupts(sc);
 out:
	IWM_UNLOCK(sc);
	return;
}
5856
5857 /*
5858  * Autoconf glue-sniffing
5859  */
5860 #define PCI_VENDOR_INTEL                0x8086
5861 #define PCI_PRODUCT_INTEL_WL_3160_1     0x08b3
5862 #define PCI_PRODUCT_INTEL_WL_3160_2     0x08b4
5863 #define PCI_PRODUCT_INTEL_WL_3165_1     0x3165
5864 #define PCI_PRODUCT_INTEL_WL_3165_2     0x3166
5865 #define PCI_PRODUCT_INTEL_WL_7260_1     0x08b1
5866 #define PCI_PRODUCT_INTEL_WL_7260_2     0x08b2
5867 #define PCI_PRODUCT_INTEL_WL_7265_1     0x095a
5868 #define PCI_PRODUCT_INTEL_WL_7265_2     0x095b
5869 #define PCI_PRODUCT_INTEL_WL_8260_1     0x24f3
5870 #define PCI_PRODUCT_INTEL_WL_8260_2     0x24f4
5871
5872 static const struct iwm_devices {
5873         uint16_t                device;
5874         const struct iwm_cfg    *cfg;
5875 } iwm_devices[] = {
5876         { PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
5877         { PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
5878         { PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
5879         { PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
5880         { PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
5881         { PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
5882         { PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
5883         { PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
5884         { PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
5885         { PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
5886 };
5887
5888 static int
5889 iwm_probe(device_t dev)
5890 {
5891         int i;
5892
5893         for (i = 0; i < nitems(iwm_devices); i++) {
5894                 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5895                     pci_get_device(dev) == iwm_devices[i].device) {
5896                         device_set_desc(dev, iwm_devices[i].cfg->name);
5897                         return (BUS_PROBE_DEFAULT);
5898                 }
5899         }
5900
5901         return (ENXIO);
5902 }
5903
5904 static int
5905 iwm_dev_check(device_t dev)
5906 {
5907         struct iwm_softc *sc;
5908         uint16_t devid;
5909         int i;
5910
5911         sc = device_get_softc(dev);
5912
5913         devid = pci_get_device(dev);
5914         for (i = 0; i < nitems(iwm_devices); i++) {
5915                 if (iwm_devices[i].device == devid) {
5916                         sc->cfg = iwm_devices[i].cfg;
5917                         return (0);
5918                 }
5919         }
5920         device_printf(dev, "unknown adapter type\n");
5921         return ENXIO;
5922 }
5923
5924 /* PCI registers */
5925 #define PCI_CFG_RETRY_TIMEOUT   0x041
5926
5927 static int
5928 iwm_pci_attach(device_t dev)
5929 {
5930         struct iwm_softc *sc;
5931         int count, error, rid;
5932         uint16_t reg;
5933
5934         sc = device_get_softc(dev);
5935
5936         /* We disable the RETRY_TIMEOUT register (0x41) to keep
5937          * PCI Tx retries from interfering with C3 CPU state */
5938         pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5939
5940         /* Enable bus-mastering and hardware bug workaround. */
5941         pci_enable_busmaster(dev);
5942         reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5943         /* if !MSI */
5944         if (reg & PCIM_STATUS_INTxSTATE) {
5945                 reg &= ~PCIM_STATUS_INTxSTATE;
5946         }
5947         pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5948
5949         rid = PCIR_BAR(0);
5950         sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5951             RF_ACTIVE);
5952         if (sc->sc_mem == NULL) {
5953                 device_printf(sc->sc_dev, "can't map mem space\n");
5954                 return (ENXIO);
5955         }
5956         sc->sc_st = rman_get_bustag(sc->sc_mem);
5957         sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5958
5959         /* Install interrupt handler. */
5960         count = 1;
5961         rid = 0;
5962         if (pci_alloc_msi(dev, &count) == 0)
5963                 rid = 1;
5964         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5965             (rid != 0 ? 0 : RF_SHAREABLE));
5966         if (sc->sc_irq == NULL) {
5967                 device_printf(dev, "can't map interrupt\n");
5968                         return (ENXIO);
5969         }
5970         error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5971             NULL, iwm_intr, sc, &sc->sc_ih);
5972         if (sc->sc_ih == NULL) {
5973                 device_printf(dev, "can't establish interrupt");
5974                         return (ENXIO);
5975         }
5976         sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5977
5978         return (0);
5979 }
5980
5981 static void
5982 iwm_pci_detach(device_t dev)
5983 {
5984         struct iwm_softc *sc = device_get_softc(dev);
5985
5986         if (sc->sc_irq != NULL) {
5987                 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5988                 bus_release_resource(dev, SYS_RES_IRQ,
5989                     rman_get_rid(sc->sc_irq), sc->sc_irq);
5990                 pci_release_msi(dev);
5991         }
5992         if (sc->sc_mem != NULL)
5993                 bus_release_resource(dev, SYS_RES_MEMORY,
5994                     rman_get_rid(sc->sc_mem), sc->sc_mem);
5995 }
5996
5997
5998
/*
 * Device attach: allocate all software and DMA resources, identify the
 * exact hardware revision, and register a config intrhook so firmware
 * loading (iwm_preinit) runs once interrupts are available.  On any
 * failure, iwm_detach_local() unwinds whatever was set up.
 */
static int
iwm_attach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	int error;
	int txq_i, i;

	sc->sc_dev = dev;
	sc->sc_attached = 1;
	IWM_LOCK_INIT(sc);
	mbufq_init(&sc->sc_snd, ifqmaxlen);
	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);

	sc->sc_notif_wait = iwm_notification_wait_init(sc);
	if (sc->sc_notif_wait == NULL) {
		device_printf(dev, "failed to init notification wait struct\n");
		goto fail;
	}

	/* Init phy db */
	sc->sc_phy_db = iwm_phy_db_init(sc);
	if (!sc->sc_phy_db) {
		device_printf(dev, "Cannot init phy_db\n");
		goto fail;
	}

	/* Set EBS as successful as long as not stated otherwise by the FW. */
	sc->last_ebs_successful = TRUE;

	/* PCI attach */
	error = iwm_pci_attach(dev);
	if (error != 0)
		goto fail;

	/* No synchronous command response outstanding yet. */
	sc->sc_wantresp = -1;

	/* Check device type */
	error = iwm_dev_check(dev);
	if (error != 0)
		goto fail;

	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

	if (iwm_prepare_card_hw(sc) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		goto fail;
	}

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		int ret;
		uint32_t hw_step;

		/*
		 * In order to recognize C step the driver should read the
		 * chip version id located at the AUX bus MISC address.
		 */
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(2);

		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   25000);
		if (!ret) {
			device_printf(sc->sc_dev,
			    "Failed to wake up the nic\n");
			goto fail;
		}

		if (iwm_nic_lock(sc)) {
			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
			hw_step |= IWM_ENABLE_WFPM;
			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
			/* Fold the C-step indication back into sc_hw_rev. */
			if (hw_step == 0x3)
				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
						(IWM_SILICON_C_STEP << 2);
			iwm_nic_unlock(sc);
		} else {
			device_printf(sc->sc_dev, "Failed to lock the nic\n");
			goto fail;
		}
	}

	/* special-case 7265D, it has the same PCI IDs. */
	if (sc->cfg == &iwm7265_cfg &&
	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
		sc->cfg = &iwm7265d_cfg;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		device_printf(dev, "could not allocate memory for firmware\n");
		goto fail;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		device_printf(dev, "could not allocate keep warm page\n");
		goto fail;
	}

	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		device_printf(dev, "could not allocate ICT table\n");
		goto fail;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		device_printf(dev, "could not allocate TX scheduler rings\n");
		goto fail;
	}

	/* Allocate TX rings */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			device_printf(dev,
			    "could not allocate TX ring %d\n",
			    txq_i);
			goto fail;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		device_printf(dev, "could not allocate RX ring\n");
		goto fail;
	}

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_STA |
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_WME |
	    IEEE80211_C_PMGT |
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
	    ;
	/* Advertise full-offload scanning */
	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
		sc->sc_phyctxt[i].color = 0;
		sc->sc_phyctxt[i].ref = 0;
		sc->sc_phyctxt[i].channel = NULL;
	}

	/* Default noise floor */
	sc->sc_noise = -96;

	/* Max RSSI */
	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;

	/* Defer firmware load until interrupts are enabled. */
	sc->sc_preinit_hook.ich_func = iwm_preinit;
	sc->sc_preinit_hook.ich_arg = sc;
	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
		device_printf(dev, "config_intrhook_establish failed\n");
		goto fail;
	}

#ifdef IWM_DEBUG
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);

	return 0;

	/* Free allocated memory if something failed during attachment. */
fail:
	iwm_detach_local(sc, 0);

	return ENXIO;
}
6201
6202 static int
6203 iwm_is_valid_ether_addr(uint8_t *addr)
6204 {
6205         char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6206
6207         if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6208                 return (FALSE);
6209
6210         return (TRUE);
6211 }
6212
6213 static int
6214 iwm_wme_update(struct ieee80211com *ic)
6215 {
6216 #define IWM_EXP2(x)     ((1 << (x)) - 1)        /* CWmin = 2^ECWmin - 1 */
6217         struct iwm_softc *sc = ic->ic_softc;
6218         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6219         struct iwm_vap *ivp = IWM_VAP(vap);
6220         struct iwm_node *in;
6221         struct wmeParams tmp[WME_NUM_AC];
6222         int aci, error;
6223
6224         if (vap == NULL)
6225                 return (0);
6226
6227         IEEE80211_LOCK(ic);
6228         for (aci = 0; aci < WME_NUM_AC; aci++)
6229                 tmp[aci] = ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
6230         IEEE80211_UNLOCK(ic);
6231
6232         IWM_LOCK(sc);
6233         for (aci = 0; aci < WME_NUM_AC; aci++) {
6234                 const struct wmeParams *ac = &tmp[aci];
6235                 ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6236                 ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6237                 ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6238                 ivp->queue_params[aci].edca_txop =
6239                     IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6240         }
6241         ivp->have_wme = TRUE;
6242         if (ivp->is_uploaded && vap->iv_bss != NULL) {
6243                 in = IWM_NODE(vap->iv_bss);
6244                 if (in->in_assoc) {
6245                         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
6246                                 device_printf(sc->sc_dev,
6247                                     "%s: failed to update MAC\n", __func__);
6248                         }
6249                 }
6250         }
6251         IWM_UNLOCK(sc);
6252
6253         return (0);
6254 #undef IWM_EXP2
6255 }
6256
/*
 * Deferred attach hook, run via config_intrhook once interrupts are
 * available: start the hardware once to run the init firmware and read
 * NVM data, then attach the net80211 layer and install driver methods.
 * On failure the whole device is torn down via iwm_detach_local().
 */
static void
iwm_preinit(void *arg)
{
        struct iwm_softc *sc = arg;
        device_t dev = sc->sc_dev;
        struct ieee80211com *ic = &sc->sc_ic;
        int error;

        IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
            "->%s\n", __func__);

        IWM_LOCK(sc);
        if ((error = iwm_start_hw(sc)) != 0) {
                device_printf(dev, "could not initialize hardware\n");
                IWM_UNLOCK(sc);
                goto fail;
        }

        /*
         * Run the init firmware once to obtain NVM/capability data, then
         * stop the device again; normal operation restarts it later.
         */
        error = iwm_run_init_mvm_ucode(sc, 1);
        iwm_stop_device(sc);
        if (error) {
                IWM_UNLOCK(sc);
                goto fail;
        }
        device_printf(dev,
            "hw rev 0x%x, fw ver %s, address %s\n",
            sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
            sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));

        /* not all hardware can do 5GHz band */
        if (!sc->nvm_data->sku_cap_band_52GHz_enable)
                memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
                    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
        IWM_UNLOCK(sc);

        iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
            ic->ic_channels);

        /*
         * At this point we've committed - if we fail to do setup,
         * we now also have to tear down the net80211 state.
         */
        ieee80211_ifattach(ic);
        /* Install the driver's net80211 method overrides. */
        ic->ic_vap_create = iwm_vap_create;
        ic->ic_vap_delete = iwm_vap_delete;
        ic->ic_raw_xmit = iwm_raw_xmit;
        ic->ic_node_alloc = iwm_node_alloc;
        ic->ic_scan_start = iwm_scan_start;
        ic->ic_scan_end = iwm_scan_end;
        ic->ic_update_mcast = iwm_update_mcast;
        ic->ic_getradiocaps = iwm_init_channel_map;
        ic->ic_set_channel = iwm_set_channel;
        ic->ic_scan_curchan = iwm_scan_curchan;
        ic->ic_scan_mindwell = iwm_scan_mindwell;
        ic->ic_wme.wme_update = iwm_wme_update;
        ic->ic_parent = iwm_parent;
        ic->ic_transmit = iwm_transmit;
        iwm_radiotap_attach(sc);
        if (bootverbose)
                ieee80211_announce(ic);

        IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
            "<-%s\n", __func__);
        /* Attach complete: release the boot-time intrhook. */
        config_intrhook_disestablish(&sc->sc_preinit_hook);

        return;
fail:
        /* Release the intrhook first so boot can proceed, then tear down. */
        config_intrhook_disestablish(&sc->sc_preinit_hook);
        iwm_detach_local(sc, 0);
}
6327
6328 /*
6329  * Attach the interface to 802.11 radiotap.
6330  */
6331 static void
6332 iwm_radiotap_attach(struct iwm_softc *sc)
6333 {
6334         struct ieee80211com *ic = &sc->sc_ic;
6335
6336         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6337             "->%s begin\n", __func__);
6338         ieee80211_radiotap_attach(ic,
6339             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6340                 IWM_TX_RADIOTAP_PRESENT,
6341             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6342                 IWM_RX_RADIOTAP_PRESENT);
6343         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6344             "->%s end\n", __func__);
6345 }
6346
6347 static struct ieee80211vap *
6348 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6349     enum ieee80211_opmode opmode, int flags,
6350     const uint8_t bssid[IEEE80211_ADDR_LEN],
6351     const uint8_t mac[IEEE80211_ADDR_LEN])
6352 {
6353         struct iwm_vap *ivp;
6354         struct ieee80211vap *vap;
6355
6356         if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6357                 return NULL;
6358         ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6359         vap = &ivp->iv_vap;
6360         ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6361         vap->iv_bmissthreshold = 10;            /* override default */
6362         /* Override with driver methods. */
6363         ivp->iv_newstate = vap->iv_newstate;
6364         vap->iv_newstate = iwm_newstate;
6365
6366         ivp->id = IWM_DEFAULT_MACID;
6367         ivp->color = IWM_DEFAULT_COLOR;
6368
6369         ivp->have_wme = FALSE;
6370
6371         ieee80211_ratectl_init(vap);
6372         /* Complete setup. */
6373         ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6374             mac);
6375         ic->ic_opmode = opmode;
6376
6377         return vap;
6378 }
6379
6380 static void
6381 iwm_vap_delete(struct ieee80211vap *vap)
6382 {
6383         struct iwm_vap *ivp = IWM_VAP(vap);
6384
6385         ieee80211_ratectl_deinit(vap);
6386         ieee80211_vap_detach(vap);
6387         free(ivp, M_80211_VAP);
6388 }
6389
6390 static void
6391 iwm_scan_start(struct ieee80211com *ic)
6392 {
6393         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6394         struct iwm_softc *sc = ic->ic_softc;
6395         int error;
6396
6397         IWM_LOCK(sc);
6398         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6399                 /* This should not be possible */
6400                 device_printf(sc->sc_dev,
6401                     "%s: Previous scan not completed yet\n", __func__);
6402         }
6403         if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6404                 error = iwm_mvm_umac_scan(sc);
6405         else
6406                 error = iwm_mvm_lmac_scan(sc);
6407         if (error != 0) {
6408                 device_printf(sc->sc_dev, "could not initiate scan\n");
6409                 IWM_UNLOCK(sc);
6410                 ieee80211_cancel_scan(vap);
6411         } else {
6412                 sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6413                 iwm_led_blink_start(sc);
6414                 IWM_UNLOCK(sc);
6415         }
6416 }
6417
6418 static void
6419 iwm_scan_end(struct ieee80211com *ic)
6420 {
6421         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6422         struct iwm_softc *sc = ic->ic_softc;
6423
6424         IWM_LOCK(sc);
6425         iwm_led_blink_stop(sc);
6426         if (vap->iv_state == IEEE80211_S_RUN)
6427                 iwm_mvm_led_enable(sc);
6428         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6429                 /*
6430                  * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
6431                  * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
6432                  * taskqueue.
6433                  */
6434                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
6435                 iwm_mvm_scan_stop_wait(sc);
6436         }
6437         IWM_UNLOCK(sc);
6438
6439         /*
6440          * Make sure we don't race, if sc_es_task is still enqueued here.
6441          * This is to make sure that it won't call ieee80211_scan_done
6442          * when we have already started the next scan.
6443          */
6444         taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
6445 }
6446
/* Multicast filter updates are not implemented; intentionally a no-op. */
static void
iwm_update_mcast(struct ieee80211com *ic)
{
}
6451
/* Channel changes are handled by the firmware; intentionally a no-op. */
static void
iwm_set_channel(struct ieee80211com *ic)
{
}
6456
/* Per-channel scanning is firmware-driven; intentionally a no-op. */
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
6461
/* Minimum-dwell handling is firmware-driven; intentionally a no-op. */
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
6467
/*
 * (Re)initialization task: stop the device and, if any interface is
 * still marked running, bring it back up.  Serialized against other
 * init/stop paths through the IWM_FLAG_BUSY flag and sleep/wakeup on
 * sc_flags.
 */
void
iwm_init_task(void *arg1)
{
        struct iwm_softc *sc = arg1;

        IWM_LOCK(sc);
        /* Wait until any concurrent init/stop has released BUSY. */
        while (sc->sc_flags & IWM_FLAG_BUSY)
                msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
        sc->sc_flags |= IWM_FLAG_BUSY;
        iwm_stop(sc);
        if (sc->sc_ic.ic_nrunning > 0)
                iwm_init(sc);
        sc->sc_flags &= ~IWM_FLAG_BUSY;
        /* Wake anyone blocked on IWM_FLAG_BUSY above. */
        wakeup(&sc->sc_flags);
        IWM_UNLOCK(sc);
}
6484
6485 static int
6486 iwm_resume(device_t dev)
6487 {
6488         struct iwm_softc *sc = device_get_softc(dev);
6489         int do_reinit = 0;
6490
6491         /*
6492          * We disable the RETRY_TIMEOUT register (0x41) to keep
6493          * PCI Tx retries from interfering with C3 CPU state.
6494          */
6495         pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6496         iwm_init_task(device_get_softc(dev));
6497
6498         IWM_LOCK(sc);
6499         if (sc->sc_flags & IWM_FLAG_SCANNING) {
6500                 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6501                 do_reinit = 1;
6502         }
6503         IWM_UNLOCK(sc);
6504
6505         if (do_reinit)
6506                 ieee80211_resume_all(&sc->sc_ic);
6507
6508         return 0;
6509 }
6510
6511 static int
6512 iwm_suspend(device_t dev)
6513 {
6514         int do_stop = 0;
6515         struct iwm_softc *sc = device_get_softc(dev);
6516
6517         do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6518
6519         ieee80211_suspend_all(&sc->sc_ic);
6520
6521         if (do_stop) {
6522                 IWM_LOCK(sc);
6523                 iwm_stop(sc);
6524                 sc->sc_flags |= IWM_FLAG_SCANNING;
6525                 IWM_UNLOCK(sc);
6526         }
6527
6528         return (0);
6529 }
6530
/*
 * Common teardown for detach and failed attach.  'do_net80211' selects
 * whether the net80211 layer was attached and must be detached too
 * (attach-failure paths pass 0).  Safe to call more than once: the
 * sc_attached flag makes subsequent calls no-ops.  Always returns 0.
 */
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
        struct iwm_fw_info *fw = &sc->sc_fw;
        device_t dev = sc->sc_dev;
        int i;

        if (!sc->sc_attached)
                return 0;
        sc->sc_attached = 0;

        /* Drain pending scan-end work before stopping anything. */
        if (do_net80211)
                ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);

        /* Stop timers, then the hardware, then detach net80211. */
        callout_drain(&sc->sc_led_blink_to);
        callout_drain(&sc->sc_watchdog_to);
        iwm_stop_device(sc);
        if (do_net80211) {
                ieee80211_ifdetach(&sc->sc_ic);
        }

        iwm_phy_db_free(sc->sc_phy_db);
        sc->sc_phy_db = NULL;

        iwm_free_nvm_data(sc->nvm_data);

        /* Free descriptor rings */
        iwm_free_rx_ring(sc, &sc->rxq);
        for (i = 0; i < nitems(sc->txq); i++)
                iwm_free_tx_ring(sc, &sc->txq[i]);

        /* Free firmware */
        if (fw->fw_fp != NULL)
                iwm_fw_info_free(fw);

        /* Free scheduler */
        iwm_dma_contig_free(&sc->sched_dma);
        iwm_dma_contig_free(&sc->ict_dma);
        iwm_dma_contig_free(&sc->kw_dma);
        iwm_dma_contig_free(&sc->fw_dma);

        iwm_free_fw_paging(sc);

        /* Finished with the hardware - detach things */
        iwm_pci_detach(dev);

        if (sc->sc_notif_wait != NULL) {
                iwm_notification_wait_free(sc->sc_notif_wait);
                sc->sc_notif_wait = NULL;
        }

        /* Discard any queued frames and destroy the lock last. */
        mbufq_drain(&sc->sc_snd);
        IWM_LOCK_DESTROY(sc);

        return (0);
}
6587
6588 static int
6589 iwm_detach(device_t dev)
6590 {
6591         struct iwm_softc *sc = device_get_softc(dev);
6592
6593         return (iwm_detach_local(sc, 1));
6594 }
6595
/* newbus method table for the iwm PCI device. */
static device_method_t iwm_pci_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         iwm_probe),
        DEVMETHOD(device_attach,        iwm_attach),
        DEVMETHOD(device_detach,        iwm_detach),
        DEVMETHOD(device_suspend,       iwm_suspend),
        DEVMETHOD(device_resume,        iwm_resume),

        DEVMETHOD_END
};

/* Driver declaration: name, methods, and softc size for newbus. */
static driver_t iwm_pci_driver = {
        "iwm",
        iwm_pci_methods,
        sizeof (struct iwm_softc)
};

static devclass_t iwm_devclass;

/* Register on the pci bus and declare module dependencies. */
DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);