/*      $OpenBSD: if_iwm.c,v 1.39 2015/03/23 00:35:19 jsg Exp $ */

/*
 * Copyright (c) 2014 genua mbh <info@genua.de>
 * Copyright (c) 2014 Fixup Software Ltd.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 * Driver version we are currently based off of is
 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/linker.h>

#include <machine/bus.h>
#include <machine/endian.h>
#include <machine/resource.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <net/bpf.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>
#include <net80211/ieee80211_radiotap.h>

#include <dev/iwm/if_iwmreg.h>
#include <dev/iwm/if_iwmvar.h>
#include <dev/iwm/if_iwm_debug.h>
#include <dev/iwm/if_iwm_util.h>
#include <dev/iwm/if_iwm_binding.h>
#include <dev/iwm/if_iwm_phy_db.h>
#include <dev/iwm/if_iwm_mac_ctxt.h>
#include <dev/iwm/if_iwm_phy_ctxt.h>
#include <dev/iwm/if_iwm_time_event.h>
#include <dev/iwm/if_iwm_power.h>
#include <dev/iwm/if_iwm_scan.h>

#include <dev/iwm/if_iwm_pcie_trans.h>

const uint8_t iwm_nvm_channels[] = {
        /* 2.4 GHz */
        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
        /* 5 GHz */
        36, 40, 44, 48, 52, 56, 60, 64,
        100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
        149, 153, 157, 161, 165
};
#define IWM_NUM_2GHZ_CHANNELS   14

/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
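/*
 * Note: the "rate" field below is in 500 kbps units (the usual net80211
 * convention, so 2 == 1 Mbps and 108 == 54 Mbps); "plcp" is the matching
 * PLCP rate code that the firmware expects in TX commands.
 */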
const struct iwm_rate {
        uint8_t rate;
        uint8_t plcp;
} iwm_rates[] = {
        {   2,  IWM_RATE_1M_PLCP  },
        {   4,  IWM_RATE_2M_PLCP  },
        {  11,  IWM_RATE_5M_PLCP  },
        {  22,  IWM_RATE_11M_PLCP },
        {  12,  IWM_RATE_6M_PLCP  },
        {  18,  IWM_RATE_9M_PLCP  },
        {  24,  IWM_RATE_12M_PLCP },
        {  36,  IWM_RATE_18M_PLCP },
        {  48,  IWM_RATE_24M_PLCP },
        {  72,  IWM_RATE_36M_PLCP },
        {  96,  IWM_RATE_48M_PLCP },
        { 108,  IWM_RATE_54M_PLCP },
};
#define IWM_RIDX_CCK    0
#define IWM_RIDX_OFDM   4
#define IWM_RIDX_MAX    (nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)

static int      iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
static int      iwm_firmware_store_section(struct iwm_softc *,
                                           enum iwm_ucode_type,
                                           const uint8_t *, size_t);
static int      iwm_set_default_calib(struct iwm_softc *, const void *);
static void     iwm_fw_info_free(struct iwm_fw_info *);
static int      iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
static void     iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int      iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
                                     bus_size_t, bus_size_t);
static void     iwm_dma_contig_free(struct iwm_dma_info *);
static int      iwm_alloc_fwmem(struct iwm_softc *);
static void     iwm_free_fwmem(struct iwm_softc *);
static int      iwm_alloc_sched(struct iwm_softc *);
static void     iwm_free_sched(struct iwm_softc *);
static int      iwm_alloc_kw(struct iwm_softc *);
static void     iwm_free_kw(struct iwm_softc *);
static int      iwm_alloc_ict(struct iwm_softc *);
static void     iwm_free_ict(struct iwm_softc *);
static int      iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void     iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void     iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int      iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
                                  int);
static void     iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void     iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void     iwm_enable_interrupts(struct iwm_softc *);
static void     iwm_restore_interrupts(struct iwm_softc *);
static void     iwm_disable_interrupts(struct iwm_softc *);
static void     iwm_ict_reset(struct iwm_softc *);
static int      iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
static void     iwm_stop_device(struct iwm_softc *);
static void     iwm_mvm_nic_config(struct iwm_softc *);
static int      iwm_nic_rx_init(struct iwm_softc *);
static int      iwm_nic_tx_init(struct iwm_softc *);
static int      iwm_nic_init(struct iwm_softc *);
static void     iwm_enable_txq(struct iwm_softc *, int, int);
static int      iwm_post_alive(struct iwm_softc *);
static int      iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
                                   uint16_t, uint8_t *, uint16_t *);
static int      iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
                                     uint16_t *);
static void     iwm_init_channel_map(struct iwm_softc *,
                                     const uint16_t * const);
static int      iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
                                   const uint16_t *, const uint16_t *, uint8_t,
                                   uint8_t);
struct iwm_nvm_section;
static int      iwm_parse_nvm_sections(struct iwm_softc *,
                                       struct iwm_nvm_section *);
static int      iwm_nvm_init(struct iwm_softc *);
static int      iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
                                        const uint8_t *, uint32_t);
static int      iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
static int      iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
static int      iwm_fw_alive(struct iwm_softc *, uint32_t);
static int      iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int      iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int      iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
                                              enum iwm_ucode_type);
static int      iwm_run_init_mvm_ucode(struct iwm_softc *, int);
static int      iwm_rx_addbuf(struct iwm_softc *, int, int);
static int      iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
static int      iwm_mvm_get_signal_strength(struct iwm_softc *,
                                            struct iwm_rx_phy_info *);
static void     iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
                                      struct iwm_rx_packet *,
                                      struct iwm_rx_data *);
static int      iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
static void     iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
                                   struct iwm_rx_data *);
static int      iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
                                         struct iwm_rx_packet *,
                                         struct iwm_node *);
static void     iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
                                  struct iwm_rx_data *);
static void     iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
#if 0
static void     iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
                                 uint16_t);
#endif
static const struct iwm_rate *
        iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
                        struct ieee80211_frame *, struct iwm_tx_cmd *);
static int      iwm_tx(struct iwm_softc *, struct mbuf *,
                       struct ieee80211_node *, int);
static int      iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
                             const struct ieee80211_bpf_params *);
static void     iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *,
                                             struct iwm_mvm_add_sta_cmd_v5 *);
static int      iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
                                                struct iwm_mvm_add_sta_cmd_v6 *,
                                                int *);
static int      iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
                                       int);
static int      iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
static int      iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
static int      iwm_mvm_add_int_sta_common(struct iwm_softc *,
                                           struct iwm_int_sta *,
                                           const uint8_t *, uint16_t, uint16_t);
static int      iwm_mvm_add_aux_sta(struct iwm_softc *);
static int      iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
static int      iwm_auth(struct ieee80211vap *, struct iwm_softc *);
static int      iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
static int      iwm_release(struct iwm_softc *, struct iwm_node *);
static struct ieee80211_node *
                iwm_node_alloc(struct ieee80211vap *,
                               const uint8_t[IEEE80211_ADDR_LEN]);
static void     iwm_setrates(struct iwm_softc *, struct iwm_node *);
static int      iwm_media_change(struct ifnet *);
static int      iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void     iwm_endscan_cb(void *, int);
static int      iwm_init_hw(struct iwm_softc *);
static void     iwm_init(struct iwm_softc *);
static void     iwm_start(struct iwm_softc *);
static void     iwm_stop(struct iwm_softc *);
static void     iwm_watchdog(void *);
static void     iwm_parent(struct ieee80211com *);
#ifdef IWM_DEBUG
static const char *
                iwm_desc_lookup(uint32_t);
static void     iwm_nic_error(struct iwm_softc *);
#endif
static void     iwm_notif_intr(struct iwm_softc *);
static void     iwm_intr(void *);
static int      iwm_attach(device_t);
static void     iwm_preinit(void *);
static int      iwm_detach_local(struct iwm_softc *sc, int);
static void     iwm_init_task(void *);
static void     iwm_radiotap_attach(struct iwm_softc *);
static struct ieee80211vap *
                iwm_vap_create(struct ieee80211com *,
                               const char [IFNAMSIZ], int,
                               enum ieee80211_opmode, int,
                               const uint8_t [IEEE80211_ADDR_LEN],
                               const uint8_t [IEEE80211_ADDR_LEN]);
static void     iwm_vap_delete(struct ieee80211vap *);
static void     iwm_scan_start(struct ieee80211com *);
static void     iwm_scan_end(struct ieee80211com *);
static void     iwm_update_mcast(struct ieee80211com *);
static void     iwm_set_channel(struct ieee80211com *);
static void     iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void     iwm_scan_mindwell(struct ieee80211_scan_state *);
static int      iwm_detach(device_t);

/*
 * Firmware parser.
 */

static int
iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
{
        const struct iwm_fw_cscheme_list *l = (const void *)data;

        if (dlen < sizeof(*l) ||
            dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
                return EINVAL;

        /* we don't actually store anything for now, always use s/w crypto */

        return 0;
}

static int
iwm_firmware_store_section(struct iwm_softc *sc,
    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
{
        struct iwm_fw_sects *fws;
        struct iwm_fw_onesect *fwone;

        if (type >= IWM_UCODE_TYPE_MAX)
                return EINVAL;
        if (dlen < sizeof(uint32_t))
                return EINVAL;

        fws = &sc->sc_fw.fw_sects[type];
        if (fws->fw_count >= IWM_UCODE_SECT_MAX)
                return EINVAL;

        fwone = &fws->fw_sect[fws->fw_count];

        /* the first 32 bits are the device load offset */
        memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));

        /* rest is data */
        fwone->fws_data = data + sizeof(uint32_t);
        fwone->fws_len = dlen - sizeof(uint32_t);

        fws->fw_count++;
        fws->fw_totlen += fwone->fws_len;

        return 0;
}

/* iwlwifi: iwl-drv.c */
struct iwm_tlv_calib_data {
        uint32_t ucode_type;
        struct iwm_tlv_calib_ctrl calib;
} __packed;

static int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
        const struct iwm_tlv_calib_data *def_calib = data;
        uint32_t ucode_type = le32toh(def_calib->ucode_type);

        if (ucode_type >= IWM_UCODE_TYPE_MAX) {
                device_printf(sc->sc_dev,
                    "Wrong ucode_type %u for default "
                    "calibration.\n", ucode_type);
                return EINVAL;
        }

        sc->sc_default_calib[ucode_type].flow_trigger =
            def_calib->calib.flow_trigger;
        sc->sc_default_calib[ucode_type].event_trigger =
            def_calib->calib.event_trigger;

        return 0;
}

static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
        firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
        fw->fw_fp = NULL;
        /* don't touch fw->fw_status */
        memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}

static int
iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
        struct iwm_fw_info *fw = &sc->sc_fw;
        const struct iwm_tlv_ucode_header *uhdr;
        struct iwm_ucode_tlv tlv;
        enum iwm_ucode_tlv_type tlv_type;
        const struct firmware *fwp;
        const uint8_t *data;
        int error = 0;
        size_t len;

        if (fw->fw_status == IWM_FW_STATUS_DONE &&
            ucode_type != IWM_UCODE_TYPE_INIT)
                return 0;

        while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
                msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
        fw->fw_status = IWM_FW_STATUS_INPROGRESS;

        if (fw->fw_fp != NULL)
                iwm_fw_info_free(fw);

        /*
         * Load firmware into driver memory.
         * fw_fp will be set.
         */
        IWM_UNLOCK(sc);
        fwp = firmware_get(sc->sc_fwname);
        IWM_LOCK(sc);
        if (fwp == NULL) {
                device_printf(sc->sc_dev,
                    "could not read firmware %s\n", sc->sc_fwname);
                error = ENOENT;
                goto out;
        }
        fw->fw_fp = fwp;

        /*
         * Parse firmware contents
         */

        uhdr = (const void *)fw->fw_fp->data;
        if (*(const uint32_t *)fw->fw_fp->data != 0
            || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
                device_printf(sc->sc_dev, "invalid firmware %s\n",
                    sc->sc_fwname);
                error = EINVAL;
                goto out;
        }

        sc->sc_fwver = le32toh(uhdr->ver);
        data = uhdr->data;
        len = fw->fw_fp->datasize - sizeof(*uhdr);

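        /*
         * The image is a sequence of TLV records: each record starts with
         * a (type, length) header, followed by "length" bytes of payload
         * padded out to a 4-byte boundary (hence the roundup() below).
         */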
        while (len >= sizeof(tlv)) {
                size_t tlv_len;
                const void *tlv_data;

                memcpy(&tlv, data, sizeof(tlv));
                tlv_len = le32toh(tlv.length);
                tlv_type = le32toh(tlv.type);

                len -= sizeof(tlv);
                data += sizeof(tlv);
                tlv_data = data;

                if (len < tlv_len) {
                        device_printf(sc->sc_dev,
                            "firmware too short: %zu bytes\n",
                            len);
                        error = EINVAL;
                        goto parse_out;
                }

                switch ((int)tlv_type) {
                case IWM_UCODE_TLV_PROBE_MAX_LEN:
                        if (tlv_len < sizeof(uint32_t)) {
                                device_printf(sc->sc_dev,
                                    "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
                                    __func__,
                                    (int) tlv_len);
                                error = EINVAL;
                                goto parse_out;
                        }
                        sc->sc_capa_max_probe_len
                            = le32toh(*(const uint32_t *)tlv_data);
                        /* limit it to something sensible */
                        if (sc->sc_capa_max_probe_len > (1<<16)) {
                                IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
                                    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
                                    "ridiculous\n", __func__);
                                error = EINVAL;
                                goto parse_out;
                        }
                        break;
                case IWM_UCODE_TLV_PAN:
                        if (tlv_len) {
                                device_printf(sc->sc_dev,
                                    "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
                                    __func__,
                                    (int) tlv_len);
                                error = EINVAL;
                                goto parse_out;
                        }
                        sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
                        break;
                case IWM_UCODE_TLV_FLAGS:
                        if (tlv_len < sizeof(uint32_t)) {
                                device_printf(sc->sc_dev,
                                    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
                                    __func__,
                                    (int) tlv_len);
                                error = EINVAL;
                                goto parse_out;
                        }
                        /*
                         * Apparently there can be many flags, but Linux driver
                         * parses only the first one, and so do we.
                         *
                         * XXX: why does this override IWM_UCODE_TLV_PAN?
                         * Intentional or a bug?  Observations from
                         * current firmware file:
                         *  1) TLV_PAN is parsed first
                         *  2) TLV_FLAGS contains TLV_FLAGS_PAN
                         * ==> this resets TLV_PAN to itself... hnnnk
                         */
                        sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
                        break;
                case IWM_UCODE_TLV_CSCHEME:
                        if ((error = iwm_store_cscheme(sc,
                            tlv_data, tlv_len)) != 0) {
                                device_printf(sc->sc_dev,
                                    "%s: iwm_store_cscheme(): returned %d\n",
                                    __func__,
                                    error);
                                goto parse_out;
                        }
                        break;
                case IWM_UCODE_TLV_NUM_OF_CPU:
                        if (tlv_len != sizeof(uint32_t)) {
                                device_printf(sc->sc_dev,
                                    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
                                    __func__,
                                    (int) tlv_len);
                                error = EINVAL;
                                goto parse_out;
                        }
                        if (le32toh(*(const uint32_t*)tlv_data) != 1) {
                                device_printf(sc->sc_dev,
                                    "%s: driver supports "
                                    "only TLV_NUM_OF_CPU == 1\n",
                                    __func__);
                                error = EINVAL;
                                goto parse_out;
                        }
                        break;
                case IWM_UCODE_TLV_SEC_RT:
                        if ((error = iwm_firmware_store_section(sc,
                            IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0) {
                                device_printf(sc->sc_dev,
                                    "%s: IWM_UCODE_TYPE_REGULAR: iwm_firmware_store_section() failed; %d\n",
                                    __func__,
                                    error);
                                goto parse_out;
                        }
                        break;
                case IWM_UCODE_TLV_SEC_INIT:
                        if ((error = iwm_firmware_store_section(sc,
                            IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0) {
                                device_printf(sc->sc_dev,
                                    "%s: IWM_UCODE_TYPE_INIT: iwm_firmware_store_section() failed; %d\n",
                                    __func__,
                                    error);
                                goto parse_out;
                        }
                        break;
                case IWM_UCODE_TLV_SEC_WOWLAN:
                        if ((error = iwm_firmware_store_section(sc,
                            IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0) {
                                device_printf(sc->sc_dev,
                                    "%s: IWM_UCODE_TYPE_WOW: iwm_firmware_store_section() failed; %d\n",
                                    __func__,
                                    error);
                                goto parse_out;
                        }
                        break;
                case IWM_UCODE_TLV_DEF_CALIB:
                        if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
                                device_printf(sc->sc_dev,
                                    "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%d) != sizeof(iwm_tlv_calib_data) (%d)\n",
                                    __func__,
                                    (int) tlv_len,
                                    (int) sizeof(struct iwm_tlv_calib_data));
                                error = EINVAL;
                                goto parse_out;
                        }
                        if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
                                device_printf(sc->sc_dev,
                                    "%s: iwm_set_default_calib() failed: %d\n",
                                    __func__,
                                    error);
                                goto parse_out;
                        }
                        break;
                case IWM_UCODE_TLV_PHY_SKU:
                        if (tlv_len != sizeof(uint32_t)) {
                                error = EINVAL;
                                device_printf(sc->sc_dev,
                                    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) != sizeof(uint32_t)\n",
                                    __func__,
                                    (int) tlv_len);
                                goto parse_out;
                        }
                        sc->sc_fw_phy_config =
                            le32toh(*(const uint32_t *)tlv_data);
                        break;

                case IWM_UCODE_TLV_API_CHANGES_SET:
                case IWM_UCODE_TLV_ENABLED_CAPABILITIES:
                        /* ignore, not used by current driver */
                        break;

                default:
                        device_printf(sc->sc_dev,
                            "%s: unknown firmware section %d, abort\n",
                            __func__, tlv_type);
                        error = EINVAL;
                        goto parse_out;
                }

                len -= roundup(tlv_len, 4);
                data += roundup(tlv_len, 4);
        }

        KASSERT(error == 0, ("unhandled error"));

 parse_out:
        if (error) {
                device_printf(sc->sc_dev, "firmware parse error %d, "
                    "section type %d\n", error, tlv_type);
        }

        if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
                device_printf(sc->sc_dev,
                    "device uses unsupported power ops\n");
                error = ENOTSUP;
        }

 out:
        if (error) {
                fw->fw_status = IWM_FW_STATUS_NONE;
                if (fw->fw_fp != NULL)
                        iwm_fw_info_free(fw);
        } else
                fw->fw_status = IWM_FW_STATUS_DONE;
        wakeup(&sc->sc_fw);

        return error;
}

/*
 * DMA resource routines
 */

static void
iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        if (error != 0)
                return;
        KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
        *(bus_addr_t *)arg = segs[0].ds_addr;
}

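/*
 * Allocate one physically contiguous, bus-coherent DMA buffer: create a
 * tag with the requested alignment, allocate and zero "size" bytes, and
 * load the map so that both dma->vaddr and dma->paddr are valid on return.
 */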
static int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
        int error;

        dma->tag = NULL;
        dma->size = size;

        error = bus_dma_tag_create(tag, alignment,
            0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
            1, size, 0, NULL, NULL, &dma->tag);
        if (error != 0)
                goto fail;

        error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
            BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
        if (error != 0)
                goto fail;

        error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
            iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
        if (error != 0)
                goto fail;

        bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

        return 0;

fail:   iwm_dma_contig_free(dma);
        return error;
}

static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
        if (dma->map != NULL) {
                if (dma->vaddr != NULL) {
                        bus_dmamap_sync(dma->tag, dma->map,
                            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
                        bus_dmamap_unload(dma->tag, dma->map);
                        bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
                        dma->vaddr = NULL;
                }
                bus_dmamap_destroy(dma->tag, dma->map);
                dma->map = NULL;
        }
        if (dma->tag != NULL) {
                bus_dma_tag_destroy(dma->tag);
                dma->tag = NULL;
        }

}

/* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
        /* Must be aligned on a 16-byte boundary. */
        return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
            sc->sc_fwdmasegsz, 16);
}

static void
iwm_free_fwmem(struct iwm_softc *sc)
{
        iwm_dma_contig_free(&sc->fw_dma);
}

/* tx scheduler rings.  not used? */
static int
iwm_alloc_sched(struct iwm_softc *sc)
{
        int rv;

        /* TX scheduler rings must be aligned on a 1KB boundary. */
        rv = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
            nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
        return rv;
}

static void
iwm_free_sched(struct iwm_softc *sc)
{
        iwm_dma_contig_free(&sc->sched_dma);
}

/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
        return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}

static void
iwm_free_kw(struct iwm_softc *sc)
{
        iwm_dma_contig_free(&sc->kw_dma);
}

/* interrupt cause table */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
        return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
            IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}

static void
iwm_free_ict(struct iwm_softc *sc)
{
        iwm_dma_contig_free(&sc->ict_dma);
}

static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
        bus_size_t size;
        int i, error;

        ring->cur = 0;

        /* Allocate RX descriptors (256-byte aligned). */
        size = IWM_RX_RING_COUNT * sizeof(uint32_t);
        error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "could not allocate RX ring DMA memory\n");
                goto fail;
        }
        ring->desc = ring->desc_dma.vaddr;

        /* Allocate RX status area (16-byte aligned). */
        error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
            sizeof(*ring->stat), 16);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "could not allocate RX status DMA memory\n");
                goto fail;
        }
        ring->stat = ring->stat_dma.vaddr;

        /* Create RX buffer DMA tag. */
        error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
            IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "%s: could not create RX buf DMA tag, error %d\n",
                    __func__, error);
                goto fail;
        }

        /*
         * Allocate and map RX buffers.
         */
        for (i = 0; i < IWM_RX_RING_COUNT; i++) {
                if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
                        goto fail;
                }
        }
        return 0;

fail:   iwm_free_rx_ring(sc, ring);
        return error;
}

static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{

        /* XXX print out if we can't lock the NIC? */
        if (iwm_nic_lock(sc)) {
                /* XXX handle if RX stop doesn't finish? */
                (void) iwm_pcie_rx_stop(sc);
                iwm_nic_unlock(sc);
        }
        /* Reset the ring state */
        ring->cur = 0;
        memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
}

static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
        int i;

        iwm_dma_contig_free(&ring->desc_dma);
        iwm_dma_contig_free(&ring->stat_dma);

        for (i = 0; i < IWM_RX_RING_COUNT; i++) {
                struct iwm_rx_data *data = &ring->data[i];

                if (data->m != NULL) {
                        bus_dmamap_sync(ring->data_dmat, data->map,
                            BUS_DMASYNC_POSTREAD);
                        bus_dmamap_unload(ring->data_dmat, data->map);
                        m_freem(data->m);
                        data->m = NULL;
                }
                if (data->map != NULL) {
                        bus_dmamap_destroy(ring->data_dmat, data->map);
                        data->map = NULL;
                }
        }
        if (ring->data_dmat != NULL) {
                bus_dma_tag_destroy(ring->data_dmat);
                ring->data_dmat = NULL;
        }
}

static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
        bus_addr_t paddr;
        bus_size_t size;
        int i, error;

        ring->qid = qid;
        ring->queued = 0;
        ring->cur = 0;

        /* Allocate TX descriptors (256-byte aligned). */
        size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
        error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "could not allocate TX ring DMA memory\n");
                goto fail;
        }
        ring->desc = ring->desc_dma.vaddr;

        /*
         * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
         * to allocate command space for other rings.
         */
        if (qid > IWM_MVM_CMD_QUEUE)
                return 0;

        size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
        error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "could not allocate TX cmd DMA memory\n");
                goto fail;
        }
        ring->cmd = ring->cmd_dma.vaddr;

        error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
            IWM_MAX_SCATTER - 2, MCLBYTES, 0, NULL, NULL, &ring->data_dmat);
        if (error != 0) {
                device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
                goto fail;
        }

        paddr = ring->cmd_dma.paddr;
        for (i = 0; i < IWM_TX_RING_COUNT; i++) {
                struct iwm_tx_data *data = &ring->data[i];

                data->cmd_paddr = paddr;
                data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
                    + offsetof(struct iwm_tx_cmd, scratch);
                paddr += sizeof(struct iwm_device_cmd);

                error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
                if (error != 0) {
                        device_printf(sc->sc_dev,
                            "could not create TX buf DMA map\n");
                        goto fail;
                }
        }
        KASSERT(paddr == ring->cmd_dma.paddr + size,
            ("invalid physical address"));
        return 0;

fail:   iwm_free_tx_ring(sc, ring);
        return error;
}

static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
        int i;

        for (i = 0; i < IWM_TX_RING_COUNT; i++) {
                struct iwm_tx_data *data = &ring->data[i];

                if (data->m != NULL) {
                        bus_dmamap_sync(ring->data_dmat, data->map,
                            BUS_DMASYNC_POSTWRITE);
                        bus_dmamap_unload(ring->data_dmat, data->map);
                        m_freem(data->m);
                        data->m = NULL;
                }
        }
        /* Clear TX descriptors. */
        memset(ring->desc, 0, ring->desc_dma.size);
        bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
            BUS_DMASYNC_PREWRITE);
        sc->qfullmsk &= ~(1 << ring->qid);
        ring->queued = 0;
        ring->cur = 0;
}

static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
        int i;

        iwm_dma_contig_free(&ring->desc_dma);
        iwm_dma_contig_free(&ring->cmd_dma);

        for (i = 0; i < IWM_TX_RING_COUNT; i++) {
                struct iwm_tx_data *data = &ring->data[i];

                if (data->m != NULL) {
                        bus_dmamap_sync(ring->data_dmat, data->map,
                            BUS_DMASYNC_POSTWRITE);
                        bus_dmamap_unload(ring->data_dmat, data->map);
                        m_freem(data->m);
                        data->m = NULL;
                }
                if (data->map != NULL) {
                        bus_dmamap_destroy(ring->data_dmat, data->map);
                        data->map = NULL;
                }
        }
        if (ring->data_dmat != NULL) {
                bus_dma_tag_destroy(ring->data_dmat);
                ring->data_dmat = NULL;
        }
}

/*
 * High-level hardware frobbing routines
 */

static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
        sc->sc_intmask = IWM_CSR_INI_SET_MASK;
        IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
        IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
        /* disable interrupts */
        IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

        /* acknowledge all interrupts */
        IWM_WRITE(sc, IWM_CSR_INT, ~0);
        IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}

static void
iwm_ict_reset(struct iwm_softc *sc)
{
        iwm_disable_interrupts(sc);

        /* Reset ICT table. */
        memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
        sc->ict_cur = 0;

        /* Set physical address of ICT table (4KB aligned). */
        IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
            IWM_CSR_DRAM_INT_TBL_ENABLE
            | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
            | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

        /* Switch to ICT interrupt mode in driver. */
        sc->sc_flags |= IWM_FLAG_USE_ICT;

        /* Re-enable interrupts. */
        IWM_WRITE(sc, IWM_CSR_INT, ~0);
        iwm_enable_interrupts(sc);
}

/* iwlwifi pcie/trans.c */

/*
 * Since this .. hard-resets things, it's time to actually
 * mark the first vap (if any) as having no mac context.
 * It's annoying, but since the driver is potentially being
 * stop/start'ed whilst active (thanks openbsd port!) we
 * have to correctly track this.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
        struct ieee80211com *ic = &sc->sc_ic;
        struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
        int chnl, ntries;
        int qid;

        /* tell the device to stop sending interrupts */
        iwm_disable_interrupts(sc);

        /*
         * FreeBSD-local: mark the first vap as not-uploaded,
         * so the next transition through auth/assoc
         * will correctly populate the MAC context.
         */
        if (vap) {
                struct iwm_vap *iv = IWM_VAP(vap);
                iv->is_uploaded = 0;
        }

        /* device going down, Stop using ICT table */
        sc->sc_flags &= ~IWM_FLAG_USE_ICT;

        /* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

        iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

        /* Stop all DMA channels. */
        if (iwm_nic_lock(sc)) {
                for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
                        IWM_WRITE(sc,
                            IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
                        for (ntries = 0; ntries < 200; ntries++) {
                                uint32_t r;

                                r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
                                if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
                                    chnl))
                                        break;
                                DELAY(20);
                        }
                }
                iwm_nic_unlock(sc);
        }

        /* Stop RX ring. */
        iwm_reset_rx_ring(sc, &sc->rxq);

        /* Reset all TX rings. */
        for (qid = 0; qid < nitems(sc->txq); qid++)
                iwm_reset_tx_ring(sc, &sc->txq[qid]);

        /*
         * Power-down device's busmaster DMA clocks
         */
        iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
        DELAY(5);

        /* Make sure (redundant) we've released our request to stay awake */
        IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
            IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

        /* Stop the device, and put it in low power state */
        iwm_apm_stop(sc);

        /* Upon stop, the APM issues an interrupt if HW RF kill is set.
         * Clean again the interrupt here
         */
        iwm_disable_interrupts(sc);
        /* stop and reset the on-board processor */
        IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_NEVO_RESET);

        /*
         * Even if we stop the HW, we still want the RF kill
         * interrupt
         */
        iwm_enable_rfkill_int(sc);
        iwm_check_rfkill(sc);
}

/* iwlwifi: mvm/ops.c */
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
        uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
        uint32_t reg_val = 0;

        radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
            IWM_FW_PHY_CFG_RADIO_TYPE_POS;
        radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
            IWM_FW_PHY_CFG_RADIO_STEP_POS;
        radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
            IWM_FW_PHY_CFG_RADIO_DASH_POS;

        /* SKU control */
        reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
            IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
        reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
            IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

        /* radio configuration */
        reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
        reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
        reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

        IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

        IWM_DPRINTF(sc, IWM_DEBUG_RESET,
            "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
            radio_cfg_step, radio_cfg_dash);

        /*
         * W/A : NIC is stuck in a reset state after Early PCIe power off
         * (PCIe power is lost before PERST# is asserted), causing ME FW
         * to lose ownership and not being able to obtain it back.
         */
        iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
            IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
            ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}

static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
        if (!iwm_nic_lock(sc))
                return EBUSY;

        /*
         * Initialize RX ring.  This is from the iwn driver.
         */
        memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

        /* stop DMA */
        IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
        IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
        IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
        IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
        IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

        /* Set physical address of RX ring (256-byte aligned). */
        IWM_WRITE(sc,
            IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

        /* Set physical address of RX status (16-byte aligned). */
        IWM_WRITE(sc,
            IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

        /* Enable RX. */
        /*
         * Note: Linux driver also sets this:
         *  (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
         *
         * It causes weird behavior.  YMMV.
         */
        IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
            IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
            IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |  /* HW bug */
            IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
            IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
            IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

        IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

        /* W/A for interrupt coalescing bug in 7260 and 3160 */
        if (sc->host_interrupt_operation_mode)
                IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

        /*
         * Thus sayeth el jefe (iwlwifi) via a comment:
         *
         * This value should initially be 0 (before preparing any
         * RBs), should be 8 after preparing the first 8 RBs (for example)
         */
        IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

        iwm_nic_unlock(sc);

        return 0;
}

static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
        int qid;

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* Deactivate TX scheduler. */
        iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

        /* Set physical address of "keep warm" page (16-byte aligned). */
        IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

        /* Initialize TX rings. */
        for (qid = 0; qid < nitems(sc->txq); qid++) {
                struct iwm_tx_ring *txq = &sc->txq[qid];

                /* Set physical address of TX ring (256-byte aligned). */
                IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
                    txq->desc_dma.paddr >> 8);
                IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
                    "%s: loading ring %d descriptors (%p) at %lx\n",
                    __func__,
                    qid, txq->desc,
                    (unsigned long) (txq->desc_dma.paddr >> 8));
        }
        iwm_nic_unlock(sc);

        return 0;
}

static int
iwm_nic_init(struct iwm_softc *sc)
{
        int error;

        iwm_apm_init(sc);
        iwm_set_pwr(sc);

        iwm_mvm_nic_config(sc);

        if ((error = iwm_nic_rx_init(sc)) != 0)
                return error;

        /*
         * Ditto for TX, from iwn
         */
        if ((error = iwm_nic_tx_init(sc)) != 0)
                return error;

        IWM_DPRINTF(sc, IWM_DEBUG_RESET,
            "%s: shadow registers enabled\n", __func__);
        IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

        return 0;
}

enum iwm_mvm_tx_fifo {
        IWM_MVM_TX_FIFO_BK = 0,
        IWM_MVM_TX_FIFO_BE,
        IWM_MVM_TX_FIFO_VI,
        IWM_MVM_TX_FIFO_VO,
        IWM_MVM_TX_FIFO_MCAST = 5,
};

const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
        IWM_MVM_TX_FIFO_VO,
        IWM_MVM_TX_FIFO_VI,
        IWM_MVM_TX_FIFO_BE,
        IWM_MVM_TX_FIFO_BK,
};

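/*
 * Program the TX scheduler so that queue "qid" feeds firmware FIFO "fifo":
 * the queue is deactivated, taken out of aggregation, its read/write
 * pointers are reset, the scheduler window and frame limit are written
 * into SRAM, and the queue is then activated again.
 */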
static void
iwm_enable_txq(struct iwm_softc *sc, int qid, int fifo)
{
        if (!iwm_nic_lock(sc)) {
                device_printf(sc->sc_dev,
                    "%s: cannot enable txq %d\n",
                    __func__,
                    qid);
                return; /* XXX return EBUSY */
        }

        /* deactivate before configuration */
        iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
            (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
            | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

        if (qid != IWM_MVM_CMD_QUEUE) {
                iwm_set_bits_prph(sc, IWM_SCD_QUEUECHAIN_SEL, (1 << qid));
        }

        iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

        IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
        iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);

        iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
        /* Set scheduler window size and frame limit. */
        iwm_write_mem32(sc,
            sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
            sizeof(uint32_t),
            ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
            IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
            ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
            IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

        iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
            (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
            (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
            (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
            IWM_SCD_QUEUE_STTS_REG_MSK);

        iwm_nic_unlock(sc);

        IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
            "%s: enabled txq %d FIFO %d\n",
            __func__, qid, fifo);
}

static int
iwm_post_alive(struct iwm_softc *sc)
{
        int nwords;
        int error, chnl;

        if (!iwm_nic_lock(sc))
                return EBUSY;

        if (sc->sched_base != iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR)) {
                device_printf(sc->sc_dev,
1413                     "%s: sched addr mismatch\n",
1414                     __func__);
1415                 error = EINVAL;
1416                 goto out;
1417         }
1418
1419         iwm_ict_reset(sc);
1420
1421         /* Clear TX scheduler state in SRAM. */
1422         nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1423             IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
1424             / sizeof(uint32_t);
1425         error = iwm_write_mem(sc,
1426             sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1427             NULL, nwords);
1428         if (error)
1429                 goto out;
1430
1431         /* Set physical address of TX scheduler rings (1KB aligned). */
1432         iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1433
1434         iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1435
1436         /* enable command channel */
1437         iwm_enable_txq(sc, IWM_MVM_CMD_QUEUE, 7);
1438
1439         iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1440
1441         /* Enable DMA channels. */
1442         for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1443                 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1444                     IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1445                     IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1446         }
1447
1448         IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1449             IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1450
1451         /* Enable L1-Active */
1452         iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1453             IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1454
1455  out:
1456         iwm_nic_unlock(sc);
1457         return error;
1458 }
1459
1460 /*
1461  * NVM read access and content parsing.  We do not support
1462  * external NVM or writing NVM.
1463  * iwlwifi/mvm/nvm.c
1464  */
1465
1466 /* list of NVM sections we are allowed/need to read */
1467 const int nvm_to_read[] = {
1468         IWM_NVM_SECTION_TYPE_HW,
1469         IWM_NVM_SECTION_TYPE_SW,
1470         IWM_NVM_SECTION_TYPE_CALIBRATION,
1471         IWM_NVM_SECTION_TYPE_PRODUCTION,
1472 };
1473
1474 /* Default NVM size to read */
1475 #define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
1476 #define IWM_MAX_NVM_SECTION_SIZE 7000
1477
1478 #define IWM_NVM_WRITE_OPCODE 1
1479 #define IWM_NVM_READ_OPCODE 0
1480
1481 static int
1482 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1483         uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1484 {
1486         struct iwm_nvm_access_cmd nvm_access_cmd = {
1487                 .offset = htole16(offset),
1488                 .length = htole16(length),
1489                 .type = htole16(section),
1490                 .op_code = IWM_NVM_READ_OPCODE,
1491         };
1492         struct iwm_nvm_access_resp *nvm_resp;
1493         struct iwm_rx_packet *pkt;
1494         struct iwm_host_cmd cmd = {
1495                 .id = IWM_NVM_ACCESS_CMD,
1496                 .flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
1497                     IWM_CMD_SEND_IN_RFKILL,
1498                 .data = { &nvm_access_cmd, },
1499         };
1500         int ret, bytes_read, offset_read;
1501         uint8_t *resp_data;
1502
1503         cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1504
1505         ret = iwm_send_cmd(sc, &cmd);
1506         if (ret)
1507                 return ret;
1508
1509         pkt = cmd.resp_pkt;
1510         if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
1511                 device_printf(sc->sc_dev,
1512                     "%s: Bad return from IWM_NVM_ACCESS_CMD (0x%08X)\n",
1513                     __func__, pkt->hdr.flags);
1514                 ret = EIO;
1515                 goto exit;
1516         }
1517
1518         /* Extract NVM response */
1519         nvm_resp = (void *)pkt->data;
1520
1521         ret = le16toh(nvm_resp->status);
1522         bytes_read = le16toh(nvm_resp->length);
1523         offset_read = le16toh(nvm_resp->offset);
1524         resp_data = nvm_resp->data;
1525         if (ret) {
1526                 device_printf(sc->sc_dev,
1527                     "%s: NVM access command failed with status %d\n",
1528                     __func__, ret);
1529                 ret = EINVAL;
1530                 goto exit;
1531         }
1532
1533         if (offset_read != offset) {
1534                 device_printf(sc->sc_dev,
1535                     "%s: NVM ACCESS response with invalid offset %d\n",
1536                     __func__, offset_read);
1537                 ret = EINVAL;
1538                 goto exit;
1539         }
1540
1541         memcpy(data + offset, resp_data, bytes_read);
1542         *len = bytes_read;
1543
1544  exit:
1545         iwm_free_resp(sc, &cmd);
1546         return ret;
1547 }
1548
1549 /*
1550  * Read an NVM section completely.
1551  * NICs prior to the 7000 family don't have a real NVM; they only
1552  * expose section 0, which is the EEPROM.  Because EEPROM reads are
1553  * not bounded by the uCode, we must check in that case that we do
1554  * not read past the EEPROM size.
1555  * For 7000 family NICs we supply the maximal size we can read, and
1556  * the uCode fills the response with as much data as fits without
1557  * overflowing, so no check is needed.
1558  */
1559 static int
1560 iwm_nvm_read_section(struct iwm_softc *sc,
1561         uint16_t section, uint8_t *data, uint16_t *len)
1562 {
1563         uint16_t length, seglen;
1564         int error;
1565
1566         /* Set nvm section read length */
1567         length = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
1568         *len = 0;
1569
1570         /* Read the NVM until exhausted (reading less than requested) */
1571         while (seglen == length) {
1572                 error = iwm_nvm_read_chunk(sc,
1573                     section, *len, length, data, &seglen);
1574                 if (error) {
1575                         device_printf(sc->sc_dev,
1576                             "Cannot read NVM from section "
1577                             "%d offset %d, length %d\n",
1578                             section, *len, length);
1579                         return error;
1580                 }
1581                 *len += seglen;
1582         }
1583
1584         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1585             "NVM section %d read completed\n", section);
1586         return 0;
1587 }
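/*
 * Example of the loop above, assuming the 2 KB IWM_NVM_DEFAULT_CHUNK_SIZE:
 * a 1.5 KB section returns 1536 bytes on the first read, less than the
 * 2048 requested, so the loop stops with *len == 1536; a 3 KB section
 * takes one full 2048-byte chunk plus a final 1024-byte chunk.
 */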
1588
1589 /*
1590  * BEGIN IWM_NVM_PARSE
1591  */
1592
1593 /* iwlwifi/iwl-nvm-parse.c */
1594
1595 /* NVM offsets (in words) definitions */
1596 enum wkp_nvm_offsets {
1597         /* NVM HW-Section offset (in words) definitions */
1598         IWM_HW_ADDR = 0x15,
1599
1600 /* NVM SW-Section offset (in words) definitions */
1601         IWM_NVM_SW_SECTION = 0x1C0,
1602         IWM_NVM_VERSION = 0,
1603         IWM_RADIO_CFG = 1,
1604         IWM_SKU = 2,
1605         IWM_N_HW_ADDRS = 3,
1606         IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1607
1608 /* NVM calibration section offset (in words) definitions */
1609         IWM_NVM_CALIB_SECTION = 0x2B8,
1610         IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1611 };
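/*
 * The offsets above are relative, in words, to their section.  For
 * example, IWM_NVM_CHANNELS = 0x1E0 - 0x1C0 = 0x20, so the channel
 * flags start at nvm_sw[0x20]; likewise IWM_XTAL_CALIB = 0x316 - 0x2B8
 * = 0x5E words into the calibration section.
 */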
1612
1613 /* SKU Capabilities (actual values from NVM definition) */
1614 enum nvm_sku_bits {
1615         IWM_NVM_SKU_CAP_BAND_24GHZ      = (1 << 0),
1616         IWM_NVM_SKU_CAP_BAND_52GHZ      = (1 << 1),
1617         IWM_NVM_SKU_CAP_11N_ENABLE      = (1 << 2),
1618         IWM_NVM_SKU_CAP_11AC_ENABLE     = (1 << 3),
1619 };
1620
1621 /* radio config bits (actual values from NVM definition) */
1622 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1623 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1624 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1625 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1626 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1627 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1628
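/*
 * Illustrative only, not part of the driver: how a radio_cfg word from
 * the NVM SW section decomposes under the masks above.  For example,
 * radio_cfg = 0x3321 yields dash 1, step 0, type 2, pnum 0 and TX/RX
 * antenna masks of 0x3 each.  The helper name below is hypothetical.
 */
#if 0
static void
iwm_example_decode_radio_cfg(uint16_t radio_cfg)
{
        printf("dash %d step %d type %d pnum %d tx_ant %d rx_ant %d\n",
            IWM_NVM_RF_CFG_DASH_MSK(radio_cfg),
            IWM_NVM_RF_CFG_STEP_MSK(radio_cfg),
            IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg),
            IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg),
            IWM_NVM_RF_CFG_TX_ANT_MSK(radio_cfg),
            IWM_NVM_RF_CFG_RX_ANT_MSK(radio_cfg));
}
#endif
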
1629 #define DEFAULT_MAX_TX_POWER 16
1630
1631 /**
1632  * enum iwm_nvm_channel_flags - channel flags in NVM
1633  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1634  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1635  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1636  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1637  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1638  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1639  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1640  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1641  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1642  */
1643 enum iwm_nvm_channel_flags {
1644         IWM_NVM_CHANNEL_VALID = (1 << 0),
1645         IWM_NVM_CHANNEL_IBSS = (1 << 1),
1646         IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1647         IWM_NVM_CHANNEL_RADAR = (1 << 4),
1648         IWM_NVM_CHANNEL_DFS = (1 << 7),
1649         IWM_NVM_CHANNEL_WIDE = (1 << 8),
1650         IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1651         IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1652         IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1653 };
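/*
 * For example, a hypothetical 2.4 GHz entry flagged
 * VALID | IBSS | ACTIVE | WIDE | 40MHZ would read 0x30b.
 */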
1654
1655 /*
1656  * Add a channel to the net80211 channel list.
1657  *
1658  * ieee is the IEEE channel number.
1659  * ch_idx is the index into the NVM channel table.
1660  * mode is the net80211 channel mode (IEEE80211_CHAN_A/B/G).
1661  * ch_flags are the iwm NVM channel flags.
1662  *
1663  * Returns 0 on OK, < 0 on error.
1664  */
1665 static int
1666 iwm_init_net80211_channel(struct iwm_softc *sc, int ieee, int ch_idx,
1667     int mode, uint16_t ch_flags)
1668 {
1669         /* XXX for now, no overflow checking! */
1670         struct ieee80211com *ic = &sc->sc_ic;
1671         int is_5ghz, flags;
1672         struct ieee80211_channel *channel;
1673
1674         channel = &ic->ic_channels[ic->ic_nchans++];
1675         channel->ic_ieee = ieee;
1676
1677         is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
1678         if (!is_5ghz) {
1679                 flags = IEEE80211_CHAN_2GHZ;
1680                 channel->ic_flags = mode;
1681         } else {
1682                 flags = IEEE80211_CHAN_5GHZ;
1683                 channel->ic_flags = mode;
1684         }
1685         channel->ic_freq = ieee80211_ieee2mhz(ieee, flags);
1686
1687         if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
1688                 channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
1689         return (0);
1690 }
1691
1692 static void
1693 iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags)
1694 {
1695         struct ieee80211com *ic = &sc->sc_ic;
1696         struct iwm_nvm_data *data = &sc->sc_nvm;
1697         int ch_idx;
1698         uint16_t ch_flags;
1699         int hw_value;
1700
1701         for (ch_idx = 0; ch_idx < nitems(iwm_nvm_channels); ch_idx++) {
1702                 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1703
1704                 if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
1705                     !data->sku_cap_band_52GHz_enable)
1706                         ch_flags &= ~IWM_NVM_CHANNEL_VALID;
1707
1708                 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1709                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1710                             "Ch. %d Flags %x [%sGHz] - No traffic\n",
1711                             iwm_nvm_channels[ch_idx],
1712                             ch_flags,
1713                             (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1714                             "5.2" : "2.4");
1715                         continue;
1716                 }
1717
1718                 hw_value = iwm_nvm_channels[ch_idx];
1719
1720                 /* 5GHz? */
1721                 if (ch_idx >= IWM_NUM_2GHZ_CHANNELS) {
1722                         (void) iwm_init_net80211_channel(sc, hw_value,
1723                             ch_idx,
1724                             IEEE80211_CHAN_A,
1725                             ch_flags);
1726                 } else {
1727                         (void) iwm_init_net80211_channel(sc, hw_value,
1728                             ch_idx,
1729                             IEEE80211_CHAN_B,
1730                             ch_flags);
1731                         /* If it's not channel 13, also add 11g */
1732                         if (hw_value != 13)
1733                                 (void) iwm_init_net80211_channel(sc, hw_value,
1734                                     ch_idx,
1735                                     IEEE80211_CHAN_G,
1736                                     ch_flags);
1737                 }
1738
1739                 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1740                     "Ch. %d Flags %x [%sGHz] - Added\n",
1741                     iwm_nvm_channels[ch_idx],
1742                     ch_flags,
1743                     (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1744                     "5.2" : "2.4");
1745         }
1746         ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
1747 }
1748
1749 static int
1750 iwm_parse_nvm_data(struct iwm_softc *sc,
1751         const uint16_t *nvm_hw, const uint16_t *nvm_sw,
1752         const uint16_t *nvm_calib, uint8_t tx_chains, uint8_t rx_chains)
1753 {
1754         struct iwm_nvm_data *data = &sc->sc_nvm;
1755         uint8_t hw_addr[IEEE80211_ADDR_LEN];
1756         uint16_t radio_cfg, sku;
1757
1758         data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
1759
1760         radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
1761         data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
1762         data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
1763         data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
1764         data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
1765         data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
1766         data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK(radio_cfg);
1767
1768         sku = le16_to_cpup(nvm_sw + IWM_SKU);
1769         data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
1770         data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
1771         data->sku_cap_11n_enable = 0;
1772
1773         if (!data->valid_tx_ant || !data->valid_rx_ant) {
1774                 device_printf(sc->sc_dev,
1775                     "%s: invalid antennas (0x%x, 0x%x)\n",
1776                     __func__, data->valid_tx_ant,
1777                     data->valid_rx_ant);
1778                 return EINVAL;
1779         }
1780
1781         data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
1782
1783         data->xtal_calib[0] = *(nvm_calib + IWM_XTAL_CALIB);
1784         data->xtal_calib[1] = *(nvm_calib + IWM_XTAL_CALIB + 1);
1785
1786         /* The MAC is stored as little-endian 16-bit words, i.e. byte order 2-1-4-3-6-5; swap within each word. */
1787         IEEE80211_ADDR_COPY(hw_addr, nvm_hw + IWM_HW_ADDR);
1788         data->hw_addr[0] = hw_addr[1];
1789         data->hw_addr[1] = hw_addr[0];
1790         data->hw_addr[2] = hw_addr[3];
1791         data->hw_addr[3] = hw_addr[2];
1792         data->hw_addr[4] = hw_addr[5];
1793         data->hw_addr[5] = hw_addr[4];
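        /*
         * Illustrative example: if the NVM bytes are 34 12 78 56 bc 9a,
         * the swaps above recover the MAC address 12:34:56:78:9a:bc.
         */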
1794
1795         iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS]);
1796         data->calib_version = 255;   /* TODO:
1797                                         this value prevents some checks from
1798                                         failing; we need to check whether this
1799                                         field is still needed, and if so,
1800                                         where it lives in the NVM */
1801
1802         return 0;
1803 }
1804
1805 /*
1806  * END NVM PARSE
1807  */
1808
1809 struct iwm_nvm_section {
1810         uint16_t length;
1811         const uint8_t *data;
1812 };
1813
1814 static int
1815 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
1816 {
1817         const uint16_t *hw, *sw, *calib;
1818
1819         /* Checking for required sections */
1820         if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
1821             !sections[IWM_NVM_SECTION_TYPE_HW].data) {
1822                 device_printf(sc->sc_dev,
1823                     "%s: Can't parse empty NVM sections\n",
1824                     __func__);
1825                 return ENOENT;
1826         }
1827
1828         hw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_HW].data;
1829         sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
1830         calib = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
1831         return iwm_parse_nvm_data(sc, hw, sw, calib,
1832             IWM_FW_VALID_TX_ANT(sc), IWM_FW_VALID_RX_ANT(sc));
1833 }
1834
1835 static int
1836 iwm_nvm_init(struct iwm_softc *sc)
1837 {
1838         struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
1839         int i, section, error;
1840         uint16_t len;
1841         uint8_t *nvm_buffer, *temp;
1842
1843         /* Read From FW NVM */
1844         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1845             "%s: Read NVM\n",
1846             __func__);
1847
1848         /* TODO: find correct NVM max size for a section */
1849         nvm_buffer = malloc(IWM_MAX_NVM_SECTION_SIZE, M_DEVBUF, M_NOWAIT);
1850         if (nvm_buffer == NULL)
1851                 return (ENOMEM);
1852         for (i = 0; i < nitems(nvm_to_read); i++) {
1853                 section = nvm_to_read[i];
1854                 KASSERT(section < nitems(nvm_sections),
1855                     ("too many sections"));
1856
1857                 error = iwm_nvm_read_section(sc, section, nvm_buffer, &len);
1858                 if (error)
1859                         break;
1860
1861                 temp = malloc(len, M_DEVBUF, M_NOWAIT);
1862                 if (temp == NULL) {
1863                         error = ENOMEM;
1864                         break;
1865                 }
1866                 memcpy(temp, nvm_buffer, len);
1867                 nvm_sections[section].data = temp;
1868                 nvm_sections[section].length = len;
1869         }
1870         free(nvm_buffer, M_DEVBUF);
1871         if (error)
1872                 return error;
1873
1874         return iwm_parse_nvm_sections(sc, nvm_sections);
1875 }
1876
1877 /*
1878  * Firmware loading gunk.  This is kind of a weird hybrid between the
1879  * iwn driver and the Linux iwlwifi driver.
1880  */
1881
1882 static int
1883 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
1884         const uint8_t *section, uint32_t byte_cnt)
1885 {
1886         struct iwm_dma_info *dma = &sc->fw_dma;
1887         int error;
1888
1889         /* Copy firmware section into pre-allocated DMA-safe memory. */
1890         memcpy(dma->vaddr, section, byte_cnt);
1891         bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
1892
1893         if (!iwm_nic_lock(sc))
1894                 return EBUSY;
1895
1896         sc->sc_fw_chunk_done = 0;
1897
1898         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
1899             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
1900         IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
1901             dst_addr);
1902         IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
1903             dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
1904         IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
1905             (iwm_get_dma_hi_addr(dma->paddr)
1906               << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
1907         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
1908             1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
1909             1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
1910             IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
1911         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
1912             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
1913             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
1914             IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
1915
1916         iwm_nic_unlock(sc);
1917
1918         /* wait 1s for this segment to load */
1919         while (!sc->sc_fw_chunk_done)
1920                 if ((error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz)) != 0)
1921                         break;
1922
1923         return error;
1924 }
1925
1926 static int
1927 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
1928 {
1929         struct iwm_fw_sects *fws;
1930         int error, i, w;
1931         const void *data;
1932         uint32_t dlen;
1933         uint32_t offset;
1934
1935         sc->sc_uc.uc_intr = 0;
1936
1937         fws = &sc->sc_fw.fw_sects[ucode_type];
1938         for (i = 0; i < fws->fw_count; i++) {
1939                 data = fws->fw_sect[i].fws_data;
1940                 dlen = fws->fw_sect[i].fws_len;
1941                 offset = fws->fw_sect[i].fws_devoff;
1942                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
1943                     "LOAD FIRMWARE type %d offset %u len %d\n",
1944                     ucode_type, offset, dlen);
1945                 error = iwm_firmware_load_chunk(sc, offset, data, dlen);
1946                 if (error) {
1947                         device_printf(sc->sc_dev,
1948                             "%s: chunk %u of %u returned error %02d\n",
1949                             __func__, i, fws->fw_count, error);
1950                         return error;
1951                 }
1952         }
1953
1954         /* wait for the firmware to load */
1955         IWM_WRITE(sc, IWM_CSR_RESET, 0);
1956
1957         for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
1958                 error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
1959         }
1960
1961         return error;
1962 }
1963
1964 /* iwlwifi: pcie/trans.c */
1965 static int
1966 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
1967 {
1968         int error;
1969
1970         IWM_WRITE(sc, IWM_CSR_INT, ~0);
1971
1972         if ((error = iwm_nic_init(sc)) != 0) {
1973                 device_printf(sc->sc_dev, "unable to init nic\n");
1974                 return error;
1975         }
1976
1977         /* make sure rfkill handshake bits are cleared */
1978         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
1979         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
1980             IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
1981
1982         /* clear (again), then enable host interrupts */
1983         IWM_WRITE(sc, IWM_CSR_INT, ~0);
1984         iwm_enable_interrupts(sc);
1985
1986         /* really make sure rfkill handshake bits are cleared */
1987         /* maybe we should write a few times more?  just to make sure */
1988         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
1989         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
1990
1991         /* Load the given image to the HW */
1992         return iwm_load_firmware(sc, ucode_type);
1993 }
1994
1995 static int
1996 iwm_fw_alive(struct iwm_softc *sc, uint32_t sched_base)
1997 {
1998         return iwm_post_alive(sc);
1999 }
2000
2001 static int
2002 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2003 {
2004         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2005                 .valid = htole32(valid_tx_ant),
2006         };
2007
2008         return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2009             IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2010 }
2011
2012 /* iwlwifi: mvm/fw.c */
2013 static int
2014 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2015 {
2016         struct iwm_phy_cfg_cmd phy_cfg_cmd;
2017         enum iwm_ucode_type ucode_type = sc->sc_uc_current;
2018
2019         /* Set parameters */
2020         phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
2021         phy_cfg_cmd.calib_control.event_trigger =
2022             sc->sc_default_calib[ucode_type].event_trigger;
2023         phy_cfg_cmd.calib_control.flow_trigger =
2024             sc->sc_default_calib[ucode_type].flow_trigger;
2025
2026         IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2027             "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2028         return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2029             sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2030 }
2031
2032 static int
2033 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2034         enum iwm_ucode_type ucode_type)
2035 {
2036         enum iwm_ucode_type old_type = sc->sc_uc_current;
2037         int error;
2038
2039         if ((error = iwm_read_firmware(sc, ucode_type)) != 0)
2040                 return error;
2041
2042         sc->sc_uc_current = ucode_type;
2043         error = iwm_start_fw(sc, ucode_type);
2044         if (error) {
2045                 sc->sc_uc_current = old_type;
2046                 return error;
2047         }
2048
2049         return iwm_fw_alive(sc, sc->sched_base);
2050 }
2051
2052 /*
2053  * mvm misc bits
2054  */
2055
2056 /*
2057  * follows iwlwifi/fw.c
2058  */
2059 static int
2060 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2061 {
2062         int error;
2063
2064         /* do not operate with rfkill switch turned on */
2065         if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2066                 device_printf(sc->sc_dev,
2067                     "radio is disabled by hardware switch\n");
2068                 return EPERM;
2069         }
2070
2071         sc->sc_init_complete = 0;
2072         if ((error = iwm_mvm_load_ucode_wait_alive(sc,
2073             IWM_UCODE_TYPE_INIT)) != 0)
2074                 return error;
2075
2076         if (justnvm) {
2077                 if ((error = iwm_nvm_init(sc)) != 0) {
2078                         device_printf(sc->sc_dev, "failed to read nvm\n");
2079                         return error;
2080                 }
2081                 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->sc_nvm.hw_addr);
2082
2083                 sc->sc_scan_cmd_len = sizeof(struct iwm_scan_cmd)
2084                     + sc->sc_capa_max_probe_len
2085                     + IWM_MAX_NUM_SCAN_CHANNELS
2086                     * sizeof(struct iwm_scan_channel);
2087                 sc->sc_scan_cmd = malloc(sc->sc_scan_cmd_len, M_DEVBUF,
2088                     M_NOWAIT);
2089                 if (sc->sc_scan_cmd == NULL)
2090                         return (ENOMEM);
2091
2092                 return 0;
2093         }
2094
2095         /* Send TX valid antennas before triggering calibrations */
2096         if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
2097                 return error;
2098
2099         /*
2100          * Send the PHY configuration command to the init uCode
2101          * to start the 16.0 uCode init image's internal calibrations.
2102          */
2103         if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
2104                 device_printf(sc->sc_dev,
2105                     "%s: failed to run internal calibration: %d\n",
2106                     __func__, error);
2107                 return error;
2108         }
2109
2110         /*
2111          * Nothing to do but wait for the init complete notification
2112          * from the firmware
2113          */
2114         while (!sc->sc_init_complete)
2115                 if ((error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
2116                     0, "iwminit", 2*hz)) != 0)
2117                         break;
2118
2119         return error;
2120 }
2121
2122 /*
2123  * receive side
2124  */
2125
2126 /* (re)stock rx ring, called at init-time and at runtime */
2127 static int
2128 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
2129 {
2130         struct iwm_rx_ring *ring = &sc->rxq;
2131         struct iwm_rx_data *data = &ring->data[idx];
2132         struct mbuf *m;
2133         int error;
2134         bus_addr_t paddr;
2135
2136         m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
2137         if (m == NULL)
2138                 return ENOBUFS;
2139
2140         if (data->m != NULL)
2141                 bus_dmamap_unload(ring->data_dmat, data->map);
2142
2143         m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2144         error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
2145         if (error != 0) {
2146                 device_printf(sc->sc_dev,
2147                     "%s: could not create RX buf DMA map, error %d\n",
2148                     __func__, error);
2149                 goto fail;
2150         }
2151         data->m = m;
2152         error = bus_dmamap_load(ring->data_dmat, data->map,
2153             mtod(data->m, void *), IWM_RBUF_SIZE, iwm_dma_map_addr,
2154             &paddr, BUS_DMA_NOWAIT);
2155         if (error != 0 && error != EFBIG) {
2156                 device_printf(sc->sc_dev,
2157                     "%s: could not map mbuf, error %d\n", __func__,
2158                     error);
2159                 goto fail;
2160         }
2161         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
2162
2163         /* Update RX descriptor. */
2164         ring->desc[idx] = htole32(paddr >> 8);
2165         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2166             BUS_DMASYNC_PREWRITE);
2167
2168         return 0;
2169 fail:
2170         return error;
2171 }
2172
2173 /* iwlwifi: mvm/rx.c */
2174 #define IWM_RSSI_OFFSET 50
2175 static int
2176 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2177 {
2178         int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
2179         uint32_t agc_a, agc_b;
2180         uint32_t val;
2181
2182         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
2183         agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
2184         agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
2185
2186         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
2187         rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
2188         rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
2189
2190         /*
2191          * dBm = rssi dB - agc dB - constant.
2192          * Higher AGC (higher radio gain) means lower signal.
2193          */
2194         rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
2195         rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
2196         max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
2197
2198         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2199             "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
2200             rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
2201
2202         return max_rssi_dbm;
2203 }
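/*
 * Worked example for the formula above (purely illustrative numbers):
 * rssi_a = 70 dB with agc_a = 55 dB gives 70 - 50 - 55 = -35 dBm on
 * chain A; the less negative of the two per-chain values is returned.
 */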
2204
2205 /* iwlwifi: mvm/rx.c */
2206 /*
2207  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
2208  * Values are reported by the fw as positive values; negate them to
2209  * obtain dBm.  Account for missing antennas by replacing 0 values
2210  * with -256 dBm: practically no power and an infeasible 8-bit value.
2211  */
2212 static int
2213 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2214 {
2215         int energy_a, energy_b, energy_c, max_energy;
2216         uint32_t val;
2217
2218         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
2219         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
2220             IWM_RX_INFO_ENERGY_ANT_A_POS;
2221         energy_a = energy_a ? -energy_a : -256;
2222         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
2223             IWM_RX_INFO_ENERGY_ANT_B_POS;
2224         energy_b = energy_b ? -energy_b : -256;
2225         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
2226             IWM_RX_INFO_ENERGY_ANT_C_POS;
2227         energy_c = energy_c ? -energy_c : -256;
2228         max_energy = MAX(energy_a, energy_b);
2229         max_energy = MAX(max_energy, energy_c);
2230
2231         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2232             "energy In A %d B %d C %d , and max %d\n",
2233             energy_a, energy_b, energy_c, max_energy);
2234
2235         return max_energy;
2236 }
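/*
 * Worked example (illustrative numbers only): raw energies of 42, 37
 * and 0 (antenna C absent) become -42, -37 and -256 dBm, and the
 * maximum, -37 dBm, is returned.
 */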
2237
2238 static void
2239 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
2240         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2241 {
2242         struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
2243
2244         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
2245         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2246
2247         memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
2248 }
2249
2250 /*
2251  * Retrieve the average noise (in dBm) among receivers.
2252  */
2253 static int
2254 iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
2255 {
2256         int i, total, nbant, noise;
2257
2258         total = nbant = noise = 0;
2259         for (i = 0; i < 3; i++) {
2260                 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
2261                 if (noise) {
2262                         total += noise;
2263                         nbant++;
2264                 }
2265         }
2266
2267         /* There should be at least one antenna but check anyway. */
2268         return (nbant == 0) ? -127 : (total / nbant) - 107;
2269 }
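/*
 * Worked example (illustrative numbers only): beacon silence RSSI
 * values of 45, 0 and 50 give total = 95 over nbant = 2 antennas,
 * so the returned noise is 95 / 2 - 107 = -60 dBm.
 */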
2270
2271 /*
2272  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
2273  *
2274  * Handles the actual data of the Rx packet from the fw
2275  */
2276 static void
2277 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
2278         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2279 {
2280         struct ieee80211com *ic = &sc->sc_ic;
2281         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2282         struct ieee80211_frame *wh;
2283         struct ieee80211_node *ni;
2284         struct ieee80211_rx_stats rxs;
2285         struct mbuf *m;
2286         struct iwm_rx_phy_info *phy_info;
2287         struct iwm_rx_mpdu_res_start *rx_res;
2288         uint32_t len;
2289         uint32_t rx_pkt_status;
2290         int rssi;
2291
2292         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2293
2294         phy_info = &sc->sc_last_phy_info;
2295         rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
2296         wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
2297         len = le16toh(rx_res->byte_count);
2298         rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
2299
2300         m = data->m;
2301         m->m_data = pkt->data + sizeof(*rx_res);
2302         m->m_pkthdr.len = m->m_len = len;
2303
2304         if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
2305                 device_printf(sc->sc_dev,
2306                     "dsp size out of range [0,20]: %d\n",
2307                     phy_info->cfg_phy_cnt);
2308                 return;
2309         }
2310
2311         if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
2312             !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
2313                 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2314                     "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
2315                 return; /* drop */
2316         }
2317
2318         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
2319                 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
2320         } else {
2321                 rssi = iwm_mvm_calc_rssi(sc, phy_info);
2322         }
2323         rssi = (0 - IWM_MIN_DBM) + rssi;        /* normalize */
2324         rssi = MIN(rssi, sc->sc_max_rssi);      /* clip to max. 100% */
2325
2326         /* replenish ring for the buffer we're going to feed to the sharks */
2327         if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
2328                 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
2329                     __func__);
2330                 return;
2331         }
2332
2333         ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
2334
2335         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2336             "%s: phy_info: channel=%d, flags=0x%08x\n",
2337             __func__,
2338             le16toh(phy_info->channel),
2339             le16toh(phy_info->phy_flags));
2340
2341         /*
2342          * Populate an RX state struct with the provided information.
2343          */
2344         bzero(&rxs, sizeof(rxs));
2345         rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
2346         rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
2347         rxs.c_ieee = le16toh(phy_info->channel);
2348         if (le16toh(phy_info->phy_flags) & IWM_RX_RES_PHY_FLAGS_BAND_24) {
2349                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
2350         } else {
2351                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
2352         }
2353         rxs.rssi = rssi - sc->sc_noise;
2354         rxs.nf = sc->sc_noise;
2355
2356         if (ieee80211_radiotap_active_vap(vap)) {
2357                 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
2358
2359                 tap->wr_flags = 0;
2360                 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
2361                         tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2362                 tap->wr_chan_freq = htole16(rxs.c_freq);
2363                 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
2364                 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
2365                 tap->wr_dbm_antsignal = (int8_t)rssi;
2366                 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
2367                 tap->wr_tsft = phy_info->system_timestamp;
2368                 switch (phy_info->rate) {
2369                 /* CCK rates. */
2370                 case  10: tap->wr_rate =   2; break;
2371                 case  20: tap->wr_rate =   4; break;
2372                 case  55: tap->wr_rate =  11; break;
2373                 case 110: tap->wr_rate =  22; break;
2374                 /* OFDM rates. */
2375                 case 0xd: tap->wr_rate =  12; break;
2376                 case 0xf: tap->wr_rate =  18; break;
2377                 case 0x5: tap->wr_rate =  24; break;
2378                 case 0x7: tap->wr_rate =  36; break;
2379                 case 0x9: tap->wr_rate =  48; break;
2380                 case 0xb: tap->wr_rate =  72; break;
2381                 case 0x1: tap->wr_rate =  96; break;
2382                 case 0x3: tap->wr_rate = 108; break;
2383                 /* Unknown rate: should not happen. */
2384                 default:  tap->wr_rate =   0;
2385                 }
2386         }
2387
2388         IWM_UNLOCK(sc);
2389         if (ni != NULL) {
2390                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
2391                 ieee80211_input_mimo(ni, m, &rxs);
2392                 ieee80211_free_node(ni);
2393         } else {
2394                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
2395                 ieee80211_input_mimo_all(ic, m, &rxs);
2396         }
2397         IWM_LOCK(sc);
2398 }
2399
2400 static int
2401 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
2402         struct iwm_node *in)
2403 {
2404         struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
2405         struct ieee80211_node *ni = &in->in_ni;
2406         struct ieee80211vap *vap = ni->ni_vap;
2407         int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
2408         int failack = tx_resp->failure_frame;
2409
2410         KASSERT(tx_resp->frame_count == 1, ("too many frames"));
2411
2412         /* Update rate control statistics. */
2413         if (status != IWM_TX_STATUS_SUCCESS &&
2414             status != IWM_TX_STATUS_DIRECT_DONE) {
2415                 ieee80211_ratectl_tx_complete(vap, ni,
2416                     IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
2417                 return (1);
2418         } else {
2419                 ieee80211_ratectl_tx_complete(vap, ni,
2420                     IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
2421                 return (0);
2422         }
2423 }
2424
2425 static void
2426 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
2427         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2428 {
2429         struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
2430         int idx = cmd_hdr->idx;
2431         int qid = cmd_hdr->qid;
2432         struct iwm_tx_ring *ring = &sc->txq[qid];
2433         struct iwm_tx_data *txd = &ring->data[idx];
2434         struct iwm_node *in = txd->in;
2435         struct mbuf *m = txd->m;
2436         int status;
2437
2438         KASSERT(txd->done == 0, ("txd not done"));
2439         KASSERT(txd->in != NULL, ("txd without node"));
2440         KASSERT(txd->m != NULL, ("txd without mbuf"));
2441
2442         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2443
2444         sc->sc_tx_timer = 0;
2445
2446         status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
2447
2448         /* Unmap and free mbuf. */
2449         bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
2450         bus_dmamap_unload(ring->data_dmat, txd->map);
2451
2452         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
2453             "free txd %p, in %p\n", txd, txd->in);
2454         txd->done = 1;
2455         txd->m = NULL;
2456         txd->in = NULL;
2457
2458         ieee80211_tx_complete(&in->in_ni, m, status);
2459
2460         if (--ring->queued < IWM_TX_RING_LOMARK) {
2461                 sc->qfullmsk &= ~(1 << ring->qid);
2462                 if (sc->qfullmsk == 0) {
2463                         /*
2464                          * Well, we're in interrupt context, but then again
2465                          * I guess net80211 does all sorts of stunts in
2466                          * interrupt context, so maybe this is no biggie.
2467                          */
2468                         iwm_start(sc);
2469                 }
2470         }
2471 }
2472
2473 /*
2474  * transmit side
2475  */
2476
2477 /*
2478  * Process a "command done" firmware notification.  This is where we
2479  * wake up processes waiting for a synchronous command completion.
2480  * From if_iwn.
2481  */
2482 static void
2483 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
2484 {
2485         struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
2486         struct iwm_tx_data *data;
2487
2488         if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
2489                 return; /* Not a command ack. */
2490         }
2491
2492         data = &ring->data[pkt->hdr.idx];
2493
2494         /* If the command was mapped in an mbuf, free it. */
2495         if (data->m != NULL) {
2496                 bus_dmamap_sync(ring->data_dmat, data->map,
2497                     BUS_DMASYNC_POSTWRITE);
2498                 bus_dmamap_unload(ring->data_dmat, data->map);
2499                 m_freem(data->m);
2500                 data->m = NULL;
2501         }
2502         wakeup(&ring->desc[pkt->hdr.idx]);
2503 }
2504
2505 #if 0
2506 /*
2507  * necessary only for block ack mode
2508  */
2509 void
2510 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
2511         uint16_t len)
2512 {
2513         struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
2514         uint16_t w_val;
2515
2516         scd_bc_tbl = sc->sched_dma.vaddr;
2517
2518         len += 8; /* magic numbers came naturally from paris */
2519         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
2520                 len = roundup(len, 4) / 4;
2521
2522         w_val = htole16(sta_id << 12 | len);
2523
2524         /* Update TX scheduler. */
2525         scd_bc_tbl[qid].tfd_offset[idx] = w_val;
2526         bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2527             BUS_DMASYNC_PREWRITE);
2528
2529         /* I really wonder what this is ?!? */
2530         if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
2531                 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
2532                 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2533                     BUS_DMASYNC_PREWRITE);
2534         }
2535 }
2536 #endif
2537
2538 /*
2539  * Take an 802.11 (non-11n) rate and find the matching rate
2540  * table entry.  Return the index into in_ridx[].
2541  *
2542  * The caller then uses that index back into in_ridx[] to
2543  * figure out the rate index programmed /into/ the firmware
2544  * for this given node.
2545  */
2546 static int
2547 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
2548     uint8_t rate)
2549 {
2550         int i;
2551         uint8_t r;
2552
2553         for (i = 0; i < nitems(in->in_ridx); i++) {
2554                 r = iwm_rates[in->in_ridx[i]].rate;
2555                 if (rate == r)
2556                         return (i);
2557         }
2558         /* XXX Return the first */
2559         /* XXX TODO: have it return the /lowest/ */
2560         return (0);
2561 }
2562
2563 /*
2564  * Fill in various bits for management frames, and leave them
2565  * unfilled for data frames (the firmware takes care of that).
2566  * Return the selected TX rate.
2567  */
2568 static const struct iwm_rate *
2569 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
2570         struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
2571 {
2572         struct ieee80211com *ic = &sc->sc_ic;
2573         struct ieee80211_node *ni = &in->in_ni;
2574         const struct iwm_rate *rinfo;
2575         int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2576         int ridx, rate_flags;
2577
2578         tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
2579         tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
2580
2581         /*
2582          * XXX TODO: everything about the rate selection here is terrible!
2583          */
2584
2585         if (type == IEEE80211_FC0_TYPE_DATA) {
2586                 int i;
2587                 /* for data frames, use RS table */
2588                 (void) ieee80211_ratectl_rate(ni, NULL, 0);
2589                 i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
2590                 ridx = in->in_ridx[i];
2591
2592                 /* This is the index into the programmed table */
2593                 tx->initial_rate_index = i;
2594                 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
2595                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
2596                     "%s: start with i=%d, txrate %d\n",
2597                     __func__, i, iwm_rates[ridx].rate);
2598                 /* XXX no rate_n_flags? */
2599                 return &iwm_rates[ridx];
2600         }
2601
2602         /*
2603          * For non-data, use the lowest supported rate for the given
2604          * operational mode.
2605          *
2606          * Note: there may not be any rate control information available.
2607  * This driver currently assumes that if we're transmitting data
2608  * frames, we use the rate control table.  Grr.
2609          *
2610          * XXX TODO: use the configured rate for the traffic type!
2611          */
2612         if (ic->ic_curmode == IEEE80211_MODE_11A) {
2613                 /*
2614                  * XXX this assumes the mode is either 11a or not 11a;
2615                  * definitely won't work for 11n.
2616                  */
2617                 ridx = IWM_RIDX_OFDM;
2618         } else {
2619                 ridx = IWM_RIDX_CCK;
2620         }
2621
2622         rinfo = &iwm_rates[ridx];
2623
2624         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
2625             __func__, ridx,
2626             rinfo->rate,
2627             !! (IWM_RIDX_IS_CCK(ridx))
2628             );
2629
2630         /* XXX TODO: hard-coded TX antenna? */
2631         rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
2632         if (IWM_RIDX_IS_CCK(ridx))
2633                 rate_flags |= IWM_RATE_MCS_CCK_MSK;
2634         /* XXX hard-coded tx rate */
2635         tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
2636
2637         return rinfo;
2638 }
2639
2640 #define TB0_SIZE 16
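/*
 * TFD layout used by iwm_tx() below: TB0 covers only the first 16 bytes
 * of the command, TB1 the rest of the command header, the TX command and
 * the (padded) 802.11 header, and TB2..TBn the frame payload segments.
 */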
2641 static int
2642 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
2643 {
2644         struct ieee80211com *ic = &sc->sc_ic;
2645         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2646         struct iwm_node *in = IWM_NODE(ni);
2647         struct iwm_tx_ring *ring;
2648         struct iwm_tx_data *data;
2649         struct iwm_tfd *desc;
2650         struct iwm_device_cmd *cmd;
2651         struct iwm_tx_cmd *tx;
2652         struct ieee80211_frame *wh;
2653         struct ieee80211_key *k = NULL;
2654         struct mbuf *m1;
2655         const struct iwm_rate *rinfo;
2656         uint32_t flags;
2657         u_int hdrlen;
2658         bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
2659         int nsegs;
2660         uint8_t tid, type;
2661         int i, totlen, error, pad;
2662
2663         wh = mtod(m, struct ieee80211_frame *);
2664         hdrlen = ieee80211_anyhdrsize(wh);
2665         type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2666         tid = 0;
2667         ring = &sc->txq[ac];
2668         desc = &ring->desc[ring->cur];
2669         memset(desc, 0, sizeof(*desc));
2670         data = &ring->data[ring->cur];
2671
2672         /* Fill out iwm_tx_cmd to send to the firmware */
2673         cmd = &ring->cmd[ring->cur];
2674         cmd->hdr.code = IWM_TX_CMD;
2675         cmd->hdr.flags = 0;
2676         cmd->hdr.qid = ring->qid;
2677         cmd->hdr.idx = ring->cur;
2678
2679         tx = (void *)cmd->data;
2680         memset(tx, 0, sizeof(*tx));
2681
2682         rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
2683
2684         /* Encrypt the frame if need be. */
2685         if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
2686                 /* Retrieve key for TX && do software encryption. */
2687                 k = ieee80211_crypto_encap(ni, m);
2688                 if (k == NULL) {
2689                         m_freem(m);
2690                         return (ENOBUFS);
2691                 }
2692                 /* 802.11 header may have moved. */
2693                 wh = mtod(m, struct ieee80211_frame *);
2694         }
2695
2696         if (ieee80211_radiotap_active_vap(vap)) {
2697                 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
2698
2699                 tap->wt_flags = 0;
2700                 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
2701                 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
2702                 tap->wt_rate = rinfo->rate;
2703                 if (k != NULL)
2704                         tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
2705                 ieee80211_radiotap_tx(vap, m);
2706         }
2707
2708
2709         totlen = m->m_pkthdr.len;
2710
2711         flags = 0;
2712         if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2713                 flags |= IWM_TX_CMD_FLG_ACK;
2714         }
2715
2716         if (type != IEEE80211_FC0_TYPE_DATA
2717             && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
2718             && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2719                 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
2720         }
2721
2722         if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
2723             type != IEEE80211_FC0_TYPE_DATA)
2724                 tx->sta_id = sc->sc_aux_sta.sta_id;
2725         else
2726                 tx->sta_id = IWM_STATION_ID;
2727
2728         if (type == IEEE80211_FC0_TYPE_MGT) {
2729                 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2730
2731                 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
2732                     subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
2733                         tx->pm_frame_timeout = htole16(3);
2734                 else
2735                         tx->pm_frame_timeout = htole16(2);
2736         } else {
2737                 tx->pm_frame_timeout = htole16(0);
2738         }
2739
2740         if (hdrlen & 3) {
2741                 /* First segment length must be a multiple of 4. */
2742                 flags |= IWM_TX_CMD_FLG_MH_PAD;
2743                 pad = 4 - (hdrlen & 3);
2744         } else
2745                 pad = 0;
2746
2747         tx->driver_txop = 0;
2748         tx->next_frame_len = 0;
2749
2750         tx->len = htole16(totlen);
2751         tx->tid_tspec = tid;
2752         tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
2753
2754         /* Set physical address of "scratch area". */
2755         tx->dram_lsb_ptr = htole32(data->scratch_paddr);
2756         tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
2757
2758         /* Copy 802.11 header in TX command. */
2759         memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
2760
2761         flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
2762
2763         tx->sec_ctl = 0;
2764         tx->tx_flags |= htole32(flags);
2765
2766         /* Trim 802.11 header. */
2767         m_adj(m, hdrlen);
2768         error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
2769             segs, &nsegs, BUS_DMA_NOWAIT);
2770         if (error != 0) {
2771                 if (error != EFBIG) {
2772                         device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
2773                             error);
2774                         m_freem(m);
2775                         return error;
2776                 }
2777                 /* Too many DMA segments, linearize mbuf. */
2778                 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
2779                 if (m1 == NULL) {
2780                         device_printf(sc->sc_dev,
2781                             "%s: could not defrag mbuf\n", __func__);
2782                         m_freem(m);
2783                         return (ENOBUFS);
2784                 }
2785                 m = m1;
2786
2787                 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
2788                     segs, &nsegs, BUS_DMA_NOWAIT);
2789                 if (error != 0) {
2790                         device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
2791                             error);
2792                         m_freem(m);
2793                         return error;
2794                 }
2795         }
2796         data->m = m;
2797         data->in = in;
2798         data->done = 0;
2799
2800         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
2801             "sending txd %p, in %p\n", data, data->in);
2802         KASSERT(data->in != NULL, ("node is NULL"));
2803
2804         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
2805             "sending data: qid=%d idx=%d len=%d nsegs=%d\n",
2806             ring->qid, ring->cur, totlen, nsegs);
2807
2808         /* Fill TX descriptor. */
2809         desc->num_tbs = 2 + nsegs;
2810
2811         desc->tbs[0].lo = htole32(data->cmd_paddr);
2812         desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
2813             (TB0_SIZE << 4);
2814         desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
2815         desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
2816             ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
2817               + hdrlen + pad - TB0_SIZE) << 4);
2818
2819         /* Other DMA segments are for data payload. */
2820         for (i = 0; i < nsegs; i++) {
2821                 seg = &segs[i];
2822                 desc->tbs[i+2].lo = htole32(seg->ds_addr);
2823                 desc->tbs[i+2].hi_n_len = \
2824                     htole16(iwm_get_dma_hi_addr(seg->ds_addr))
2825                     | ((seg->ds_len) << 4);
2826         }
2827
2828         bus_dmamap_sync(ring->data_dmat, data->map,
2829             BUS_DMASYNC_PREWRITE);
2830         bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
2831             BUS_DMASYNC_PREWRITE);
2832         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2833             BUS_DMASYNC_PREWRITE);
2834
2835 #if 0
2836         iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
2837 #endif
2838
2839         /* Kick TX ring. */
2840         ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
2841         IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
2842
2843         /* Mark TX ring as full if we reach a certain threshold. */
2844         if (++ring->queued > IWM_TX_RING_HIMARK) {
2845                 sc->qfullmsk |= 1 << ring->qid;
2846         }
2847
2848         return 0;
2849 }
2850
2851 static int
2852 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
2853     const struct ieee80211_bpf_params *params)
2854 {
2855         struct ieee80211com *ic = ni->ni_ic;
2856         struct iwm_softc *sc = ic->ic_softc;
2857         int error = 0;
2858
2859         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
2860             "->%s begin\n", __func__);
2861
2862         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
2863                 m_freem(m);
2864                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
2865                     "<-%s not RUNNING\n", __func__);
2866                 return (ENETDOWN);
2867         }
2868
2869         IWM_LOCK(sc);
2870         /* XXX fix this */
2871         if (params == NULL) {
2872                 error = iwm_tx(sc, m, ni, 0);
2873         } else {
2874                 error = iwm_tx(sc, m, ni, 0);
2875         }
2876         sc->sc_tx_timer = 5;
2877         IWM_UNLOCK(sc);
2878
2879         return (error);
2880 }
2881
2882 /*
2883  * mvm/tx.c
2884  */
2885
2886 #if 0
2887 /*
2888  * Note that there are transports that buffer frames before they reach
2889  * the firmware. This means that after flush_tx_path is called, the
2890  * queue might not be empty. The race-free way to handle this is to:
2891  * 1) set the station as draining
2892  * 2) flush the Tx path
2893  * 3) wait for the transport queues to be empty
2894  */
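/*
 * Editorial sketch of such a race-free caller (hypothetical helper names,
 * not functions defined in this file; only iwm_mvm_flush_tx_path exists here):
 *
 *	iwm_mvm_drain_sta(sc, in, 1);			1) mark the station as draining
 *	iwm_mvm_flush_tx_path(sc, tfd_msk, 1);		2) flush the Tx path
 *	iwm_mvm_wait_tx_queues_empty(sc, tfd_msk);	3) wait for the queues to drain
 *	iwm_mvm_drain_sta(sc, in, 0);
 */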
2895 int
2896 iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
2897 {
2898         struct iwm_tx_path_flush_cmd flush_cmd = {
2899                 .queues_ctl = htole32(tfd_msk),
2900                 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
2901         };
2902         int ret;
2903
2904         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
2905             sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
2906             sizeof(flush_cmd), &flush_cmd);
2907         if (ret)
2908                 device_printf(sc->sc_dev,
2909                     "Flushing tx queue failed: %d\n", ret);
2910         return ret;
2911 }
2912 #endif
2913
2914 /*
2915  * BEGIN mvm/sta.c
2916  */
2917
2918 static void
2919 iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *cmd_v6,
2920         struct iwm_mvm_add_sta_cmd_v5 *cmd_v5)
2921 {
2922         memset(cmd_v5, 0, sizeof(*cmd_v5));
2923
2924         cmd_v5->add_modify = cmd_v6->add_modify;
2925         cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
2926         cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
2927         IEEE80211_ADDR_COPY(cmd_v5->addr, cmd_v6->addr);
2928         cmd_v5->sta_id = cmd_v6->sta_id;
2929         cmd_v5->modify_mask = cmd_v6->modify_mask;
2930         cmd_v5->station_flags = cmd_v6->station_flags;
2931         cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
2932         cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
2933         cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
2934         cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
2935         cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
2936         cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
2937         cmd_v5->assoc_id = cmd_v6->assoc_id;
2938         cmd_v5->beamform_flags = cmd_v6->beamform_flags;
2939         cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
2940 }
2941
2942 static int
2943 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
2944         struct iwm_mvm_add_sta_cmd_v6 *cmd, int *status)
2945 {
2946         struct iwm_mvm_add_sta_cmd_v5 cmd_v5;
2947
2948         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_STA_KEY_CMD) {
2949                 return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA,
2950                     sizeof(*cmd), cmd, status);
2951         }
2952
2953         iwm_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
2954
2955         return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd_v5),
2956             &cmd_v5, status);
2957 }
2958
2959 /* send station add/update command to firmware */
2960 static int
2961 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
2962 {
2963         struct iwm_mvm_add_sta_cmd_v6 add_sta_cmd;
2964         int ret;
2965         uint32_t status;
2966
2967         memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
2968
2969         add_sta_cmd.sta_id = IWM_STATION_ID;
2970         add_sta_cmd.mac_id_n_color
2971             = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
2972                 IWM_DEFAULT_COLOR));
2973         if (!update) {
2974                 add_sta_cmd.tfd_queue_msk = htole32(0xf);
2975                 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
2976         }
2977         add_sta_cmd.add_modify = update ? 1 : 0;
2978         add_sta_cmd.station_flags_msk
2979             |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
2980
2981         status = IWM_ADD_STA_SUCCESS;
2982         ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
2983         if (ret)
2984                 return ret;
2985
2986         switch (status) {
2987         case IWM_ADD_STA_SUCCESS:
2988                 break;
2989         default:
2990                 ret = EIO;
2991                 device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
2992                 break;
2993         }
2994
2995         return ret;
2996 }
2997
2998 static int
2999 iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
3000 {
3001         int ret;
3002
3003         ret = iwm_mvm_sta_send_to_fw(sc, in, 0);
3004         if (ret)
3005                 return ret;
3006
3007         return 0;
3008 }
3009
3010 static int
3011 iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
3012 {
3013         return iwm_mvm_sta_send_to_fw(sc, in, 1);
3014 }
3015
3016 static int
3017 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3018         const uint8_t *addr, uint16_t mac_id, uint16_t color)
3019 {
3020         struct iwm_mvm_add_sta_cmd_v6 cmd;
3021         int ret;
3022         uint32_t status;
3023
3024         memset(&cmd, 0, sizeof(cmd));
3025         cmd.sta_id = sta->sta_id;
3026         cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3027
3028         cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3029
3030         if (addr)
3031                 IEEE80211_ADDR_COPY(cmd.addr, addr);
3032
3033         ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3034         if (ret)
3035                 return ret;
3036
3037         switch (status) {
3038         case IWM_ADD_STA_SUCCESS:
3039                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3040                     "%s: Internal station added.\n", __func__);
3041                 return 0;
3042         default:
3043                 device_printf(sc->sc_dev,
3044                     "%s: Add internal station failed, status=0x%x\n",
3045                     __func__, status);
3046                 ret = EIO;
3047                 break;
3048         }
3049         return ret;
3050 }
3051
3052 static int
3053 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3054 {
3055         int ret;
3056
3057         sc->sc_aux_sta.sta_id = 3;
3058         sc->sc_aux_sta.tfd_queue_msk = 0;
3059
3060         ret = iwm_mvm_add_int_sta_common(sc,
3061             &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
3062
3063         if (ret)
3064                 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
3065         return ret;
3066 }
3067
3068 /*
3069  * END mvm/sta.c
3070  */
3071
3072 /*
3073  * BEGIN mvm/quota.c
3074  */
3075
3076 static int
3077 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
3078 {
3079         struct iwm_time_quota_cmd cmd;
3080         int i, idx, ret, num_active_macs, quota, quota_rem;
3081         int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3082         int n_ifs[IWM_MAX_BINDINGS] = {0, };
3083         uint16_t id;
3084
3085         memset(&cmd, 0, sizeof(cmd));
3086
3087         /* currently, PHY ID == binding ID */
3088         if (in) {
3089                 id = in->in_phyctxt->id;
3090                 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3091                 colors[id] = in->in_phyctxt->color;
3092
3093                 if (1)
3094                         n_ifs[id] = 1;
3095         }
3096
3097         /*
3098          * The FW's scheduling session consists of
3099          * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3100          * equally between all the bindings that require quota
3101          */
3102         num_active_macs = 0;
3103         for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3104                 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3105                 num_active_macs += n_ifs[i];
3106         }
3107
3108         quota = 0;
3109         quota_rem = 0;
3110         if (num_active_macs) {
3111                 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3112                 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3113         }
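        /*
         * Editorial example (assuming IWM_MVM_MAX_QUOTA is 128, as in the
         * Linux driver this code is based on): with a single active MAC the
         * binding gets quota = 128 and quota_rem = 0; with three active MACs
         * each gets quota = 42 and the 2 left-over fragments are folded into
         * the first binding's quota further below.
         */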
3114
3115         for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3116                 if (colors[i] < 0)
3117                         continue;
3118
3119                 cmd.quotas[idx].id_and_color =
3120                         htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3121
3122                 if (n_ifs[i] <= 0) {
3123                         cmd.quotas[idx].quota = htole32(0);
3124                         cmd.quotas[idx].max_duration = htole32(0);
3125                 } else {
3126                         cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3127                         cmd.quotas[idx].max_duration = htole32(0);
3128                 }
3129                 idx++;
3130         }
3131
3132         /* Give the remainder of the session to the first binding */
3133         cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3134
3135         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3136             sizeof(cmd), &cmd);
3137         if (ret)
3138                 device_printf(sc->sc_dev,
3139                     "%s: Failed to send quota: %d\n", __func__, ret);
3140         return ret;
3141 }
3142
3143 /*
3144  * END mvm/quota.c
3145  */
3146
3147 /*
3148  * ieee80211 routines
3149  */
3150
3151 /*
3152  * Change to AUTH state in 80211 state machine.  Roughly matches what
3153  * Linux does in bss_info_changed().
3154  */
3155 static int
3156 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3157 {
3158         struct ieee80211_node *ni;
3159         struct iwm_node *in;
3160         struct iwm_vap *iv = IWM_VAP(vap);
3161         uint32_t duration;
3162         uint32_t min_duration;
3163         int error;
3164
3165         /*
3166          * XXX i have a feeling that the vap node is being
3167          * freed from underneath us. Grr.
3168          */
3169         ni = ieee80211_ref_node(vap->iv_bss);
3170         in = IWM_NODE(ni);
3171         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3172             "%s: called; vap=%p, bss ni=%p\n",
3173             __func__,
3174             vap,
3175             ni);
3176
3177         in->in_assoc = 0;
3178
3179         error = iwm_allow_mcast(vap, sc);
3180         if (error) {
3181                 device_printf(sc->sc_dev,
3182                     "%s: failed to set multicast\n", __func__);
3183                 goto out;
3184         }
3185
3186         /*
3187          * This is where it deviates from what Linux does.
3188          *
3189          * Linux iwlwifi doesn't reset the nic each time, nor does it
3190          * call ctxt_add() here.  Instead, it adds it during vap creation,
3191          * and always does a mac_ctx_changed().
3192          *
3193          * The openbsd port doesn't attempt to do that - it resets things
3194          * at odd states and does the add here.
3195          *
3196          * So, until the state handling is fixed (ie, we never reset
3197          * the NIC except for a firmware failure, which should drag
3198          * the NIC back to IDLE, re-setup and re-add all the mac/phy
3199          * contexts that are required), let's do a dirty hack here.
3200          */
3201         if (iv->is_uploaded) {
3202                 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3203                         device_printf(sc->sc_dev,
3204                             "%s: failed to update MAC\n", __func__);
3205                         goto out;
3206                 }
3207         } else {
3208                 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
3209                         device_printf(sc->sc_dev,
3210                             "%s: failed to add MAC\n", __func__);
3211                         goto out;
3212                 }
3213         }
3214
3215         if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
3216             in->in_ni.ni_chan, 1, 1)) != 0) {
3217                 device_printf(sc->sc_dev,
3218                     "%s: failed to add phy ctxt\n", __func__);
3219                 goto out;
3220         }
3221         in->in_phyctxt = &sc->sc_phyctxt[0];
3222
3223         if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
3224                 device_printf(sc->sc_dev,
3225                     "%s: binding cmd failed\n", __func__);
3226                 goto out;
3227         }
3228
3229         if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
3230                 device_printf(sc->sc_dev,
3231                     "%s: failed to add sta\n", __func__);
3232                 goto out;
3233         }
3234
3235         /* a bit superfluous? */
3236         while (sc->sc_auth_prot)
3237                 msleep(&sc->sc_auth_prot, &sc->sc_mtx, 0, "iwmauth", 0);
3238         sc->sc_auth_prot = 1;
3239
3240         duration = min(IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS,
3241             200 + in->in_ni.ni_intval);
3242         min_duration = min(IWM_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS,
3243             100 + in->in_ni.ni_intval);
3244         iwm_mvm_protect_session(sc, in, duration, min_duration, 500);
3245
3246         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3247             "%s: waiting for auth_prot\n", __func__);
3248         while (sc->sc_auth_prot != 2) {
3249                 /*
3250                  * well, meh, but if the kernel is sleeping for half a
3251                  * second, we have bigger problems
3252                  */
3253                 if (sc->sc_auth_prot == 0) {
3254                         device_printf(sc->sc_dev,
3255                             "%s: missed auth window!\n", __func__);
3256                         error = ETIMEDOUT;
3257                         goto out;
3258                 } else if (sc->sc_auth_prot == -1) {
3259                         device_printf(sc->sc_dev,
3260                             "%s: no time event, denied!\n", __func__);
3261                         sc->sc_auth_prot = 0;
3262                         error = EAUTH;
3263                         goto out;
3264                 }
3265                 msleep(&sc->sc_auth_prot, &sc->sc_mtx, 0, "iwmau2", 0);
3266         }
3267         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "<-%s\n", __func__);
3268         error = 0;
3269 out:
3270         ieee80211_free_node(ni);
3271         return (error);
3272 }
3273
3274 static int
3275 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
3276 {
3277         struct iwm_node *in = IWM_NODE(vap->iv_bss);
3278         int error;
3279
3280         if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
3281                 device_printf(sc->sc_dev,
3282                     "%s: failed to update STA\n", __func__);
3283                 return error;
3284         }
3285
3286         in->in_assoc = 1;
3287         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
3288                 device_printf(sc->sc_dev,
3289                     "%s: failed to update MAC\n", __func__);
3290                 return error;
3291         }
3292
3293         return 0;
3294 }
3295
3296 static int
3297 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
3298 {
3299         /*
3300          * Ok, so *technically* the proper set of calls for going
3301          * from RUN back to SCAN is:
3302          *
3303          * iwm_mvm_power_mac_disable(sc, in);
3304          * iwm_mvm_mac_ctxt_changed(sc, in);
3305          * iwm_mvm_rm_sta(sc, in);
3306          * iwm_mvm_update_quotas(sc, NULL);
3307          * iwm_mvm_mac_ctxt_changed(sc, in);
3308          * iwm_mvm_binding_remove_vif(sc, in);
3309          * iwm_mvm_mac_ctxt_remove(sc, in);
3310          *
3311          * However, that freezes the device no matter which permutations
3312          * and modifications are attempted.  Obviously, this driver is missing
3313          * something since it works in the Linux driver, but figuring out what
3314          * is missing is a little more complicated.  Now, since we're going
3315          * back to nothing anyway, we'll just do a complete device reset.
3316          * Up yours, device!
3317          */
3318         //iwm_mvm_flush_tx_path(sc, 0xf, 1);
3319         iwm_stop_device(sc);
3320         iwm_init_hw(sc);
3321         if (in)
3322                 in->in_assoc = 0;
3323         return 0;
3324
3325 #if 0
3326         int error;
3327
3328         iwm_mvm_power_mac_disable(sc, in);
3329
3330         if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
3331                 device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
3332                 return error;
3333         }
3334
3335         if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
3336                 device_printf(sc->sc_dev, "sta remove fail %d\n", error);
3337                 return error;
3338         }
3339         error = iwm_mvm_rm_sta(sc, in);
3340         in->in_assoc = 0;
3341         iwm_mvm_update_quotas(sc, NULL);
3342         if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
3343                 device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
3344                 return error;
3345         }
3346         iwm_mvm_binding_remove_vif(sc, in);
3347
3348         iwm_mvm_mac_ctxt_remove(sc, in);
3349
3350         return error;
3351 #endif
3352 }
3353
3354 static struct ieee80211_node *
3355 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3356 {
3357         return malloc(sizeof (struct iwm_node), M_80211_NODE,
3358             M_NOWAIT | M_ZERO);
3359 }
3360
3361 static void
3362 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
3363 {
3364         struct ieee80211_node *ni = &in->in_ni;
3365         struct iwm_lq_cmd *lq = &in->in_lq;
3366         int nrates = ni->ni_rates.rs_nrates;
3367         int i, ridx, tab = 0;
3368         int txant = 0;
3369
3370         if (nrates > nitems(lq->rs_table)) {
3371                 device_printf(sc->sc_dev,
3372                     "%s: node supports %d rates, driver handles "
3373                     "only %zu\n", __func__, nrates, nitems(lq->rs_table));
3374                 return;
3375         }
3376
3377         /*
3378          * XXX .. and most of iwm_node is not initialised explicitly;
3379          * it's all just 0x0 passed to the firmware.
3380          */
3381
3382         /* first figure out which rates we should support */
3383         /* XXX TODO: this isn't 11n aware /at all/ */
3384         memset(&in->in_ridx, -1, sizeof(in->in_ridx));
3385         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3386             "%s: nrates=%d\n", __func__, nrates);
3387         for (i = 0; i < nrates; i++) {
3388                 int rate = ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL;
3389
3390                 /* Map 802.11 rate to HW rate index. */
3391                 for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
3392                         if (iwm_rates[ridx].rate == rate)
3393                                 break;
3394                 if (ridx > IWM_RIDX_MAX) {
3395                         device_printf(sc->sc_dev,
3396                             "%s: WARNING: device rate for %d not found!\n",
3397                             __func__, rate);
3398                 } else {
3399                         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3400                             "%s: rate: i: %d, rate=%d, ridx=%d\n",
3401                             __func__,
3402                             i,
3403                             rate,
3404                             ridx);
3405                         in->in_ridx[i] = ridx;
3406                 }
3407         }
3408
3409         /* then construct a lq_cmd based on those */
3410         memset(lq, 0, sizeof(*lq));
3411         lq->sta_id = IWM_STATION_ID;
3412
3413         /*
3414          * are these used? (we don't do SISO or MIMO)
3415          * need to set them to non-zero, though, or we get an error.
3416          */
3417         lq->single_stream_ant_msk = 1;
3418         lq->dual_stream_ant_msk = 1;
3419
3420         /*
3421          * Build the actual rate selection table.
3422          * The lowest bits are the rates.  Additionally,
3423          * CCK needs bit 9 to be set.  The rest of the bits
3424          * we add to the table select the tx antenna
3425          * we add to the table select the tx antenna.
3426          * Note that we add the rates starting with the highest rate
3427          */
3428         /*
3429          * XXX TODO: this should be looping over the min of nrates
3430          * and LQ_MAX_RETRY_NUM.  Sigh.
3431          */
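        /*
         * Editorial sketch of a single table entry, using only symbols that
         * appear in the loop below:
         *
         *	tab = iwm_rates[ridx].plcp		  (PLCP code in the low bits)
         *	    | (nextant << IWM_RATE_MCS_ANT_POS)	  (TX antenna selection)
         *	    | IWM_RATE_MCS_CCK_MSK;		  (CCK rates only; bit 9)
         */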
3432         for (i = 0; i < nrates; i++) {
3433                 int nextant;
3434
3435                 if (txant == 0)
3436                         txant = IWM_FW_VALID_TX_ANT(sc);
3437                 nextant = 1<<(ffs(txant)-1);
3438                 txant &= ~nextant;
3439
3440                 /*
3441                  * Map the rate id into a rate index into
3442                  * our hardware table containing the
3443                  * configuration to use for this rate.
3444                  */
3445                 ridx = in->in_ridx[(nrates-1)-i];
3446                 tab = iwm_rates[ridx].plcp;
3447                 tab |= nextant << IWM_RATE_MCS_ANT_POS;
3448                 if (IWM_RIDX_IS_CCK(ridx))
3449                         tab |= IWM_RATE_MCS_CCK_MSK;
3450                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3451                     "station rate i=%d, rate=%d, hw=%x\n",
3452                     i, iwm_rates[ridx].rate, tab);
3453                 lq->rs_table[i] = htole32(tab);
3454         }
3455         /* then fill the rest with the lowest possible rate */
3456         for (i = nrates; i < nitems(lq->rs_table); i++) {
3457                 KASSERT(tab != 0, ("invalid tab"));
3458                 lq->rs_table[i] = htole32(tab);
3459         }
3460 }
3461
3462 static int
3463 iwm_media_change(struct ifnet *ifp)
3464 {
3465         struct ieee80211vap *vap = ifp->if_softc;
3466         struct ieee80211com *ic = vap->iv_ic;
3467         struct iwm_softc *sc = ic->ic_softc;
3468         int error;
3469
3470         error = ieee80211_media_change(ifp);
3471         if (error != ENETRESET)
3472                 return error;
3473
3474         IWM_LOCK(sc);
3475         if (ic->ic_nrunning > 0) {
3476                 iwm_stop(sc);
3477                 iwm_init(sc);
3478         }
3479         IWM_UNLOCK(sc);
3480         return error;
3481 }
3482
3483
3484 static int
3485 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
3486 {
3487         struct iwm_vap *ivp = IWM_VAP(vap);
3488         struct ieee80211com *ic = vap->iv_ic;
3489         struct iwm_softc *sc = ic->ic_softc;
3490         struct iwm_node *in;
3491         int error;
3492
3493         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
3494             "switching state %s -> %s\n",
3495             ieee80211_state_name[vap->iv_state],
3496             ieee80211_state_name[nstate]);
3497         IEEE80211_UNLOCK(ic);
3498         IWM_LOCK(sc);
3499         /* disable beacon filtering if we're hopping out of RUN */
3500         if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
3501                 iwm_mvm_disable_beacon_filter(sc);
3502
3503                 if (((in = IWM_NODE(vap->iv_bss)) != NULL))
3504                         in->in_assoc = 0;
3505
3506                 iwm_release(sc, NULL);
3507
3508                 /*
3509                  * It's impossible to directly go RUN->SCAN. If we iwm_release()
3510                  * above then the card will be completely reinitialized,
3511                  * so the driver must do everything necessary to bring the card
3512                  * from INIT to SCAN.
3513                  *
3514                  * Additionally, upon receiving deauth frame from AP,
3515                  * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
3516                  * state. This will also fail with this driver, so bring the FSM
3517                  * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
3518                  *
3519                  * XXX TODO: fix this for FreeBSD!
3520                  */
3521                 if (nstate == IEEE80211_S_SCAN ||
3522                     nstate == IEEE80211_S_AUTH ||
3523                     nstate == IEEE80211_S_ASSOC) {
3524                         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
3525                             "Force transition to INIT; MGT=%d\n", arg);
3526                         IWM_UNLOCK(sc);
3527                         IEEE80211_LOCK(ic);
3528                         vap->iv_newstate(vap, IEEE80211_S_INIT, arg);
3529                         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
3530                             "Going INIT->SCAN\n");
3531                         nstate = IEEE80211_S_SCAN;
3532                         IEEE80211_UNLOCK(ic);
3533                         IWM_LOCK(sc);
3534                 }
3535         }
3536
3537         switch (nstate) {
3538         case IEEE80211_S_INIT:
3539                 sc->sc_scanband = 0;
3540                 break;
3541
3542         case IEEE80211_S_AUTH:
3543                 if ((error = iwm_auth(vap, sc)) != 0) {
3544                         device_printf(sc->sc_dev,
3545                             "%s: could not move to auth state: %d\n",
3546                             __func__, error);
3547                         break;
3548                 }
3549                 break;
3550
3551         case IEEE80211_S_ASSOC:
3552                 if ((error = iwm_assoc(vap, sc)) != 0) {
3553                         device_printf(sc->sc_dev,
3554                             "%s: failed to associate: %d\n", __func__,
3555                             error);
3556                         break;
3557                 }
3558                 break;
3559
3560         case IEEE80211_S_RUN:
3561         {
3562                 struct iwm_host_cmd cmd = {
3563                         .id = IWM_LQ_CMD,
3564                         .len = { sizeof(in->in_lq), },
3565                         .flags = IWM_CMD_SYNC,
3566                 };
3567
3568                 /* Update the association state, now we have it all */
3569                 /* (e.g. associd comes in at this point) */
3570                 error = iwm_assoc(vap, sc);
3571                 if (error != 0) {
3572                         device_printf(sc->sc_dev,
3573                             "%s: failed to update association state: %d\n",
3574                             __func__,
3575                             error);
3576                         break;
3577                 }
3578
3579                 in = IWM_NODE(vap->iv_bss);
3580                 iwm_mvm_power_mac_update_mode(sc, in);
3581                 iwm_mvm_enable_beacon_filter(sc, in);
3582                 iwm_mvm_update_quotas(sc, in);
3583                 iwm_setrates(sc, in);
3584
3585                 cmd.data[0] = &in->in_lq;
3586                 if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
3587                         device_printf(sc->sc_dev,
3588                             "%s: IWM_LQ_CMD failed\n", __func__);
3589                 }
3590
3591                 break;
3592         }
3593
3594         default:
3595                 break;
3596         }
3597         IWM_UNLOCK(sc);
3598         IEEE80211_LOCK(ic);
3599
3600         return (ivp->iv_newstate(vap, nstate, arg));
3601 }
3602
3603 void
3604 iwm_endscan_cb(void *arg, int pending)
3605 {
3606         struct iwm_softc *sc = arg;
3607         struct ieee80211com *ic = &sc->sc_ic;
3608         int done;
3609         int error;
3610
3611         IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
3612             "%s: scan ended\n",
3613             __func__);
3614
3615         IWM_LOCK(sc);
3616         if (sc->sc_scanband == IEEE80211_CHAN_2GHZ &&
3617             sc->sc_nvm.sku_cap_band_52GHz_enable) {
3618                 done = 0;
3619                 if ((error = iwm_mvm_scan_request(sc,
3620                     IEEE80211_CHAN_5GHZ, 0, NULL, 0)) != 0) {
3621                         device_printf(sc->sc_dev, "could not initiate scan\n");
3622                         done = 1;
3623                 }
3624         } else {
3625                 done = 1;
3626         }
3627
3628         if (done) {
3629                 IWM_UNLOCK(sc);
3630                 ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
3631                 IWM_LOCK(sc);
3632                 sc->sc_scanband = 0;
3633         }
3634         IWM_UNLOCK(sc);
3635 }
3636
3637 static int
3638 iwm_init_hw(struct iwm_softc *sc)
3639 {
3640         struct ieee80211com *ic = &sc->sc_ic;
3641         int error, i, qid;
3642
3643         if ((error = iwm_start_hw(sc)) != 0)
3644                 return error;
3645
3646         if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
3647                 return error;
3648         }
3649
3650         /*
3651          * should stop and start the HW since that INIT
3652          * image was just loaded
3653          */
3654         iwm_stop_device(sc);
3655         if ((error = iwm_start_hw(sc)) != 0) {
3656                 device_printf(sc->sc_dev, "could not initialize hardware\n");
3657                 return error;
3658         }
3659
3660         /* restart, this time with the regular firmware */
3661         error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
3662         if (error) {
3663                 device_printf(sc->sc_dev, "could not load firmware\n");
3664                 goto error;
3665         }
3666
3667         if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
3668                 goto error;
3669
3670         /* Send phy db control command and then phy db calibration */
3671         if ((error = iwm_send_phy_db_data(sc)) != 0)
3672                 goto error;
3673
3674         if ((error = iwm_send_phy_cfg_cmd(sc)) != 0)
3675                 goto error;
3676
3677         /* Add auxiliary station for scanning */
3678         if ((error = iwm_mvm_add_aux_sta(sc)) != 0)
3679                 goto error;
3680
3681         for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
3682                 /*
3683                  * The channel used here isn't relevant as it's
3684                  * going to be overwritten in the other flows.
3685                  * For now use the first channel we have.
3686                  */
3687                 if ((error = iwm_mvm_phy_ctxt_add(sc,
3688                     &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
3689                         goto error;
3690         }
3691
3692         error = iwm_mvm_power_update_device(sc);
3693         if (error)
3694                 goto error;
3695
3696         /* Mark TX rings as active. */
3697         for (qid = 0; qid < 4; qid++) {
3698                 iwm_enable_txq(sc, qid, qid);
3699         }
3700
3701         return 0;
3702
3703  error:
3704         iwm_stop_device(sc);
3705         return error;
3706 }
3707
3708 /* Allow multicast from our BSSID. */
3709 static int
3710 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
3711 {
3712         struct ieee80211_node *ni = vap->iv_bss;
3713         struct iwm_mcast_filter_cmd *cmd;
3714         size_t size;
3715         int error;
3716
3717         size = roundup(sizeof(*cmd), 4);
3718         cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
3719         if (cmd == NULL)
3720                 return ENOMEM;
3721         cmd->filter_own = 1;
3722         cmd->port_id = 0;
3723         cmd->count = 0;
3724         cmd->pass_all = 1;
3725         IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
3726
3727         error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
3728             IWM_CMD_SYNC, size, cmd);
3729         free(cmd, M_DEVBUF);
3730
3731         return (error);
3732 }
3733
3734 static void
3735 iwm_init(struct iwm_softc *sc)
3736 {
3737         int error;
3738
3739         if (sc->sc_flags & IWM_FLAG_HW_INITED) {
3740                 return;
3741         }
3742         sc->sc_generation++;
3743         sc->sc_flags &= ~IWM_FLAG_STOPPED;
3744
3745         if ((error = iwm_init_hw(sc)) != 0) {
3746                 iwm_stop(sc);
3747                 return;
3748         }
3749
3750         /*
3751          * Ok, firmware loaded and we are jogging
3752          */
3753         sc->sc_flags |= IWM_FLAG_HW_INITED;
3754         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
3755 }
3756
3757 static int
3758 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
3759 {
3760         struct iwm_softc *sc;
3761         int error;
3762
3763         sc = ic->ic_softc;
3764
3765         IWM_LOCK(sc);
3766         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3767                 IWM_UNLOCK(sc);
3768                 return (ENXIO);
3769         }
3770         error = mbufq_enqueue(&sc->sc_snd, m);
3771         if (error) {
3772                 IWM_UNLOCK(sc);
3773                 return (error);
3774         }
3775         iwm_start(sc);
3776         IWM_UNLOCK(sc);
3777         return (0);
3778 }
3779
3780 /*
3781  * Dequeue packets from sendq and call send.
3782  */
3783 static void
3784 iwm_start(struct iwm_softc *sc)
3785 {
3786         struct ieee80211_node *ni;
3787         struct mbuf *m;
3788         int ac = 0;
3789
3790         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
3791         while (sc->qfullmsk == 0 &&
3792                 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
3793                 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
3794                 if (iwm_tx(sc, m, ni, ac) != 0) {
3795                         if_inc_counter(ni->ni_vap->iv_ifp,
3796                             IFCOUNTER_OERRORS, 1);
3797                         ieee80211_free_node(ni);
3798                         continue;
3799                 }
3800                 sc->sc_tx_timer = 15;
3801         }
3802         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
3803 }
3804
3805 static void
3806 iwm_stop(struct iwm_softc *sc)
3807 {
3808
3809         sc->sc_flags &= ~IWM_FLAG_HW_INITED;
3810         sc->sc_flags |= IWM_FLAG_STOPPED;
3811         sc->sc_generation++;
3812         sc->sc_scanband = 0;
3813         sc->sc_auth_prot = 0;
3814         sc->sc_tx_timer = 0;
3815         iwm_stop_device(sc);
3816 }
3817
3818 static void
3819 iwm_watchdog(void *arg)
3820 {
3821         struct iwm_softc *sc = arg;
3822
3823         if (sc->sc_tx_timer > 0) {
3824                 if (--sc->sc_tx_timer == 0) {
3825                         device_printf(sc->sc_dev, "device timeout\n");
3826 #ifdef IWM_DEBUG
3827                         iwm_nic_error(sc);
3828 #endif
3829                         iwm_stop(sc);
3830                         counter_u64_add(sc->sc_ic.ic_oerrors, 1);
3831                         return;
3832                 }
3833         }
3834         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
3835 }
3836
3837 static void
3838 iwm_parent(struct ieee80211com *ic)
3839 {
3840         struct iwm_softc *sc = ic->ic_softc;
3841         int startall = 0;
3842
3843         IWM_LOCK(sc);
3844         if (ic->ic_nrunning > 0) {
3845                 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
3846                         iwm_init(sc);
3847                         startall = 1;
3848                 }
3849         } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
3850                 iwm_stop(sc);
3851         IWM_UNLOCK(sc);
3852         if (startall)
3853                 ieee80211_start_all(ic);
3854 }
3855
3856 /*
3857  * The interrupt side of things
3858  */
3859
3860 /*
3861  * error dumping routines are from iwlwifi/mvm/utils.c
3862  */
3863
3864 /*
3865  * Note: This structure is read from the device with IO accesses,
3866  * and the reading already does the endian conversion. As it is
3867  * read with uint32_t-sized accesses, any members with a different size
3868  * need to be ordered correctly though!
3869  */
3870 struct iwm_error_event_table {
3871         uint32_t valid;         /* (nonzero) valid, (0) log is empty */
3872         uint32_t error_id;              /* type of error */
3873         uint32_t pc;                    /* program counter */
3874         uint32_t blink1;                /* branch link */
3875         uint32_t blink2;                /* branch link */
3876         uint32_t ilink1;                /* interrupt link */
3877         uint32_t ilink2;                /* interrupt link */
3878         uint32_t data1;         /* error-specific data */
3879         uint32_t data2;         /* error-specific data */
3880         uint32_t data3;         /* error-specific data */
3881         uint32_t bcon_time;             /* beacon timer */
3882         uint32_t tsf_low;               /* network timestamp function timer */
3883         uint32_t tsf_hi;                /* network timestamp function timer */
3884         uint32_t gp1;           /* GP1 timer register */
3885         uint32_t gp2;           /* GP2 timer register */
3886         uint32_t gp3;           /* GP3 timer register */
3887         uint32_t ucode_ver;             /* uCode version */
3888         uint32_t hw_ver;                /* HW Silicon version */
3889         uint32_t brd_ver;               /* HW board version */
3890         uint32_t log_pc;                /* log program counter */
3891         uint32_t frame_ptr;             /* frame pointer */
3892         uint32_t stack_ptr;             /* stack pointer */
3893         uint32_t hcmd;          /* last host command header */
3894         uint32_t isr0;          /* isr status register LMPM_NIC_ISR0:
3895                                  * rxtx_flag */
3896         uint32_t isr1;          /* isr status register LMPM_NIC_ISR1:
3897                                  * host_flag */
3898         uint32_t isr2;          /* isr status register LMPM_NIC_ISR2:
3899                                  * enc_flag */
3900         uint32_t isr3;          /* isr status register LMPM_NIC_ISR3:
3901                                  * time_flag */
3902         uint32_t isr4;          /* isr status register LMPM_NIC_ISR4:
3903                                  * wico interrupt */
3904         uint32_t isr_pref;              /* isr status register LMPM_NIC_PREF_STAT */
3905         uint32_t wait_event;            /* wait event() caller address */
3906         uint32_t l2p_control;   /* L2pControlField */
3907         uint32_t l2p_duration;  /* L2pDurationField */
3908         uint32_t l2p_mhvalid;   /* L2pMhValidBits */
3909         uint32_t l2p_addr_match;        /* L2pAddrMatchStat */
3910         uint32_t lmpm_pmg_sel;  /* indicates which clocks are turned on
3911                                  * (LMPM_PMG_SEL) */
3912         uint32_t u_timestamp;   /* indicates the date and time of the
3913                                  * compilation */
3914         uint32_t flow_handler;  /* FH read/write pointers, RX credit */
3915 } __packed;
3916
3917 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
3918 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
3919
3920 #ifdef IWM_DEBUG
3921 struct {
3922         const char *name;
3923         uint8_t num;
3924 } advanced_lookup[] = {
3925         { "NMI_INTERRUPT_WDG", 0x34 },
3926         { "SYSASSERT", 0x35 },
3927         { "UCODE_VERSION_MISMATCH", 0x37 },
3928         { "BAD_COMMAND", 0x38 },
3929         { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
3930         { "FATAL_ERROR", 0x3D },
3931         { "NMI_TRM_HW_ERR", 0x46 },
3932         { "NMI_INTERRUPT_TRM", 0x4C },
3933         { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
3934         { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
3935         { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
3936         { "NMI_INTERRUPT_HOST", 0x66 },
3937         { "NMI_INTERRUPT_ACTION_PT", 0x7C },
3938         { "NMI_INTERRUPT_UNKNOWN", 0x84 },
3939         { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
3940         { "ADVANCED_SYSASSERT", 0 },
3941 };
3942
3943 static const char *
3944 iwm_desc_lookup(uint32_t num)
3945 {
3946         int i;
3947
3948         for (i = 0; i < nitems(advanced_lookup) - 1; i++)
3949                 if (advanced_lookup[i].num == num)
3950                         return advanced_lookup[i].name;
3951
3952         /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
3953         return advanced_lookup[i].name;
3954 }
3955
3956 /*
3957  * Support for dumping the error log seemed like a good idea ...
3958  * but it's mostly hex junk and the only sensible thing is the
3959  * hw/ucode revision (which we know anyway).  Since it's here,
3960  * I'll just leave it in, just in case e.g. the Intel guys want to
3961  * help us decipher some "ADVANCED_SYSASSERT" later.
3962  */
3963 static void
3964 iwm_nic_error(struct iwm_softc *sc)
3965 {
3966         struct iwm_error_event_table table;
3967         uint32_t base;
3968
3969         device_printf(sc->sc_dev, "dumping device error log\n");
3970         base = sc->sc_uc.uc_error_event_table;
3971         if (base < 0x800000 || base >= 0x80C000) {
3972                 device_printf(sc->sc_dev,
3973                     "Not valid error log pointer 0x%08x\n", base);
3974                 return;
3975         }
3976
3977         if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t)) != 0) {
3978                 device_printf(sc->sc_dev, "reading errlog failed\n");
3979                 return;
3980         }
3981
3982         if (!table.valid) {
3983                 device_printf(sc->sc_dev, "errlog not found, skipping\n");
3984                 return;
3985         }
3986
3987         if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
3988                 device_printf(sc->sc_dev, "Start IWL Error Log Dump:\n");
3989                 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
3990                     sc->sc_flags, table.valid);
3991         }
3992
3993         device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
3994                 iwm_desc_lookup(table.error_id));
3995         device_printf(sc->sc_dev, "%08X | uPc\n", table.pc);
3996         device_printf(sc->sc_dev, "%08X | branchlink1\n", table.blink1);
3997         device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
3998         device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
3999         device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
4000         device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
4001         device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
4002         device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
4003         device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
4004         device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
4005         device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
4006         device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
4007         device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
4008         device_printf(sc->sc_dev, "%08X | time gp3\n", table.gp3);
4009         device_printf(sc->sc_dev, "%08X | uCode version\n", table.ucode_ver);
4010         device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
4011         device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
4012         device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
4013         device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
4014         device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
4015         device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
4016         device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
4017         device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
4018         device_printf(sc->sc_dev, "%08X | isr_pref\n", table.isr_pref);
4019         device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
4020         device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
4021         device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
4022         device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
4023         device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
4024         device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
4025         device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
4026         device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
4027 }
4028 #endif
4029
4030 #define SYNC_RESP_STRUCT(_var_, _pkt_)                                  \
4031 do {                                                                    \
4032         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
4033         _var_ = (void *)((_pkt_)+1);                                    \
4034 } while (/*CONSTCOND*/0)
4035
4036 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)                              \
4037 do {                                                                    \
4038         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
4039         _ptr_ = (void *)((_pkt_)+1);                                    \
4040 } while (/*CONSTCOND*/0)
4041
4042 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
4043
4044 /*
4045  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
4046  * Basic structure from if_iwn
4047  */
4048 static void
4049 iwm_notif_intr(struct iwm_softc *sc)
4050 {
4051         uint16_t hw;
4052
4053         bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
4054             BUS_DMASYNC_POSTREAD);
4055
4056         hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
4057         while (sc->rxq.cur != hw) {
4058                 struct iwm_rx_ring *ring = &sc->rxq;
4059                 struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
4060                 struct iwm_rx_packet *pkt;
4061                 struct iwm_cmd_response *cresp;
4062                 int qid, idx;
4063
4064                 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
4065                     BUS_DMASYNC_POSTREAD);
4066                 pkt = mtod(data->m, struct iwm_rx_packet *);
4067
4068                 qid = pkt->hdr.qid & ~0x80;
4069                 idx = pkt->hdr.idx;
4070
4071                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
4072                     "rx packet qid=%d idx=%d flags=%x type=%x %d %d\n",
4073                     pkt->hdr.qid & ~0x80, pkt->hdr.idx, pkt->hdr.flags,
4074                     pkt->hdr.code, sc->rxq.cur, hw);
4075
4076                 /*
4077                  * randomly get these from the firmware, no idea why.
4078                  * they at least seem harmless, so just ignore them for now
4079                  */
4080                 if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
4081                     || pkt->len_n_flags == htole32(0x55550000))) {
4082                         ADVANCE_RXQ(sc);
4083                         continue;
4084                 }
4085
4086                 switch (pkt->hdr.code) {
4087                 case IWM_REPLY_RX_PHY_CMD:
4088                         iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
4089                         break;
4090
4091                 case IWM_REPLY_RX_MPDU_CMD:
4092                         iwm_mvm_rx_rx_mpdu(sc, pkt, data);
4093                         break;
4094
4095                 case IWM_TX_CMD:
4096                         iwm_mvm_rx_tx_cmd(sc, pkt, data);
4097                         break;
4098
4099                 case IWM_MISSED_BEACONS_NOTIFICATION: {
4100                         struct iwm_missed_beacons_notif *resp;
4101                         int missed;
4102
4103                         /* XXX look at mac_id to determine interface ID */
4104                         struct ieee80211com *ic = &sc->sc_ic;
4105                         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4106
4107                         SYNC_RESP_STRUCT(resp, pkt);
4108                         missed = le32toh(resp->consec_missed_beacons);
4109
4110                         IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
4111                             "%s: MISSED_BEACON: mac_id=%d, "
4112                             "consec_since_last_rx=%d, consec=%d, num_expect=%d "
4113                             "num_rx=%d\n",
4114                             __func__,
4115                             le32toh(resp->mac_id),
4116                             le32toh(resp->consec_missed_beacons_since_last_rx),
4117                             le32toh(resp->consec_missed_beacons),
4118                             le32toh(resp->num_expected_beacons),
4119                             le32toh(resp->num_recvd_beacons));
4120
4121                         /* Be paranoid */
4122                         if (vap == NULL)
4123                                 break;
4124
4125                         /* XXX no net80211 locking? */
4126                         if (vap->iv_state == IEEE80211_S_RUN &&
4127                             (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
4128                                 if (missed > vap->iv_bmissthreshold) {
4129                                         /* XXX bad locking; turn into task */
4130                                         IWM_UNLOCK(sc);
4131                                         ieee80211_beacon_miss(ic);
4132                                         IWM_LOCK(sc);
4133                                 }
4134                         }
4135
4136                         break; }
4137
4138                 case IWM_MVM_ALIVE: {
4139                         struct iwm_mvm_alive_resp *resp;
4140                         SYNC_RESP_STRUCT(resp, pkt);
4141
4142                         sc->sc_uc.uc_error_event_table
4143                             = le32toh(resp->error_event_table_ptr);
4144                         sc->sc_uc.uc_log_event_table
4145                             = le32toh(resp->log_event_table_ptr);
4146                         sc->sched_base = le32toh(resp->scd_base_ptr);
4147                         sc->sc_uc.uc_ok = resp->status == IWM_ALIVE_STATUS_OK;
4148
4149                         sc->sc_uc.uc_intr = 1;
4150                         wakeup(&sc->sc_uc);
4151                         break; }
4152
4153                 case IWM_CALIB_RES_NOTIF_PHY_DB: {
4154                         struct iwm_calib_res_notif_phy_db *phy_db_notif;
4155                         SYNC_RESP_STRUCT(phy_db_notif, pkt);
4156
4157                         iwm_phy_db_set_section(sc, phy_db_notif);
4158
4159                         break; }
4160
4161                 case IWM_STATISTICS_NOTIFICATION: {
4162                         struct iwm_notif_statistics *stats;
4163                         SYNC_RESP_STRUCT(stats, pkt);
4164                         memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
4165                         sc->sc_noise = iwm_get_noise(&stats->rx.general);
4166                         break; }
4167
4168                 case IWM_NVM_ACCESS_CMD:
4169                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
4170                                 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
4171                                     BUS_DMASYNC_POSTREAD);
4172                                 memcpy(sc->sc_cmd_resp,
4173                                     pkt, sizeof(sc->sc_cmd_resp));
4174                         }
4175                         break;
4176
4177                 case IWM_PHY_CONFIGURATION_CMD:
4178                 case IWM_TX_ANT_CONFIGURATION_CMD:
4179                 case IWM_ADD_STA:
4180                 case IWM_MAC_CONTEXT_CMD:
4181                 case IWM_REPLY_SF_CFG_CMD:
4182                 case IWM_POWER_TABLE_CMD:
4183                 case IWM_PHY_CONTEXT_CMD:
4184                 case IWM_BINDING_CONTEXT_CMD:
4185                 case IWM_TIME_EVENT_CMD:
4186                 case IWM_SCAN_REQUEST_CMD:
4187                 case IWM_REPLY_BEACON_FILTERING_CMD:
4188                 case IWM_MAC_PM_POWER_TABLE:
4189                 case IWM_TIME_QUOTA_CMD:
4190                 case IWM_REMOVE_STA:
4191                 case IWM_TXPATH_FLUSH:
4192                 case IWM_LQ_CMD:
4193                         SYNC_RESP_STRUCT(cresp, pkt);
4194                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
4195                                 memcpy(sc->sc_cmd_resp,
4196                                     pkt, sizeof(*pkt)+sizeof(*cresp));
4197                         }
4198                         break;
4199
4200                 /* ignore */
4201                 case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
4202                         break;
4203
4204                 case IWM_INIT_COMPLETE_NOTIF:
4205                         sc->sc_init_complete = 1;
4206                         wakeup(&sc->sc_init_complete);
4207                         break;
4208
4209                 case IWM_SCAN_COMPLETE_NOTIFICATION: {
4210                         struct iwm_scan_complete_notif *notif;
4211                         SYNC_RESP_STRUCT(notif, pkt);
4212                         taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
4213                         break; }
4214
4215                 case IWM_REPLY_ERROR: {
4216                         struct iwm_error_resp *resp;
4217                         SYNC_RESP_STRUCT(resp, pkt);
4218
4219                         device_printf(sc->sc_dev,
4220                             "firmware error 0x%x, cmd 0x%x\n",
4221                             le32toh(resp->error_type),
4222                             resp->cmd_id);
4223                         break; }
4224
4225                 case IWM_TIME_EVENT_NOTIFICATION: {
4226                         struct iwm_time_event_notif *notif;
4227                         SYNC_RESP_STRUCT(notif, pkt);
4228
4229                         if (notif->status) {
4230                                 if (le32toh(notif->action) &
4231                                     IWM_TE_V2_NOTIF_HOST_EVENT_START)
4232                                         sc->sc_auth_prot = 2;
4233                                 else
4234                                         sc->sc_auth_prot = 0;
4235                         } else {
4236                                 sc->sc_auth_prot = -1;
4237                         }
4238                         IWM_DPRINTF(sc, IWM_DEBUG_INTR,
4239                             "%s: time event notification auth_prot=%d\n",
4240                                 __func__, sc->sc_auth_prot);
4241
4242                         wakeup(&sc->sc_auth_prot);
4243                         break; }
4244
4245                 case IWM_MCAST_FILTER_CMD:
4246                         break;
4247
4248                 default:
4249                         device_printf(sc->sc_dev,
4250                             "frame %d/%d %x UNHANDLED (this should "
4251                             "not happen)\n", qid, idx,
4252                             pkt->len_n_flags);
4253                         break;
4254                 }
4255
4256                 /*
4257                  * Why test bit 0x80?  The Linux driver:
4258                  *
4259                  * There is one exception:  uCode sets bit 15 when it
4260                  * originates the response/notification, i.e. when the
4261                  * response/notification is not a direct response to a
4262                  * command sent by the driver.  For example, uCode issues
4263                  * IWM_REPLY_RX when it sends a received frame to the driver;
4264                  * it is not a direct response to any driver command.
4265                  *
4266                  * Ok, so since when is 7 == 15?  Well, the Linux driver
4267                  * uses a slightly different format for pkt->hdr, and "qid"
4268                  * is actually the upper byte of a two-byte field.
4269                  */
4270                 if (!(pkt->hdr.qid & (1 << 7))) {
4271                         iwm_cmd_done(sc, pkt);
4272                 }
4273
4274                 ADVANCE_RXQ(sc);
4275         }
4276
4277         IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
4278             IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
4279
        /*
         * Tell the firmware what we have processed.  The hardware wants
         * the write index rounded down to a multiple of 8, so mask off
         * the low bits before writing it back.
         */
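        /*
         * For example, hw == 37 becomes 36 after the decrement, and
         * 32 (36 & ~7) is what gets written back.
         */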
4285         hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
4286         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
4287 }
4288
4289 static void
4290 iwm_intr(void *arg)
4291 {
4292         struct iwm_softc *sc = arg;
4293         int handled = 0;
4294         int r1, r2, rv = 0;
4295         int isperiodic = 0;
4296
4297         IWM_LOCK(sc);
4298         IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
4299
4300         if (sc->sc_flags & IWM_FLAG_USE_ICT) {
4301                 uint32_t *ict = sc->ict_dma.vaddr;
4302                 int tmp;
4303
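                /*
                 * In ICT mode the device DMAs its interrupt cause bits
                 * into this table in host memory, so the causes can be
                 * collected without a register read per interrupt.
                 */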
                tmp = le32toh(ict[sc->ict_cur]);
4305                 if (!tmp)
4306                         goto out_ena;
4307
                /*
                 * Something is pending: walk the ICT table, OR-ing up and
                 * clearing entries until we hit an empty slot, so that
                 * every cause written so far is collected.
                 */
4311                 r1 = r2 = 0;
4312                 while (tmp) {
4313                         r1 |= tmp;
4314                         ict[sc->ict_cur] = 0;
4315                         sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
                        tmp = le32toh(ict[sc->ict_cur]);
4317                 }
4318
                /* An all-ones value is not a real interrupt cause; discard it. */
                if (r1 == 0xffffffff)
                        r1 = 0;

                /*
                 * Fold the packed ICT value back into the IWM_CSR_INT bit
                 * layout: the low byte keeps its place and the second byte
                 * moves to the top half.  Per the Linux iwlwifi driver,
                 * bits 18/19 set here imply the (occasionally lost) RX bit,
                 * hence the 0x8000 fixup before the expansion.
                 */
                if (r1 & 0xc0000)
                        r1 |= 0x8000;
                r1 = (0xff & r1) | ((0xff00 & r1) << 16);
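                /*
                 * For example, a raw ICT value of 0x000c0012 becomes
                 * 0x000c8012 after the fixup and then expands to
                 * 0x80000012, the 0x80 byte landing in bits 24-31.
                 */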
4327         } else {
4328                 r1 = IWM_READ(sc, IWM_CSR_INT);
4329                 /* "hardware gone" (where, fishing?) */
4330                 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
4331                         goto out;
4332                 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
4333         }
4334         if (r1 == 0 && r2 == 0) {
4335                 goto out_ena;
4336         }
4337
4338         IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
4339
4340         /* ignored */
4341         handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
4342
4343         if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
4344 #ifdef IWM_DEBUG
4345                 int i;
4346                 struct ieee80211com *ic = &sc->sc_ic;
4347                 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4348
4349                 iwm_nic_error(sc);
4350
4351                 /* Dump driver status (TX and RX rings) while we're here. */
4352                 device_printf(sc->sc_dev, "driver status:\n");
4353                 for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
4354                         struct iwm_tx_ring *ring = &sc->txq[i];
4355                         device_printf(sc->sc_dev,
4356                             "  tx ring %2d: qid=%-2d cur=%-3d "
4357                             "queued=%-3d\n",
4358                             i, ring->qid, ring->cur, ring->queued);
4359                 }
4360                 device_printf(sc->sc_dev,
4361                     "  rx ring: cur=%d\n", sc->rxq.cur);
                if (vap != NULL)
                        device_printf(sc->sc_dev,
                            "  802.11 state %d\n", vap->iv_state);
4364 #endif
4365
4366                 device_printf(sc->sc_dev, "fatal firmware error\n");
4367                 iwm_stop(sc);
4368                 rv = 1;
4369                 goto out;
4370
4371         }
4372
4373         if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
4374                 handled |= IWM_CSR_INT_BIT_HW_ERR;
4375                 device_printf(sc->sc_dev, "hardware error, stopping device\n");
4376                 iwm_stop(sc);
4377                 rv = 1;
4378                 goto out;
4379         }
4380
4381         /* firmware chunk loaded */
4382         if (r1 & IWM_CSR_INT_BIT_FH_TX) {
4383                 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
4384                 handled |= IWM_CSR_INT_BIT_FH_TX;
4385                 sc->sc_fw_chunk_done = 1;
4386                 wakeup(&sc->sc_fw);
4387         }
4388
4389         if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
4390                 handled |= IWM_CSR_INT_BIT_RF_KILL;
4391                 if (iwm_check_rfkill(sc)) {
4392                         device_printf(sc->sc_dev,
4393                             "%s: rfkill switch, disabling interface\n",
4394                             __func__);
4395                         iwm_stop(sc);
4396                 }
4397         }
4398
4399         /*
4400          * The Linux driver uses periodic interrupts to avoid races.
4401          * We cargo-cult like it's going out of fashion.
4402          */
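        /*
         * Concretely: the periodic interrupt is re-armed below whenever a
         * real RX interrupt is serviced, and disarmed here once a periodic
         * interrupt fires with no RX causes left to handle.
         */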
4403         if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
4404                 handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
4405                 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
4406                 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
4407                         IWM_WRITE_1(sc,
4408                             IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
4409                 isperiodic = 1;
4410         }
4411
4412         if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
4413                 handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
4414                 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
4415
4416                 iwm_notif_intr(sc);
4417
4418                 /* enable periodic interrupt, see above */
4419                 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
4420                         IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
4421                             IWM_CSR_INT_PERIODIC_ENA);
4422         }
4423
4424         if (__predict_false(r1 & ~handled))
4425                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
4426                     "%s: unhandled interrupts: %x\n", __func__, r1);
4427         rv = 1;
4428
4429  out_ena:
4430         iwm_restore_interrupts(sc);
4431  out:
4432         IWM_UNLOCK(sc);
4433         return;
4434 }
4435
4436 /*
4437  * Autoconf glue-sniffing
4438  */
4439 #define PCI_VENDOR_INTEL                0x8086
4440 #define PCI_PRODUCT_INTEL_WL_3160_1     0x08b3
4441 #define PCI_PRODUCT_INTEL_WL_3160_2     0x08b4
4442 #define PCI_PRODUCT_INTEL_WL_7260_1     0x08b1
4443 #define PCI_PRODUCT_INTEL_WL_7260_2     0x08b2
4444 #define PCI_PRODUCT_INTEL_WL_7265_1     0x095a
4445 #define PCI_PRODUCT_INTEL_WL_7265_2     0x095b
4446
4447 static const struct iwm_devices {
4448         uint16_t        device;
4449         const char      *name;
4450 } iwm_devices[] = {
4451         { PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
4452         { PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
4453         { PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
4454         { PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
4455         { PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
4456         { PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
4457 };
4458
4459 static int
4460 iwm_probe(device_t dev)
4461 {
4462         int i;
4463
4464         for (i = 0; i < nitems(iwm_devices); i++)
4465                 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
4466                     pci_get_device(dev) == iwm_devices[i].device) {
4467                         device_set_desc(dev, iwm_devices[i].name);
4468                         return (BUS_PROBE_DEFAULT);
4469                 }
4470
4471         return (ENXIO);
4472 }
4473
4474 static int
4475 iwm_dev_check(device_t dev)
4476 {
4477         struct iwm_softc *sc;
4478
4479         sc = device_get_softc(dev);
4480
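        /*
         * Pick the firmware image to request later and note whether this
         * part wants the host interrupt operation mode flag; per the cases
         * below the 7265 variants do not.
         */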
4481         switch (pci_get_device(dev)) {
4482         case PCI_PRODUCT_INTEL_WL_3160_1:
4483         case PCI_PRODUCT_INTEL_WL_3160_2:
4484                 sc->sc_fwname = "iwm3160fw";
4485                 sc->host_interrupt_operation_mode = 1;
4486                 return (0);
4487         case PCI_PRODUCT_INTEL_WL_7260_1:
4488         case PCI_PRODUCT_INTEL_WL_7260_2:
4489                 sc->sc_fwname = "iwm7260fw";
4490                 sc->host_interrupt_operation_mode = 1;
4491                 return (0);
4492         case PCI_PRODUCT_INTEL_WL_7265_1:
4493         case PCI_PRODUCT_INTEL_WL_7265_2:
4494                 sc->sc_fwname = "iwm7265fw";
4495                 sc->host_interrupt_operation_mode = 0;
4496                 return (0);
4497         default:
4498                 device_printf(dev, "unknown adapter type\n");
                return (ENXIO);
4500         }
4501 }
4502
4503 static int
4504 iwm_pci_attach(device_t dev)
4505 {
4506         struct iwm_softc *sc;
4507         int count, error, rid;
4508         uint16_t reg;
4509
4510         sc = device_get_softc(dev);
4511
4512         /* Clear device-specific "PCI retry timeout" register (41h). */
4513         reg = pci_read_config(dev, 0x40, sizeof(reg));
4514         pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
4515
4516         /* Enable bus-mastering and hardware bug workaround. */
4517         pci_enable_busmaster(dev);
4518         reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
4519         /* if !MSI */
4520         if (reg & PCIM_STATUS_INTxSTATE) {
4521                 reg &= ~PCIM_STATUS_INTxSTATE;
4522         }
4523         pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
4524
4525         rid = PCIR_BAR(0);
4526         sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
4527             RF_ACTIVE);
4528         if (sc->sc_mem == NULL) {
4529                 device_printf(sc->sc_dev, "can't map mem space\n");
4530                 return (ENXIO);
4531         }
4532         sc->sc_st = rman_get_bustag(sc->sc_mem);
4533         sc->sc_sh = rman_get_bushandle(sc->sc_mem);
4534
4535         /* Install interrupt handler. */
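        /*
         * rid 0 is the legacy INTx line, which may be shared; if a single
         * MSI message can be allocated we use rid 1 instead and keep the
         * interrupt exclusive.
         */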
4536         count = 1;
4537         rid = 0;
4538         if (pci_alloc_msi(dev, &count) == 0)
4539                 rid = 1;
4540         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
4541             (rid != 0 ? 0 : RF_SHAREABLE));
        if (sc->sc_irq == NULL) {
                device_printf(dev, "can't map interrupt\n");
                return (ENXIO);
        }
        error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
            NULL, iwm_intr, sc, &sc->sc_ih);
        if (error != 0) {
                device_printf(dev, "can't establish interrupt\n");
                return (ENXIO);
        }
4552         sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
4553
4554         return (0);
4555 }
4556
4557 static void
4558 iwm_pci_detach(device_t dev)
4559 {
4560         struct iwm_softc *sc = device_get_softc(dev);
4561
4562         if (sc->sc_irq != NULL) {
4563                 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
4564                 bus_release_resource(dev, SYS_RES_IRQ,
4565                     rman_get_rid(sc->sc_irq), sc->sc_irq);
4566                 pci_release_msi(dev);
4567         }
4568         if (sc->sc_mem != NULL)
4569                 bus_release_resource(dev, SYS_RES_MEMORY,
4570                     rman_get_rid(sc->sc_mem), sc->sc_mem);
4571 }
4572
4573
4574
4575 static int
4576 iwm_attach(device_t dev)
4577 {
4578         struct iwm_softc *sc = device_get_softc(dev);
4579         struct ieee80211com *ic = &sc->sc_ic;
4580         int error;
4581         int txq_i, i;
4582
4583         sc->sc_dev = dev;
4584         IWM_LOCK_INIT(sc);
4585         mbufq_init(&sc->sc_snd, ifqmaxlen);
4586         callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
4587         TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
4588         sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
4589             taskqueue_thread_enqueue, &sc->sc_tq);
4590         error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
4591         if (error != 0) {
4592                 device_printf(dev, "can't start threads, error %d\n",
4593                     error);
4594                 goto fail;
4595         }
4596
4597         /* PCI attach */
4598         error = iwm_pci_attach(dev);
4599         if (error != 0)
4600                 goto fail;
4601
4602         sc->sc_wantresp = -1;
4603
4604         /* Check device type */
4605         error = iwm_dev_check(dev);
4606         if (error != 0)
4607                 goto fail;
4608
4609         sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
4610
4611         /*
4612          * We now start fiddling with the hardware
4613          */
4614         sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
4615         if (iwm_prepare_card_hw(sc) != 0) {
4616                 device_printf(dev, "could not initialize hardware\n");
4617                 goto fail;
4618         }
4619
4620         /* Allocate DMA memory for firmware transfers. */
4621         if ((error = iwm_alloc_fwmem(sc)) != 0) {
4622                 device_printf(dev, "could not allocate memory for firmware\n");
4623                 goto fail;
4624         }
4625
4626         /* Allocate "Keep Warm" page. */
4627         if ((error = iwm_alloc_kw(sc)) != 0) {
4628                 device_printf(dev, "could not allocate keep warm page\n");
4629                 goto fail;
4630         }
4631
4632         /* We use ICT interrupts */
4633         if ((error = iwm_alloc_ict(sc)) != 0) {
4634                 device_printf(dev, "could not allocate ICT table\n");
4635                 goto fail;
4636         }
4637
4638         /* Allocate TX scheduler "rings". */
4639         if ((error = iwm_alloc_sched(sc)) != 0) {
4640                 device_printf(dev, "could not allocate TX scheduler rings\n");
4641                 goto fail;
4642         }
4643
4644         /* Allocate TX rings */
4645         for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
4646                 if ((error = iwm_alloc_tx_ring(sc,
4647                     &sc->txq[txq_i], txq_i)) != 0) {
4648                         device_printf(dev,
4649                             "could not allocate TX ring %d\n",
4650                             txq_i);
4651                         goto fail;
4652                 }
4653         }
4654
4655         /* Allocate RX ring. */
4656         if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
4657                 device_printf(dev, "could not allocate RX ring\n");
4658                 goto fail;
4659         }
4660
4661         /* Clear pending interrupts. */
4662         IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
4663
4664         ic->ic_softc = sc;
4665         ic->ic_name = device_get_nameunit(sc->sc_dev);
4666         ic->ic_phytype = IEEE80211_T_OFDM;      /* not only, but not used */
4667         ic->ic_opmode = IEEE80211_M_STA;        /* default to BSS mode */
4668
4669         /* Set device capabilities. */
4670         ic->ic_caps =
4671             IEEE80211_C_STA |
4672             IEEE80211_C_WPA |           /* WPA/RSN */
4673             IEEE80211_C_WME |
4674             IEEE80211_C_SHSLOT |        /* short slot time supported */
4675             IEEE80211_C_SHPREAMBLE      /* short preamble supported */
4676 //          IEEE80211_C_BGSCAN          /* capable of bg scanning */
4677             ;
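        /*
         * PHY contexts are the firmware's channel/band configuration
         * slots; start them out unreferenced and with no channel assigned.
         */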
4678         for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
4679                 sc->sc_phyctxt[i].id = i;
4680                 sc->sc_phyctxt[i].color = 0;
4681                 sc->sc_phyctxt[i].ref = 0;
4682                 sc->sc_phyctxt[i].channel = NULL;
4683         }
4684
4685         /* Max RSSI */
4686         sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
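        /*
         * Defer the rest of the bring-up (firmware load and the net80211
         * attach done in iwm_preinit()) until interrupts are enabled, via
         * config_intrhook(9).
         */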
4687         sc->sc_preinit_hook.ich_func = iwm_preinit;
4688         sc->sc_preinit_hook.ich_arg = sc;
4689         if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
4690                 device_printf(dev, "config_intrhook_establish failed\n");
4691                 goto fail;
4692         }
4693
4694 #ifdef IWM_DEBUG
4695         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
4696             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
4697             CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
4698 #endif
4699
4700         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
4701             "<-%s\n", __func__);
4702
4703         return 0;
4704
4705         /* Free allocated memory if something failed during attachment. */
4706 fail:
4707         iwm_detach_local(sc, 0);
4708
4709         return ENXIO;
4710 }
4711
4712 static int
4713 iwm_update_edca(struct ieee80211com *ic)
4714 {
4715         struct iwm_softc *sc = ic->ic_softc;
4716
4717         device_printf(sc->sc_dev, "%s: called\n", __func__);
4718         return (0);
4719 }
4720
4721 static void
4722 iwm_preinit(void *arg)
4723 {
4724         struct iwm_softc *sc = arg;
4725         device_t dev = sc->sc_dev;
4726         struct ieee80211com *ic = &sc->sc_ic;
4727         int error;
4728
4729         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
4730             "->%s\n", __func__);
4731
4732         IWM_LOCK(sc);
4733         if ((error = iwm_start_hw(sc)) != 0) {
4734                 device_printf(dev, "could not initialize hardware\n");
4735                 IWM_UNLOCK(sc);
4736                 goto fail;
4737         }
4738
4739         error = iwm_run_init_mvm_ucode(sc, 1);
4740         iwm_stop_device(sc);
4741         if (error) {
4742                 IWM_UNLOCK(sc);
4743                 goto fail;
4744         }
4745         device_printf(dev,
4746             "revision: 0x%x, firmware %d.%d (API ver. %d)\n",
4747             sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
4748             IWM_UCODE_MAJOR(sc->sc_fwver),
4749             IWM_UCODE_MINOR(sc->sc_fwver),
4750             IWM_UCODE_API(sc->sc_fwver));
4751
4752         /* not all hardware can do 5GHz band */
4753         if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
4754                 memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
4755                     sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
4756         IWM_UNLOCK(sc);
4757
4758         /*
4759          * At this point we've committed - if we fail to do setup,
4760          * we now also have to tear down the net80211 state.
4761          */
4762         ieee80211_ifattach(ic);
4763         ic->ic_vap_create = iwm_vap_create;
4764         ic->ic_vap_delete = iwm_vap_delete;
4765         ic->ic_raw_xmit = iwm_raw_xmit;
4766         ic->ic_node_alloc = iwm_node_alloc;
4767         ic->ic_scan_start = iwm_scan_start;
4768         ic->ic_scan_end = iwm_scan_end;
4769         ic->ic_update_mcast = iwm_update_mcast;
4770         ic->ic_set_channel = iwm_set_channel;
4771         ic->ic_scan_curchan = iwm_scan_curchan;
4772         ic->ic_scan_mindwell = iwm_scan_mindwell;
4773         ic->ic_wme.wme_update = iwm_update_edca;
4774         ic->ic_parent = iwm_parent;
4775         ic->ic_transmit = iwm_transmit;
4776         iwm_radiotap_attach(sc);
4777         if (bootverbose)
4778                 ieee80211_announce(ic);
4779
4780         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
4781             "<-%s\n", __func__);
4782         config_intrhook_disestablish(&sc->sc_preinit_hook);
4783
4784         return;
4785 fail:
4786         config_intrhook_disestablish(&sc->sc_preinit_hook);
4787         iwm_detach_local(sc, 0);
4788 }
4789
4790 /*
4791  * Attach the interface to 802.11 radiotap.
4792  */
4793 static void
4794 iwm_radiotap_attach(struct iwm_softc *sc)
4795 {
4796         struct ieee80211com *ic = &sc->sc_ic;
4797
4798         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
4799             "->%s begin\n", __func__);
4800         ieee80211_radiotap_attach(ic,
4801             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
4802                 IWM_TX_RADIOTAP_PRESENT,
4803             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
4804                 IWM_RX_RADIOTAP_PRESENT);
4805         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
4806             "->%s end\n", __func__);
4807 }
4808
4809 static struct ieee80211vap *
4810 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
4811     enum ieee80211_opmode opmode, int flags,
4812     const uint8_t bssid[IEEE80211_ADDR_LEN],
4813     const uint8_t mac[IEEE80211_ADDR_LEN])
4814 {
4815         struct iwm_vap *ivp;
4816         struct ieee80211vap *vap;
4817
4818         if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
4819                 return NULL;
4820         ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
4821         vap = &ivp->iv_vap;
4822         ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
4823         vap->iv_bmissthreshold = 10;            /* override default */
4824         /* Override with driver methods. */
4825         ivp->iv_newstate = vap->iv_newstate;
4826         vap->iv_newstate = iwm_newstate;
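        /*
         * iwm_newstate() is expected to chain to the saved ivp->iv_newstate
         * so that net80211 still runs its own state bookkeeping.
         */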
4827
4828         ieee80211_ratectl_init(vap);
4829         /* Complete setup. */
4830         ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
4831             mac);
4832         ic->ic_opmode = opmode;
4833
4834         return vap;
4835 }
4836
4837 static void
4838 iwm_vap_delete(struct ieee80211vap *vap)
4839 {
4840         struct iwm_vap *ivp = IWM_VAP(vap);
4841
4842         ieee80211_ratectl_deinit(vap);
4843         ieee80211_vap_detach(vap);
4844         free(ivp, M_80211_VAP);
4845 }
4846
4847 static void
4848 iwm_scan_start(struct ieee80211com *ic)
4849 {
4850         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4851         struct iwm_softc *sc = ic->ic_softc;
4852         int error;
4853
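        /*
         * Only a 2 GHz scan is requested here; completion arrives later as
         * IWM_SCAN_COMPLETE_NOTIFICATION, which queues sc_es_task
         * (iwm_endscan_cb()) to finish the scan from the taskqueue.
         */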
4854         if (sc->sc_scanband)
4855                 return;
4856         IWM_LOCK(sc);
4857         error = iwm_mvm_scan_request(sc, IEEE80211_CHAN_2GHZ, 0, NULL, 0);
4858         if (error) {
4859                 device_printf(sc->sc_dev, "could not initiate scan\n");
4860                 IWM_UNLOCK(sc);
4861                 ieee80211_cancel_scan(vap);
4862         } else
4863                 IWM_UNLOCK(sc);
4864 }
4865
4866 static void
4867 iwm_scan_end(struct ieee80211com *ic)
4868 {
4869 }
4870
4871 static void
4872 iwm_update_mcast(struct ieee80211com *ic)
4873 {
4874 }
4875
4876 static void
4877 iwm_set_channel(struct ieee80211com *ic)
4878 {
4879 }
4880
4881 static void
4882 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
4883 {
4884 }
4885
4886 static void
4887 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
4888 {
4889         return;
4890 }
4891
4892 void
4893 iwm_init_task(void *arg1)
4894 {
4895         struct iwm_softc *sc = arg1;
4896
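        /*
         * IWM_FLAG_BUSY serializes restarts: wait for any other restart to
         * finish, mark ourselves busy, and wake waiters once the stop/init
         * cycle below completes.
         */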
4897         IWM_LOCK(sc);
4898         while (sc->sc_flags & IWM_FLAG_BUSY)
4899                 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
4900         sc->sc_flags |= IWM_FLAG_BUSY;
4901         iwm_stop(sc);
4902         if (sc->sc_ic.ic_nrunning > 0)
4903                 iwm_init(sc);
4904         sc->sc_flags &= ~IWM_FLAG_BUSY;
4905         wakeup(&sc->sc_flags);
4906         IWM_UNLOCK(sc);
4907 }
4908
4909 static int
4910 iwm_resume(device_t dev)
4911 {
4912         uint16_t reg;
4913
4914         /* Clear device-specific "PCI retry timeout" register (41h). */
4915         reg = pci_read_config(dev, 0x40, sizeof(reg));
4916         pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
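        /*
         * Restart through the init task; it re-inits the interface only if
         * it is marked running (ic_nrunning > 0).
         */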
4917         iwm_init_task(device_get_softc(dev));
4918
4919         return 0;
4920 }
4921
4922 static int
4923 iwm_suspend(device_t dev)
4924 {
4925         struct iwm_softc *sc = device_get_softc(dev);
4926
4927         if (sc->sc_ic.ic_nrunning > 0) {
4928                 IWM_LOCK(sc);
4929                 iwm_stop(sc);
4930                 IWM_UNLOCK(sc);
4931         }
4932
4933         return (0);
4934 }
4935
4936 static int
4937 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
4938 {
4939         struct iwm_fw_info *fw = &sc->sc_fw;
4940         device_t dev = sc->sc_dev;
4941         int i;
4942
4943         if (sc->sc_tq) {
4944                 taskqueue_drain_all(sc->sc_tq);
4945                 taskqueue_free(sc->sc_tq);
4946         }
4947         callout_drain(&sc->sc_watchdog_to);
4948         iwm_stop_device(sc);
4949         if (do_net80211)
4950                 ieee80211_ifdetach(&sc->sc_ic);
4951
4952         /* Free descriptor rings */
4953         for (i = 0; i < nitems(sc->txq); i++)
4954                 iwm_free_tx_ring(sc, &sc->txq[i]);
4955
4956         /* Free firmware */
4957         if (fw->fw_fp != NULL)
4958                 iwm_fw_info_free(fw);
4959
4960         /* Free scheduler */
4961         iwm_free_sched(sc);
4962         if (sc->ict_dma.vaddr != NULL)
4963                 iwm_free_ict(sc);
4964         if (sc->kw_dma.vaddr != NULL)
4965                 iwm_free_kw(sc);
4966         if (sc->fw_dma.vaddr != NULL)
4967                 iwm_free_fwmem(sc);
4968
4969         /* Finished with the hardware - detach things */
4970         iwm_pci_detach(dev);
4971
4972         mbufq_drain(&sc->sc_snd);
4973         IWM_LOCK_DESTROY(sc);
4974
4975         return (0);
4976 }
4977
4978 static int
4979 iwm_detach(device_t dev)
4980 {
4981         struct iwm_softc *sc = device_get_softc(dev);
4982
4983         return (iwm_detach_local(sc, 1));
4984 }
4985
4986 static device_method_t iwm_pci_methods[] = {
4987         /* Device interface */
4988         DEVMETHOD(device_probe,         iwm_probe),
4989         DEVMETHOD(device_attach,        iwm_attach),
4990         DEVMETHOD(device_detach,        iwm_detach),
4991         DEVMETHOD(device_suspend,       iwm_suspend),
4992         DEVMETHOD(device_resume,        iwm_resume),
4993
4994         DEVMETHOD_END
4995 };
4996
4997 static driver_t iwm_pci_driver = {
4998         "iwm",
4999         iwm_pci_methods,
5000         sizeof (struct iwm_softc)
5001 };
5002
5003 static devclass_t iwm_devclass;
5004
5005 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
5006 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
5007 MODULE_DEPEND(iwm, pci, 1, 1, 1);
5008 MODULE_DEPEND(iwm, wlan, 1, 1, 1);