]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/dev/iwm/if_iwm.c
[iwm] Add iwm_nic_unlock() calls missing from previous commit.
[FreeBSD/FreeBSD.git] / sys / dev / iwm / if_iwm.c
1 /*      $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $     */
2
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 #include <sys/cdefs.h>
106 __FBSDID("$FreeBSD$");
107
108 #include "opt_wlan.h"
109 #include "opt_iwm.h"
110
111 #include <sys/param.h>
112 #include <sys/bus.h>
113 #include <sys/conf.h>
114 #include <sys/endian.h>
115 #include <sys/firmware.h>
116 #include <sys/kernel.h>
117 #include <sys/malloc.h>
118 #include <sys/mbuf.h>
119 #include <sys/mutex.h>
120 #include <sys/module.h>
121 #include <sys/proc.h>
122 #include <sys/rman.h>
123 #include <sys/socket.h>
124 #include <sys/sockio.h>
125 #include <sys/sysctl.h>
126 #include <sys/linker.h>
127
128 #include <machine/bus.h>
129 #include <machine/endian.h>
130 #include <machine/resource.h>
131
132 #include <dev/pci/pcivar.h>
133 #include <dev/pci/pcireg.h>
134
135 #include <net/bpf.h>
136
137 #include <net/if.h>
138 #include <net/if_var.h>
139 #include <net/if_arp.h>
140 #include <net/if_dl.h>
141 #include <net/if_media.h>
142 #include <net/if_types.h>
143
144 #include <netinet/in.h>
145 #include <netinet/in_systm.h>
146 #include <netinet/if_ether.h>
147 #include <netinet/ip.h>
148
149 #include <net80211/ieee80211_var.h>
150 #include <net80211/ieee80211_regdomain.h>
151 #include <net80211/ieee80211_ratectl.h>
152 #include <net80211/ieee80211_radiotap.h>
153
154 #include <dev/iwm/if_iwmreg.h>
155 #include <dev/iwm/if_iwmvar.h>
156 #include <dev/iwm/if_iwm_config.h>
157 #include <dev/iwm/if_iwm_debug.h>
158 #include <dev/iwm/if_iwm_notif_wait.h>
159 #include <dev/iwm/if_iwm_util.h>
160 #include <dev/iwm/if_iwm_binding.h>
161 #include <dev/iwm/if_iwm_phy_db.h>
162 #include <dev/iwm/if_iwm_mac_ctxt.h>
163 #include <dev/iwm/if_iwm_phy_ctxt.h>
164 #include <dev/iwm/if_iwm_time_event.h>
165 #include <dev/iwm/if_iwm_power.h>
166 #include <dev/iwm/if_iwm_scan.h>
167
168 #include <dev/iwm/if_iwm_pcie_trans.h>
169 #include <dev/iwm/if_iwm_led.h>
170 #include <dev/iwm/if_iwm_fw.h>
171
/*
 * Channel numbers the NVM may mark as usable on pre-8000-series
 * devices; indexed in the same order as the NVM channel flag words.
 */
const uint8_t iwm_nvm_channels[] = {
        /* 2.4 GHz */
        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
        /* 5 GHz */
        36, 40, 44, 48, 52, 56, 60, 64,
        100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
        149, 153, 157, 161, 165
};
/* Compile-time guard: table must fit the per-device channel limit. */
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");

/* Same table for 8000-series devices, which support more 5 GHz channels. */
const uint8_t iwm_nvm_channels_8000[] = {
        /* 2.4 GHz */
        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
        /* 5 GHz */
        36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
        96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
        149, 153, 157, 161, 165, 169, 173, 177, 181
};
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");
193
/* Count of 2.4 GHz entries at the head of the channel tables above. */
#define IWM_NUM_2GHZ_CHANNELS   14
/* Mask for extracting the HW-address count from its NVM word. */
#define IWM_N_HW_ADDR_MASK      0xF

/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
        uint8_t rate;   /* rate in 500 kb/s units (2 == 1 Mb/s) */
        uint8_t plcp;   /* PLCP signal value the firmware expects */
} iwm_rates[] = {
        {   2,  IWM_RATE_1M_PLCP  },
        {   4,  IWM_RATE_2M_PLCP  },
        {  11,  IWM_RATE_5M_PLCP  },
        {  22,  IWM_RATE_11M_PLCP },
        {  12,  IWM_RATE_6M_PLCP  },
        {  18,  IWM_RATE_9M_PLCP  },
        {  24,  IWM_RATE_12M_PLCP },
        {  36,  IWM_RATE_18M_PLCP },
        {  48,  IWM_RATE_24M_PLCP },
        {  72,  IWM_RATE_36M_PLCP },
        {  96,  IWM_RATE_48M_PLCP },
        { 108,  IWM_RATE_54M_PLCP },
};
/* Index of the first CCK and first OFDM entry in iwm_rates[]. */
#define IWM_RIDX_CCK    0
#define IWM_RIDX_OFDM   4
#define IWM_RIDX_MAX    (nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)

/* One NVM section read from the device: a length-prefixed byte buffer. */
struct iwm_nvm_section {
        uint16_t length;
        uint8_t *data;
};

/* Timeouts, in ticks, for the ucode ALIVE and calibration-done waits. */
#define IWM_MVM_UCODE_ALIVE_TIMEOUT     hz
#define IWM_MVM_UCODE_CALIB_TIMEOUT     (2*hz)

/* State captured from the firmware's ALIVE notification. */
struct iwm_mvm_alive_data {
        int valid;              /* non-zero once a valid ALIVE arrived */
        uint32_t scd_base_addr; /* scheduler base address reported by fw */
};
236
237 static int      iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
238 static int      iwm_firmware_store_section(struct iwm_softc *,
239                                            enum iwm_ucode_type,
240                                            const uint8_t *, size_t);
241 static int      iwm_set_default_calib(struct iwm_softc *, const void *);
242 static void     iwm_fw_info_free(struct iwm_fw_info *);
243 static int      iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
244 static int      iwm_alloc_fwmem(struct iwm_softc *);
245 static int      iwm_alloc_sched(struct iwm_softc *);
246 static int      iwm_alloc_kw(struct iwm_softc *);
247 static int      iwm_alloc_ict(struct iwm_softc *);
248 static int      iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
249 static void     iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
250 static void     iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
251 static int      iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
252                                   int);
253 static void     iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
254 static void     iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
255 static void     iwm_enable_interrupts(struct iwm_softc *);
256 static void     iwm_restore_interrupts(struct iwm_softc *);
257 static void     iwm_disable_interrupts(struct iwm_softc *);
258 static void     iwm_ict_reset(struct iwm_softc *);
259 static int      iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
260 static void     iwm_stop_device(struct iwm_softc *);
261 static void     iwm_mvm_nic_config(struct iwm_softc *);
262 static int      iwm_nic_rx_init(struct iwm_softc *);
263 static int      iwm_nic_tx_init(struct iwm_softc *);
264 static int      iwm_nic_init(struct iwm_softc *);
265 static int      iwm_enable_txq(struct iwm_softc *, int, int, int);
266 static int      iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
267 static int      iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
268                                    uint16_t, uint8_t *, uint16_t *);
269 static int      iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
270                                      uint16_t *, uint32_t);
271 static uint32_t iwm_eeprom_channel_flags(uint16_t);
272 static void     iwm_add_channel_band(struct iwm_softc *,
273                     struct ieee80211_channel[], int, int *, int, size_t,
274                     const uint8_t[]);
275 static void     iwm_init_channel_map(struct ieee80211com *, int, int *,
276                     struct ieee80211_channel[]);
277 static struct iwm_nvm_data *
278         iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
279                            const uint16_t *, const uint16_t *,
280                            const uint16_t *, const uint16_t *,
281                            const uint16_t *);
282 static void     iwm_free_nvm_data(struct iwm_nvm_data *);
283 static void     iwm_set_hw_address_family_8000(struct iwm_softc *,
284                                                struct iwm_nvm_data *,
285                                                const uint16_t *,
286                                                const uint16_t *);
287 static int      iwm_get_sku(const struct iwm_softc *, const uint16_t *,
288                             const uint16_t *);
289 static int      iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
290 static int      iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
291                                   const uint16_t *);
292 static int      iwm_get_n_hw_addrs(const struct iwm_softc *,
293                                    const uint16_t *);
294 static void     iwm_set_radio_cfg(const struct iwm_softc *,
295                                   struct iwm_nvm_data *, uint32_t);
296 static struct iwm_nvm_data *
297         iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
298 static int      iwm_nvm_init(struct iwm_softc *);
299 static int      iwm_pcie_load_section(struct iwm_softc *, uint8_t,
300                                       const struct iwm_fw_desc *);
301 static int      iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
302                                              bus_addr_t, uint32_t);
303 static int      iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
304                                                 const struct iwm_fw_sects *,
305                                                 int, int *);
306 static int      iwm_pcie_load_cpu_sections(struct iwm_softc *,
307                                            const struct iwm_fw_sects *,
308                                            int, int *);
309 static int      iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
310                                                const struct iwm_fw_sects *);
311 static int      iwm_pcie_load_given_ucode(struct iwm_softc *,
312                                           const struct iwm_fw_sects *);
313 static int      iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *);
314 static int      iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
315 static int      iwm_send_phy_cfg_cmd(struct iwm_softc *);
316 static int      iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
317                                               enum iwm_ucode_type);
318 static int      iwm_run_init_mvm_ucode(struct iwm_softc *, int);
319 static int      iwm_rx_addbuf(struct iwm_softc *, int, int);
320 static int      iwm_mvm_get_signal_strength(struct iwm_softc *,
321                                             struct iwm_rx_phy_info *);
322 static void     iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
323                                       struct iwm_rx_packet *,
324                                       struct iwm_rx_data *);
325 static int      iwm_get_noise(struct iwm_softc *sc,
326                     const struct iwm_mvm_statistics_rx_non_phy *);
327 static void     iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *);
328 static int      iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
329                                          struct iwm_rx_packet *,
330                                          struct iwm_node *);
331 static void     iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
332                                   struct iwm_rx_data *);
333 static void     iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
334 #if 0
335 static void     iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
336                                  uint16_t);
337 #endif
338 static const struct iwm_rate *
339         iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
340                         struct mbuf *, struct iwm_tx_cmd *);
341 static int      iwm_tx(struct iwm_softc *, struct mbuf *,
342                        struct ieee80211_node *, int);
343 static int      iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
344                              const struct ieee80211_bpf_params *);
345 static int      iwm_mvm_flush_tx_path(struct iwm_softc *sc,
346                                       uint32_t tfd_msk, uint32_t flags);
347 static int      iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
348                                                 struct iwm_mvm_add_sta_cmd *,
349                                                 int *);
350 static int      iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
351                                        int);
352 static int      iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
353 static int      iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
354 static int      iwm_mvm_add_int_sta_common(struct iwm_softc *,
355                                            struct iwm_int_sta *,
356                                            const uint8_t *, uint16_t, uint16_t);
357 static int      iwm_mvm_add_aux_sta(struct iwm_softc *);
358 static int      iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
359 static int      iwm_auth(struct ieee80211vap *, struct iwm_softc *);
360 static int      iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
361 static int      iwm_release(struct iwm_softc *, struct iwm_node *);
362 static struct ieee80211_node *
363                 iwm_node_alloc(struct ieee80211vap *,
364                                const uint8_t[IEEE80211_ADDR_LEN]);
365 static void     iwm_setrates(struct iwm_softc *, struct iwm_node *);
366 static int      iwm_media_change(struct ifnet *);
367 static int      iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
368 static void     iwm_endscan_cb(void *, int);
369 static void     iwm_mvm_fill_sf_command(struct iwm_softc *,
370                                         struct iwm_sf_cfg_cmd *,
371                                         struct ieee80211_node *);
372 static int      iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
373 static int      iwm_send_bt_init_conf(struct iwm_softc *);
374 static int      iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
375 static void     iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
376 static int      iwm_init_hw(struct iwm_softc *);
377 static void     iwm_init(struct iwm_softc *);
378 static void     iwm_start(struct iwm_softc *);
379 static void     iwm_stop(struct iwm_softc *);
380 static void     iwm_watchdog(void *);
381 static void     iwm_parent(struct ieee80211com *);
382 #ifdef IWM_DEBUG
383 static const char *
384                 iwm_desc_lookup(uint32_t);
385 static void     iwm_nic_error(struct iwm_softc *);
386 static void     iwm_nic_umac_error(struct iwm_softc *);
387 #endif
388 static void     iwm_notif_intr(struct iwm_softc *);
389 static void     iwm_intr(void *);
390 static int      iwm_attach(device_t);
391 static int      iwm_is_valid_ether_addr(uint8_t *);
392 static void     iwm_preinit(void *);
393 static int      iwm_detach_local(struct iwm_softc *sc, int);
394 static void     iwm_init_task(void *);
395 static void     iwm_radiotap_attach(struct iwm_softc *);
396 static struct ieee80211vap *
397                 iwm_vap_create(struct ieee80211com *,
398                                const char [IFNAMSIZ], int,
399                                enum ieee80211_opmode, int,
400                                const uint8_t [IEEE80211_ADDR_LEN],
401                                const uint8_t [IEEE80211_ADDR_LEN]);
402 static void     iwm_vap_delete(struct ieee80211vap *);
403 static void     iwm_scan_start(struct ieee80211com *);
404 static void     iwm_scan_end(struct ieee80211com *);
405 static void     iwm_update_mcast(struct ieee80211com *);
406 static void     iwm_set_channel(struct ieee80211com *);
407 static void     iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
408 static void     iwm_scan_mindwell(struct ieee80211_scan_state *);
409 static int      iwm_detach(device_t);
410
411 /*
412  * Firmware parser.
413  */
414
415 static int
416 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
417 {
418         const struct iwm_fw_cscheme_list *l = (const void *)data;
419
420         if (dlen < sizeof(*l) ||
421             dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
422                 return EINVAL;
423
424         /* we don't actually store anything for now, always use s/w crypto */
425
426         return 0;
427 }
428
429 static int
430 iwm_firmware_store_section(struct iwm_softc *sc,
431     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
432 {
433         struct iwm_fw_sects *fws;
434         struct iwm_fw_desc *fwone;
435
436         if (type >= IWM_UCODE_TYPE_MAX)
437                 return EINVAL;
438         if (dlen < sizeof(uint32_t))
439                 return EINVAL;
440
441         fws = &sc->sc_fw.fw_sects[type];
442         if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
443                 return EINVAL;
444
445         fwone = &fws->fw_sect[fws->fw_count];
446
447         /* first 32bit are device load offset */
448         memcpy(&fwone->offset, data, sizeof(uint32_t));
449
450         /* rest is data */
451         fwone->data = data + sizeof(uint32_t);
452         fwone->len = dlen - sizeof(uint32_t);
453
454         fws->fw_count++;
455
456         return 0;
457 }
458
/* Scan-channel count assumed until a firmware TLV overrides it. */
#define IWM_DEFAULT_SCAN_CHANNELS 40

/* iwlwifi: iwl-drv.c */
/* On-wire layout of a default-calibration TLV; consumed by
 * iwm_set_default_calib().  ucode_type is little-endian. */
struct iwm_tlv_calib_data {
        uint32_t ucode_type;
        struct iwm_tlv_calib_ctrl calib;
} __packed;
466
467 static int
468 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
469 {
470         const struct iwm_tlv_calib_data *def_calib = data;
471         uint32_t ucode_type = le32toh(def_calib->ucode_type);
472
473         if (ucode_type >= IWM_UCODE_TYPE_MAX) {
474                 device_printf(sc->sc_dev,
475                     "Wrong ucode_type %u for default "
476                     "calibration.\n", ucode_type);
477                 return EINVAL;
478         }
479
480         sc->sc_default_calib[ucode_type].flow_trigger =
481             def_calib->calib.flow_trigger;
482         sc->sc_default_calib[ucode_type].event_trigger =
483             def_calib->calib.event_trigger;
484
485         return 0;
486 }
487
488 static int
489 iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
490                         struct iwm_ucode_capabilities *capa)
491 {
492         const struct iwm_ucode_api *ucode_api = (const void *)data;
493         uint32_t api_index = le32toh(ucode_api->api_index);
494         uint32_t api_flags = le32toh(ucode_api->api_flags);
495         int i;
496
497         if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
498                 device_printf(sc->sc_dev,
499                     "api flags index %d larger than supported by driver\n",
500                     api_index);
501                 /* don't return an error so we can load FW that has more bits */
502                 return 0;
503         }
504
505         for (i = 0; i < 32; i++) {
506                 if (api_flags & (1U << i))
507                         setbit(capa->enabled_api, i + 32 * api_index);
508         }
509
510         return 0;
511 }
512
513 static int
514 iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
515                            struct iwm_ucode_capabilities *capa)
516 {
517         const struct iwm_ucode_capa *ucode_capa = (const void *)data;
518         uint32_t api_index = le32toh(ucode_capa->api_index);
519         uint32_t api_flags = le32toh(ucode_capa->api_capa);
520         int i;
521
522         if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
523                 device_printf(sc->sc_dev,
524                     "capa flags index %d larger than supported by driver\n",
525                     api_index);
526                 /* don't return an error so we can load FW that has more bits */
527                 return 0;
528         }
529
530         for (i = 0; i < 32; i++) {
531                 if (api_flags & (1U << i))
532                         setbit(capa->enabled_capa, i + 32 * api_index);
533         }
534
535         return 0;
536 }
537
/*
 * Release the loaded firmware image and clear the parsed section
 * table.  fw->fw_status is deliberately left untouched so a thread
 * sleeping on the load state (see iwm_read_firmware) is not confused.
 */
static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
        /* Drop the firmware(9) reference obtained via firmware_get(). */
        firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
        fw->fw_fp = NULL;
        /* don't touch fw->fw_status */
        memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}
546
547 static int
548 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
549 {
550         struct iwm_fw_info *fw = &sc->sc_fw;
551         const struct iwm_tlv_ucode_header *uhdr;
552         struct iwm_ucode_tlv tlv;
553         struct iwm_ucode_capabilities *capa = &sc->ucode_capa;
554         enum iwm_ucode_tlv_type tlv_type;
555         const struct firmware *fwp;
556         const uint8_t *data;
557         uint32_t usniffer_img;
558         uint32_t paging_mem_size;
559         int num_of_cpus;
560         int error = 0;
561         size_t len;
562
563         if (fw->fw_status == IWM_FW_STATUS_DONE &&
564             ucode_type != IWM_UCODE_INIT)
565                 return 0;
566
567         while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
568                 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
569         fw->fw_status = IWM_FW_STATUS_INPROGRESS;
570
571         if (fw->fw_fp != NULL)
572                 iwm_fw_info_free(fw);
573
574         /*
575          * Load firmware into driver memory.
576          * fw_fp will be set.
577          */
578         IWM_UNLOCK(sc);
579         fwp = firmware_get(sc->cfg->fw_name);
580         IWM_LOCK(sc);
581         if (fwp == NULL) {
582                 device_printf(sc->sc_dev,
583                     "could not read firmware %s (error %d)\n",
584                     sc->cfg->fw_name, error);
585                 goto out;
586         }
587         fw->fw_fp = fwp;
588
589         /* (Re-)Initialize default values. */
590         capa->flags = 0;
591         capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
592         capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
593         memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
594         memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
595         memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
596
597         /*
598          * Parse firmware contents
599          */
600
601         uhdr = (const void *)fw->fw_fp->data;
602         if (*(const uint32_t *)fw->fw_fp->data != 0
603             || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
604                 device_printf(sc->sc_dev, "invalid firmware %s\n",
605                     sc->cfg->fw_name);
606                 error = EINVAL;
607                 goto out;
608         }
609
610         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
611             IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
612             IWM_UCODE_MINOR(le32toh(uhdr->ver)),
613             IWM_UCODE_API(le32toh(uhdr->ver)));
614         data = uhdr->data;
615         len = fw->fw_fp->datasize - sizeof(*uhdr);
616
617         while (len >= sizeof(tlv)) {
618                 size_t tlv_len;
619                 const void *tlv_data;
620
621                 memcpy(&tlv, data, sizeof(tlv));
622                 tlv_len = le32toh(tlv.length);
623                 tlv_type = le32toh(tlv.type);
624
625                 len -= sizeof(tlv);
626                 data += sizeof(tlv);
627                 tlv_data = data;
628
629                 if (len < tlv_len) {
630                         device_printf(sc->sc_dev,
631                             "firmware too short: %zu bytes\n",
632                             len);
633                         error = EINVAL;
634                         goto parse_out;
635                 }
636
637                 switch ((int)tlv_type) {
638                 case IWM_UCODE_TLV_PROBE_MAX_LEN:
639                         if (tlv_len < sizeof(uint32_t)) {
640                                 device_printf(sc->sc_dev,
641                                     "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
642                                     __func__,
643                                     (int) tlv_len);
644                                 error = EINVAL;
645                                 goto parse_out;
646                         }
647                         capa->max_probe_length =
648                             le32toh(*(const uint32_t *)tlv_data);
649                         /* limit it to something sensible */
650                         if (capa->max_probe_length >
651                             IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
652                                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
653                                     "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
654                                     "ridiculous\n", __func__);
655                                 error = EINVAL;
656                                 goto parse_out;
657                         }
658                         break;
659                 case IWM_UCODE_TLV_PAN:
660                         if (tlv_len) {
661                                 device_printf(sc->sc_dev,
662                                     "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
663                                     __func__,
664                                     (int) tlv_len);
665                                 error = EINVAL;
666                                 goto parse_out;
667                         }
668                         capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
669                         break;
670                 case IWM_UCODE_TLV_FLAGS:
671                         if (tlv_len < sizeof(uint32_t)) {
672                                 device_printf(sc->sc_dev,
673                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
674                                     __func__,
675                                     (int) tlv_len);
676                                 error = EINVAL;
677                                 goto parse_out;
678                         }
679                         /*
680                          * Apparently there can be many flags, but Linux driver
681                          * parses only the first one, and so do we.
682                          *
683                          * XXX: why does this override IWM_UCODE_TLV_PAN?
684                          * Intentional or a bug?  Observations from
685                          * current firmware file:
686                          *  1) TLV_PAN is parsed first
687                          *  2) TLV_FLAGS contains TLV_FLAGS_PAN
688                          * ==> this resets TLV_PAN to itself... hnnnk
689                          */
690                         capa->flags = le32toh(*(const uint32_t *)tlv_data);
691                         break;
692                 case IWM_UCODE_TLV_CSCHEME:
693                         if ((error = iwm_store_cscheme(sc,
694                             tlv_data, tlv_len)) != 0) {
695                                 device_printf(sc->sc_dev,
696                                     "%s: iwm_store_cscheme(): returned %d\n",
697                                     __func__,
698                                     error);
699                                 goto parse_out;
700                         }
701                         break;
702                 case IWM_UCODE_TLV_NUM_OF_CPU:
703                         if (tlv_len != sizeof(uint32_t)) {
704                                 device_printf(sc->sc_dev,
705                                     "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
706                                     __func__,
707                                     (int) tlv_len);
708                                 error = EINVAL;
709                                 goto parse_out;
710                         }
711                         num_of_cpus = le32toh(*(const uint32_t *)tlv_data);
712                         if (num_of_cpus == 2) {
713                                 fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus =
714                                         TRUE;
715                                 fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus =
716                                         TRUE;
717                                 fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus =
718                                         TRUE;
719                         } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
720                                 device_printf(sc->sc_dev,
721                                     "%s: Driver supports only 1 or 2 CPUs\n",
722                                     __func__);
723                                 error = EINVAL;
724                                 goto parse_out;
725                         }
726                         break;
727                 case IWM_UCODE_TLV_SEC_RT:
728                         if ((error = iwm_firmware_store_section(sc,
729                             IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
730                                 device_printf(sc->sc_dev,
731                                     "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
732                                     __func__,
733                                     error);
734                                 goto parse_out;
735                         }
736                         break;
737                 case IWM_UCODE_TLV_SEC_INIT:
738                         if ((error = iwm_firmware_store_section(sc,
739                             IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
740                                 device_printf(sc->sc_dev,
741                                     "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
742                                     __func__,
743                                     error);
744                                 goto parse_out;
745                         }
746                         break;
747                 case IWM_UCODE_TLV_SEC_WOWLAN:
748                         if ((error = iwm_firmware_store_section(sc,
749                             IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
750                                 device_printf(sc->sc_dev,
751                                     "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
752                                     __func__,
753                                     error);
754                                 goto parse_out;
755                         }
756                         break;
757                 case IWM_UCODE_TLV_DEF_CALIB:
758                         if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
759                                 device_printf(sc->sc_dev,
760                                     "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
761                                     __func__,
762                                     (int) tlv_len,
763                                     (int) sizeof(struct iwm_tlv_calib_data));
764                                 error = EINVAL;
765                                 goto parse_out;
766                         }
767                         if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
768                                 device_printf(sc->sc_dev,
769                                     "%s: iwm_set_default_calib() failed: %d\n",
770                                     __func__,
771                                     error);
772                                 goto parse_out;
773                         }
774                         break;
775                 case IWM_UCODE_TLV_PHY_SKU:
776                         if (tlv_len != sizeof(uint32_t)) {
777                                 error = EINVAL;
778                                 device_printf(sc->sc_dev,
779                                     "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
780                                     __func__,
781                                     (int) tlv_len);
782                                 goto parse_out;
783                         }
784                         sc->sc_fw.phy_config =
785                             le32toh(*(const uint32_t *)tlv_data);
786                         sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
787                                                   IWM_FW_PHY_CFG_TX_CHAIN) >>
788                                                   IWM_FW_PHY_CFG_TX_CHAIN_POS;
789                         sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
790                                                   IWM_FW_PHY_CFG_RX_CHAIN) >>
791                                                   IWM_FW_PHY_CFG_RX_CHAIN_POS;
792                         break;
793
794                 case IWM_UCODE_TLV_API_CHANGES_SET: {
795                         if (tlv_len != sizeof(struct iwm_ucode_api)) {
796                                 error = EINVAL;
797                                 goto parse_out;
798                         }
799                         if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
800                                 error = EINVAL;
801                                 goto parse_out;
802                         }
803                         break;
804                 }
805
806                 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
807                         if (tlv_len != sizeof(struct iwm_ucode_capa)) {
808                                 error = EINVAL;
809                                 goto parse_out;
810                         }
811                         if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
812                                 error = EINVAL;
813                                 goto parse_out;
814                         }
815                         break;
816                 }
817
818                 case 48: /* undocumented TLV */
819                 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
820                 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
821                         /* ignore, not used by current driver */
822                         break;
823
824                 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
825                         if ((error = iwm_firmware_store_section(sc,
826                             IWM_UCODE_REGULAR_USNIFFER, tlv_data,
827                             tlv_len)) != 0)
828                                 goto parse_out;
829                         break;
830
831                 case IWM_UCODE_TLV_PAGING:
832                         if (tlv_len != sizeof(uint32_t)) {
833                                 error = EINVAL;
834                                 goto parse_out;
835                         }
836                         paging_mem_size = le32toh(*(const uint32_t *)tlv_data);
837
838                         IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
839                             "%s: Paging: paging enabled (size = %u bytes)\n",
840                             __func__, paging_mem_size);
841                         if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
842                                 device_printf(sc->sc_dev,
843                                         "%s: Paging: driver supports up to %u bytes for paging image\n",
844                                         __func__, IWM_MAX_PAGING_IMAGE_SIZE);
845                                 error = EINVAL;
846                                 goto out;
847                         }
848                         if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
849                                 device_printf(sc->sc_dev,
850                                     "%s: Paging: image isn't multiple %u\n",
851                                     __func__, IWM_FW_PAGING_SIZE);
852                                 error = EINVAL;
853                                 goto out;
854                         }
855
856                         sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
857                             paging_mem_size;
858                         usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
859                         sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
860                             paging_mem_size;
861                         break;
862
863                 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
864                         if (tlv_len != sizeof(uint32_t)) {
865                                 error = EINVAL;
866                                 goto parse_out;
867                         }
868                         capa->n_scan_channels =
869                             le32toh(*(const uint32_t *)tlv_data);
870                         break;
871
872                 case IWM_UCODE_TLV_FW_VERSION:
873                         if (tlv_len != sizeof(uint32_t) * 3) {
874                                 error = EINVAL;
875                                 goto parse_out;
876                         }
877                         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
878                             "%d.%d.%d",
879                             le32toh(((const uint32_t *)tlv_data)[0]),
880                             le32toh(((const uint32_t *)tlv_data)[1]),
881                             le32toh(((const uint32_t *)tlv_data)[2]));
882                         break;
883
884                 case IWM_UCODE_TLV_FW_MEM_SEG:
885                         break;
886
887                 default:
888                         device_printf(sc->sc_dev,
889                             "%s: unknown firmware section %d, abort\n",
890                             __func__, tlv_type);
891                         error = EINVAL;
892                         goto parse_out;
893                 }
894
895                 len -= roundup(tlv_len, 4);
896                 data += roundup(tlv_len, 4);
897         }
898
899         KASSERT(error == 0, ("unhandled error"));
900
901  parse_out:
902         if (error) {
903                 device_printf(sc->sc_dev, "firmware parse error %d, "
904                     "section type %d\n", error, tlv_type);
905         }
906
907  out:
908         if (error) {
909                 fw->fw_status = IWM_FW_STATUS_NONE;
910                 if (fw->fw_fp != NULL)
911                         iwm_fw_info_free(fw);
912         } else
913                 fw->fw_status = IWM_FW_STATUS_DONE;
914         wakeup(&sc->sc_fw);
915
916         return error;
917 }
918
919 /*
920  * DMA resource routines
921  */
922
923 /* fwmem is used to load firmware onto the card */
924 static int
925 iwm_alloc_fwmem(struct iwm_softc *sc)
926 {
927         /* Must be aligned on a 16-byte boundary. */
928         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
929             IWM_FH_MEM_TB_MAX_LENGTH, 16);
930 }
931
932 /* tx scheduler rings.  not used? */
933 static int
934 iwm_alloc_sched(struct iwm_softc *sc)
935 {
936         /* TX scheduler rings must be aligned on a 1KB boundary. */
937         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
938             nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
939 }
940
941 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
942 static int
943 iwm_alloc_kw(struct iwm_softc *sc)
944 {
945         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
946 }
947
948 /* interrupt cause table */
949 static int
950 iwm_alloc_ict(struct iwm_softc *sc)
951 {
952         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
953             IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
954 }
955
/*
 * Allocate an RX ring: the descriptor array, the shared status area,
 * a buffer DMA tag with one map per slot plus a spare map, and an
 * initial receive buffer for every slot.  On any failure the partially
 * constructed ring is torn down via iwm_free_rx_ring() before the
 * error is returned.
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	/* Each descriptor is one 32-bit word; filled in by iwm_rx_addbuf(). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/* Create RX buffer DMA tag. */
	/* Single segment of up to IWM_RBUF_SIZE, below 4GB. */
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}
	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}
1026
/*
 * Reset an RX ring to its post-allocation state without freeing any
 * of its buffers or DMA resources.
 */
static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	/* Reset the ring state */
	ring->cur = 0;

	/*
	 * The hw rx ring index in shared memory must also be cleared,
	 * otherwise the discrepancy can cause reprocessing chaos.
	 */
	/*
	 * NOTE(review): this clears sc->rxq.stat rather than ring->stat.
	 * Harmless while sc->rxq is the only RX ring, but verify if a
	 * second ring is ever added.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
}
1039
1040 static void
1041 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1042 {
1043         int i;
1044
1045         iwm_dma_contig_free(&ring->desc_dma);
1046         iwm_dma_contig_free(&ring->stat_dma);
1047
1048         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1049                 struct iwm_rx_data *data = &ring->data[i];
1050
1051                 if (data->m != NULL) {
1052                         bus_dmamap_sync(ring->data_dmat, data->map,
1053                             BUS_DMASYNC_POSTREAD);
1054                         bus_dmamap_unload(ring->data_dmat, data->map);
1055                         m_freem(data->m);
1056                         data->m = NULL;
1057                 }
1058                 if (data->map != NULL) {
1059                         bus_dmamap_destroy(ring->data_dmat, data->map);
1060                         data->map = NULL;
1061                 }
1062         }
1063         if (ring->spare_map != NULL) {
1064                 bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1065                 ring->spare_map = NULL;
1066         }
1067         if (ring->data_dmat != NULL) {
1068                 bus_dma_tag_destroy(ring->data_dmat);
1069                 ring->data_dmat = NULL;
1070         }
1071 }
1072
/*
 * Allocate a TX ring: the TFD descriptor array for every queue, and —
 * only for queues up to and including the command queue — the device
 * command area, a buffer DMA tag and one map per slot.  On failure the
 * partially constructed ring is released via iwm_free_tx_ring() and
 * the error returned.
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_MVM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	/*
	 * Precompute each slot's physical addresses into the command area:
	 * one struct iwm_device_cmd per slot, with the scratch address
	 * pointing inside the embedded TX command.
	 */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	/* The loop above must consume exactly the whole command area. */
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
1152
/*
 * Drop all frames still queued on a TX ring and reset it to empty.
 * DMA resources are kept for reuse; only the mbufs are freed.
 */
static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	/* This queue can no longer be marked full. */
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;

	/* The command queue may hold a "keep NIC awake" request; release it. */
	if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
		iwm_pcie_clear_cmd_in_flight(sc);
}
1180
1181 static void
1182 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1183 {
1184         int i;
1185
1186         iwm_dma_contig_free(&ring->desc_dma);
1187         iwm_dma_contig_free(&ring->cmd_dma);
1188
1189         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1190                 struct iwm_tx_data *data = &ring->data[i];
1191
1192                 if (data->m != NULL) {
1193                         bus_dmamap_sync(ring->data_dmat, data->map,
1194                             BUS_DMASYNC_POSTWRITE);
1195                         bus_dmamap_unload(ring->data_dmat, data->map);
1196                         m_freem(data->m);
1197                         data->m = NULL;
1198                 }
1199                 if (data->map != NULL) {
1200                         bus_dmamap_destroy(ring->data_dmat, data->map);
1201                         data->map = NULL;
1202                 }
1203         }
1204         if (ring->data_dmat != NULL) {
1205                 bus_dma_tag_destroy(ring->data_dmat);
1206                 ring->data_dmat = NULL;
1207         }
1208 }
1209
1210 /*
1211  * High-level hardware frobbing routines
1212  */
1213
1214 static void
1215 iwm_enable_interrupts(struct iwm_softc *sc)
1216 {
1217         sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1218         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1219 }
1220
/*
 * Re-apply the interrupt mask cached in sc_intmask by
 * iwm_enable_interrupts(), e.g. after a temporary disable.
 */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1226
/*
 * Mask all interrupt sources, then acknowledge anything already
 * pending in both the CSR and FH interrupt status registers.
 */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}
1237
/*
 * (Re)initialize the interrupt cause table (ICT) and switch the
 * driver to ICT interrupt mode.  Interrupts are disabled while the
 * table is cleared and its base address programmed, and re-enabled
 * at the end.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	/* Ack anything pending first, then unmask. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1261
1262 /* iwlwifi pcie/trans.c */
1263
1264 /*
1265  * Since this .. hard-resets things, it's time to actually
1266  * mark the first vap (if any) as having no mac context.
1267  * It's annoying, but since the driver is potentially being
1268  * stop/start'ed whilst active (thanks openbsd port!) we
1269  * have to correctly track this.
1270  */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, qid;
	uint32_t mask = 0;	/* accumulates per-channel DMA-idle bits */

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->is_uploaded = 0;
	}

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	if (iwm_nic_lock(sc)) {
		/* Deactivate the TX scheduler. */
		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

		/* Stop each Tx DMA channel */
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
		}

		/* Wait for DMA channels to be idle */
		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
		    5000)) {
			device_printf(sc->sc_dev,
			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
		}
		iwm_nic_unlock(sc);
	}
	iwm_pcie_rx_stop(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		/* Power-down device's busmaster DMA clocks */
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
			iwm_nic_unlock(sc);
		}
		DELAY(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);
	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
1356
1357 /* iwlwifi: mvm/ops.c */
1358 static void
1359 iwm_mvm_nic_config(struct iwm_softc *sc)
1360 {
1361         uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1362         uint32_t reg_val = 0;
1363         uint32_t phy_config = iwm_mvm_get_phy_config(sc);
1364
1365         radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1366             IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1367         radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1368             IWM_FW_PHY_CFG_RADIO_STEP_POS;
1369         radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1370             IWM_FW_PHY_CFG_RADIO_DASH_POS;
1371
1372         /* SKU control */
1373         reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1374             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1375         reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1376             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1377
1378         /* radio configuration */
1379         reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1380         reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1381         reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1382
1383         IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1384
1385         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1386             "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1387             radio_cfg_step, radio_cfg_dash);
1388
1389         /*
1390          * W/A : NIC is stuck in a reset state after Early PCIe power off
1391          * (PCIe power is lost before PERST# is asserted), causing ME FW
1392          * to lose ownership and not being able to obtain it back.
1393          */
1394         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1395                 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1396                     IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1397                     ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1398         }
1399 }
1400
/*
 * Program the RX DMA engine: clear the shared status area, stop any
 * running RX DMA, then point channel 0 at the descriptor ring and
 * status area, enable it, and hand the hardware its first buffers.
 * Returns EBUSY if the NIC lock cannot be taken, otherwise 0.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* Stop Rx DMA */
	iwm_pcie_rx_stop(sc);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* reset and flush pointers */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable RX. */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK        |
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->cfg->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1457
/*
 * Program the TX DMA engine: deactivate the scheduler, set the
 * keep-warm page address, and point the hardware at every TX ring's
 * descriptor array.  Returns EBUSY if the NIC lock cannot be taken,
 * otherwise 0.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}

	/* Let the scheduler activate queues automatically. */
	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}
1492
/*
 * One-time NIC bring-up: APM init, power settings (7000 family only),
 * MAC/radio configuration, then RX and TX DMA setup.  Finally enables
 * shadow registers (per the debug message below).  The ordering of
 * these steps matters.  Returns 0 on success or an errno from the
 * RX/TX init steps.
 */
static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_pwr(sc);

	iwm_mvm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: shadow registers enabled\n", __func__);
	/* NOTE(review): magic mask taken from the reference driver. */
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}
1519
/*
 * Access-category index -> device TX FIFO map.  Ordering (VO first)
 * mirrors the Linux iwlwifi table; NOTE(review): net80211's WME_AC_*
 * enumeration starts at BE, so verify the index used at call sites.
 */
const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
	IWM_MVM_TX_FIFO_VO,
	IWM_MVM_TX_FIFO_VI,
	IWM_MVM_TX_FIFO_BE,
	IWM_MVM_TX_FIFO_BK,
};
1526
/*
 * Activate TX queue 'qid' for station 'sta_id' and bind it to TX FIFO
 * 'fifo'.  The command queue (IWM_MVM_CMD_QUEUE) is configured locally
 * through scheduler PRPH registers and SRAM; any other queue is
 * configured by the firmware via an IWM_SCD_QUEUE_CFG host command.
 * Returns 0 on success, EBUSY if the NIC lock cannot be taken, or the
 * host-command error.
 *
 * NOTE(review): iwm_clear_bits_prph() and iwm_write_mem32() appear to
 * acquire the NIC access lock themselves, hence the unlock/relock
 * dance around them -- confirm against their definitions.
 */
static int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev,
		    "%s: cannot enable txq %d\n",
		    __func__,
		    qid);
		return EBUSY;
	}

	/* Reset the queue's write pointer. */
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	if (qid == IWM_MVM_CMD_QUEUE) {
		/* unactivate before configuration */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

		iwm_nic_unlock(sc);

		/* Take the queue out of aggregation mode. */
		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		/* Reset the scheduler's read pointer. */
		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
		iwm_nic_unlock(sc);

		/* Zero the queue's scheduler context in SRAM. */
		iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
		/* Set scheduler window size and frame limit. */
		iwm_write_mem32(sc,
		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
		    sizeof(uint32_t),
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		/* Mark the queue active and attach it to the TX FIFO. */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWM_SCD_QUEUE_STTS_REG_MSK);
	} else {
		struct iwm_scd_txq_cfg_cmd cmd;
		int error;

		iwm_nic_unlock(sc);

		/* Let the firmware configure this queue. */
		memset(&cmd, 0, sizeof(cmd));
		cmd.scd_queue = qid;
		cmd.enable = 1;
		cmd.sta_id = sta_id;
		cmd.tx_fifo = fifo;
		cmd.aggregate = 0;
		cmd.window = IWM_FRAME_LIMIT;

		error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
		    sizeof(cmd), &cmd);
		if (error) {
			device_printf(sc->sc_dev,
			    "cannot enable txq %d\n", qid);
			return error;
		}

		if (!iwm_nic_lock(sc))
			return EBUSY;
	}

	/*
	 * NOTE(review): this ORs the raw qid value (not 1 << qid) into
	 * IWM_SCD_EN_CTRL, matching the OpenBSD driver this code derives
	 * from; it looks suspicious -- verify against iwlwifi before
	 * changing.
	 */
	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

	iwm_nic_unlock(sc);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
	    __func__, qid, fifo);

	return 0;
}
1614
/*
 * Post-"alive" firmware handshake: reset the ICT table, cross-check
 * the scheduler SRAM base address reported by the firmware, clear the
 * scheduler context/status/translation memory, program the scheduler
 * DRAM base, enable the command queue, activate the TX scheduler and
 * FH DMA channels, and re-enable L1-Active (non-8000 family).
 * Returns 0 on success or an errno.
 */
static int
iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
{
	int error, chnl;

	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	iwm_ict_reset(sc);

	/* The alive message's SCD address should match the PRPH value. */
	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
	if (scd_base_addr != 0 &&
	    scd_base_addr != sc->scd_base_addr) {
		device_printf(sc->sc_dev,
		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
		    __func__, sc->scd_base_addr, scd_base_addr);
	}

	iwm_nic_unlock(sc);

	/* reset context data, TX status and translation data */
	error = iwm_write_mem(sc,
	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, clear_dwords);
	if (error)
		return EBUSY;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	iwm_nic_unlock(sc);

	/* enable command channel */
	error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
	if (error)
		return error;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Activate the TX scheduler (0 deactivates it in iwm_nic_tx_init). */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwm_nic_unlock(sc);

	/* Enable L1-Active */
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
	}

	/* error is 0 here on all success paths. */
	return error;
}
1685
1686 /*
1687  * NVM read access and content parsing.  We do not support
1688  * external NVM or writing NVM.
1689  * iwlwifi/mvm/nvm.c
1690  */
1691
/* Default NVM chunk size to request per NVM_ACCESS command (bytes) */
#define IWM_NVM_DEFAULT_CHUNK_SIZE      (2*1024)

/* op_code values for struct iwm_nvm_access_cmd (write unused here) */
#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0
1697
/* load nvm chunk response: values of iwm_nvm_access_resp.status */
enum {
	IWM_READ_NVM_CHUNK_SUCCEED = 0,
	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
};
1703
/*
 * Read up to 'length' bytes at 'offset' within NVM section 'section'
 * via an IWM_NVM_ACCESS_CMD firmware command.  On success the payload
 * is copied to data + offset and *len is set to the number of bytes
 * actually returned (possibly 0; see the 2K-multiple special case
 * below).  Returns 0 on success or an errno.  The response packet is
 * always released via iwm_free_resp() before returning.
 */
static int
iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
{
	struct iwm_nvm_access_cmd nvm_access_cmd = {
		.offset = htole16(offset),
		.length = htole16(length),
		.type = htole16(section),
		.op_code = IWM_NVM_READ_OPCODE,
	};
	struct iwm_nvm_access_resp *nvm_resp;
	struct iwm_rx_packet *pkt;
	struct iwm_host_cmd cmd = {
		.id = IWM_NVM_ACCESS_CMD,
		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
		.data = { &nvm_access_cmd, },
	};
	int ret, bytes_read, offset_read;
	uint8_t *resp_data;

	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);

	ret = iwm_send_cmd(sc, &cmd);
	if (ret) {
		device_printf(sc->sc_dev,
		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
		return ret;
	}

	pkt = cmd.resp_pkt;

	/* Extract NVM response */
	nvm_resp = (void *)pkt->data;
	ret = le16toh(nvm_resp->status);
	bytes_read = le16toh(nvm_resp->length);
	offset_read = le16toh(nvm_resp->offset);
	resp_data = nvm_resp->data;
	if (ret) {
		if ((offset != 0) &&
		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
			/*
			 * meaning of NOT_VALID_ADDRESS:
			 * driver try to read chunk from address that is
			 * multiple of 2K and got an error since addr is empty.
			 * meaning of (offset != 0): driver already
			 * read valid data from another chunk so this case
			 * is not an error.
			 */
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
				    "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
				    offset);
			*len = 0;
			ret = 0;
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
				    "NVM access command failed with status %d\n", ret);
			ret = EIO;
		}
		goto exit;
	}

	/* Sanity-check the response against what we asked for. */
	if (offset_read != offset) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with invalid offset %d\n",
		    offset_read);
		ret = EINVAL;
		goto exit;
	}

	if (bytes_read > length) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with too much data "
		    "(%d bytes requested, %d bytes received)\n",
		    length, bytes_read);
		ret = EINVAL;
		goto exit;
	}

	/* Write data to NVM */
	memcpy(data + offset, resp_data, bytes_read);
	*len = bytes_read;

 exit:
	iwm_free_resp(sc, &cmd);
	return ret;
}
1790
1791 /*
1792  * Reads an NVM section completely.
1793  * NICs prior to 7000 family don't have a real NVM, but just read
1794  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1795  * by uCode, we need to manually check in this case that we don't
1796  * overflow and try to read more than the EEPROM size.
1797  * For 7000 family NICs, we supply the maximal size we can read, and
1798  * the uCode fills the response with as much data as we can,
1799  * without overflowing, so no check is needed.
1800  */
1801 static int
1802 iwm_nvm_read_section(struct iwm_softc *sc,
1803         uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1804 {
1805         uint16_t seglen, length, offset = 0;
1806         int ret;
1807
1808         /* Set nvm section read length */
1809         length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1810
1811         seglen = length;
1812
1813         /* Read the NVM until exhausted (reading less than requested) */
1814         while (seglen == length) {
1815                 /* Check no memory assumptions fail and cause an overflow */
1816                 if ((size_read + offset + length) >
1817                     sc->cfg->eeprom_size) {
1818                         device_printf(sc->sc_dev,
1819                             "EEPROM size is too small for NVM\n");
1820                         return ENOBUFS;
1821                 }
1822
1823                 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1824                 if (ret) {
1825                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1826                                     "Cannot read NVM from section %d offset %d, length %d\n",
1827                                     section, offset, length);
1828                         return ret;
1829                 }
1830                 offset += seglen;
1831         }
1832
1833         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1834                     "NVM section %d read completed\n", section);
1835         *len = offset;
1836         return 0;
1837 }
1838
1839 /*
1840  * BEGIN IWM_NVM_PARSE
1841  */
1842
1843 /* iwlwifi/iwl-nvm-parse.c */
1844
1845 /* NVM offsets (in words) definitions */
enum iwm_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR = 0x15,	/* MAC address: 6 bytes / 3 words */

/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION = 0x1C0,
	IWM_NVM_VERSION = 0,	/* the following are relative to the */
	IWM_RADIO_CFG = 1,	/* start of the SW section data      */
	IWM_SKU = 2,
	IWM_N_HW_ADDRS = 3,
	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION = 0x2B8,
	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
1862
enum iwm_8000_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR0_WFPM_8000 = 0x12,
	IWM_HW_ADDR1_WFPM_8000 = 0x16,
	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
	/* word offset within the MAC-override section */
	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,

	/* NVM SW-Section offset (in words) definitions,
	 * relative to the start of the SW section data */
	IWM_NVM_SW_SECTION_8000 = 0x1C0,
	IWM_NVM_VERSION_8000 = 0,
	IWM_RADIO_CFG_8000 = 0,
	IWM_SKU_8000 = 2,
	IWM_N_HW_ADDRS_8000 = 3,

	/* NVM REGULATORY -Section offset (in words) definitions,
	 * relative to the start of the regulatory section data */
	IWM_NVM_CHANNELS_8000 = 0,
	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
	IWM_NVM_LAR_OFFSET_8000 = 0x507,
	IWM_NVM_LAR_ENABLED_8000 = 0x7,

	/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
};
1888
1889 /* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),	/* 2.4 GHz band */
	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),	/* 5.2 GHz band */
	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),	/* 802.11n */
	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),	/* 802.11ac */
};
1896
1897 /* radio config bits (actual values from NVM definition) */
/* radio config bits (actual values from NVM definition) */
#define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
#define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
#define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
#define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */

/* Family-8000 parts pack the same information with wider fields. */
#define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)       (x & 0xF)
#define IWM_NVM_RF_CFG_DASH_MSK_8000(x)         ((x >> 4) & 0xF)
#define IWM_NVM_RF_CFG_STEP_MSK_8000(x)         ((x >> 8) & 0xF)
#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)         ((x >> 12) & 0xFFF)
#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)       ((x >> 24) & 0xF)
#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)       ((x >> 28) & 0xF)

#define DEFAULT_MAX_TX_POWER 16	/* presumably dBm -- TODO confirm units */
1913
1914 /**
1915  * enum iwm_nvm_channel_flags - channel flags in NVM
1916  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1917  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1918  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1919  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1920  * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1921  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1922  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1923  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1924  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1925  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1926  */
enum iwm_nvm_channel_flags {
	IWM_NVM_CHANNEL_VALID = (1 << 0),	/* usable for this SKU/geo */
	IWM_NVM_CHANNEL_IBSS = (1 << 1),	/* IBSS allowed */
	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),	/* active scanning allowed */
	IWM_NVM_CHANNEL_RADAR = (1 << 4),	/* radar detection required */
	IWM_NVM_CHANNEL_DFS = (1 << 7),		/* see XXX note above */
	IWM_NVM_CHANNEL_WIDE = (1 << 8),
	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
};
1938
1939 /*
1940  * Translate EEPROM flags to net80211.
1941  */
1942 static uint32_t
1943 iwm_eeprom_channel_flags(uint16_t ch_flags)
1944 {
1945         uint32_t nflags;
1946
1947         nflags = 0;
1948         if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1949                 nflags |= IEEE80211_CHAN_PASSIVE;
1950         if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1951                 nflags |= IEEE80211_CHAN_NOADHOC;
1952         if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1953                 nflags |= IEEE80211_CHAN_DFS;
1954                 /* Just in case. */
1955                 nflags |= IEEE80211_CHAN_NOADHOC;
1956         }
1957
1958         return (nflags);
1959 }
1960
1961 static void
1962 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1963     int maxchans, int *nchans, int ch_idx, size_t ch_num,
1964     const uint8_t bands[])
1965 {
1966         const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
1967         uint32_t nflags;
1968         uint16_t ch_flags;
1969         uint8_t ieee;
1970         int error;
1971
1972         for (; ch_idx < ch_num; ch_idx++) {
1973                 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1974                 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1975                         ieee = iwm_nvm_channels[ch_idx];
1976                 else
1977                         ieee = iwm_nvm_channels_8000[ch_idx];
1978
1979                 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1980                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1981                             "Ch. %d Flags %x [%sGHz] - No traffic\n",
1982                             ieee, ch_flags,
1983                             (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1984                             "5.2" : "2.4");
1985                         continue;
1986                 }
1987
1988                 nflags = iwm_eeprom_channel_flags(ch_flags);
1989                 error = ieee80211_add_channel(chans, maxchans, nchans,
1990                     ieee, 0, 0, nflags, bands);
1991                 if (error != 0)
1992                         break;
1993
1994                 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1995                     "Ch. %d Flags %x [%sGHz] - Added\n",
1996                     ieee, ch_flags,
1997                     (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1998                     "5.2" : "2.4");
1999         }
2000 }
2001
2002 static void
2003 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2004     struct ieee80211_channel chans[])
2005 {
2006         struct iwm_softc *sc = ic->ic_softc;
2007         struct iwm_nvm_data *data = sc->nvm_data;
2008         uint8_t bands[IEEE80211_MODE_BYTES];
2009         size_t ch_num;
2010
2011         memset(bands, 0, sizeof(bands));
2012         /* 1-13: 11b/g channels. */
2013         setbit(bands, IEEE80211_MODE_11B);
2014         setbit(bands, IEEE80211_MODE_11G);
2015         iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2016             IWM_NUM_2GHZ_CHANNELS - 1, bands);
2017
2018         /* 14: 11b channel only. */
2019         clrbit(bands, IEEE80211_MODE_11G);
2020         iwm_add_channel_band(sc, chans, maxchans, nchans,
2021             IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2022
2023         if (data->sku_cap_band_52GHz_enable) {
2024                 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2025                         ch_num = nitems(iwm_nvm_channels);
2026                 else
2027                         ch_num = nitems(iwm_nvm_channels_8000);
2028                 memset(bands, 0, sizeof(bands));
2029                 setbit(bands, IEEE80211_MODE_11A);
2030                 iwm_add_channel_band(sc, chans, maxchans, nchans,
2031                     IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2032         }
2033 }
2034
/*
 * Derive the MAC address on family-8000 parts.  Preference order:
 * 1) the NVM MAC-override section, unless it holds the reserved
 *    address or an otherwise invalid one; 2) the WFMP PRPH registers;
 * 3) give up and zero the address.
 */
static void
iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
	const uint16_t *mac_override, const uint16_t *nvm_hw)
{
	const uint8_t *hw_addr;

	if (mac_override) {
		static const uint8_t reserved_mac[] = {
			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
		};

		hw_addr = (const uint8_t *)(mac_override +
				 IWM_MAC_ADDRESS_OVERRIDE_8000);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 */
		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
		    iwm_is_valid_ether_addr(data->hw_addr) &&
		    !IEEE80211_IS_MULTICAST(data->hw_addr))
			return;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: mac address from nvm override section invalid\n",
		    __func__);
	}

	if (nvm_hw) {
		/* read the mac address from WFMP registers */
		uint32_t mac_addr0 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
		uint32_t mac_addr1 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

		/* The registers hold the address bytes in reverse order. */
		hw_addr = (const uint8_t *)&mac_addr0;
		data->hw_addr[0] = hw_addr[3];
		data->hw_addr[1] = hw_addr[2];
		data->hw_addr[2] = hw_addr[1];
		data->hw_addr[3] = hw_addr[0];

		hw_addr = (const uint8_t *)&mac_addr1;
		data->hw_addr[4] = hw_addr[1];
		data->hw_addr[5] = hw_addr[0];

		return;
	}

	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
	memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
2093
2094 static int
2095 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2096             const uint16_t *phy_sku)
2097 {
2098         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2099                 return le16_to_cpup(nvm_sw + IWM_SKU);
2100
2101         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2102 }
2103
2104 static int
2105 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2106 {
2107         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2108                 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2109         else
2110                 return le32_to_cpup((const uint32_t *)(nvm_sw +
2111                                                 IWM_NVM_VERSION_8000));
2112 }
2113
2114 static int
2115 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2116                   const uint16_t *phy_sku)
2117 {
2118         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2119                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2120
2121         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2122 }
2123
2124 static int
2125 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2126 {
2127         int n_hw_addr;
2128
2129         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2130                 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2131
2132         n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2133
2134         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2135 }
2136
2137 static void
2138 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2139                   uint32_t radio_cfg)
2140 {
2141         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2142                 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2143                 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2144                 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2145                 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2146                 return;
2147         }
2148
2149         /* set the radio configuration for family 8000 */
2150         data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2151         data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2152         data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2153         data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2154         data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2155         data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2156 }
2157
/*
 * Fill data->hw_addr: on pre-8000 parts from the NVM HW section
 * (stored as little-endian 16-bit words, so bytes pair-swapped);
 * on family 8000 via iwm_set_hw_address_family_8000().
 * Returns 0 on success or EINVAL if no valid MAC address was found.
 */
static int
iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
		   const uint16_t *nvm_hw, const uint16_t *mac_override)
{
#ifdef notyet /* for FAMILY 9000 */
	if (cfg->mac_addr_from_csr) {
		iwm_set_hw_address_from_csr(sc, data);
	} else
#endif
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);

		/* The byte order is little endian 16 bit, meaning 214365 */
		data->hw_addr[0] = hw_addr[1];
		data->hw_addr[1] = hw_addr[0];
		data->hw_addr[2] = hw_addr[3];
		data->hw_addr[3] = hw_addr[2];
		data->hw_addr[4] = hw_addr[5];
		data->hw_addr[5] = hw_addr[4];
	} else {
		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
	}

	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
		device_printf(sc->sc_dev, "no valid mac address was found\n");
		return EINVAL;
	}

	return 0;
}
2188
2189 static struct iwm_nvm_data *
2190 iwm_parse_nvm_data(struct iwm_softc *sc,
2191                    const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2192                    const uint16_t *nvm_calib, const uint16_t *mac_override,
2193                    const uint16_t *phy_sku, const uint16_t *regulatory)
2194 {
2195         struct iwm_nvm_data *data;
2196         uint32_t sku, radio_cfg;
2197
2198         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2199                 data = malloc(sizeof(*data) +
2200                     IWM_NUM_CHANNELS * sizeof(uint16_t),
2201                     M_DEVBUF, M_NOWAIT | M_ZERO);
2202         } else {
2203                 data = malloc(sizeof(*data) +
2204                     IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2205                     M_DEVBUF, M_NOWAIT | M_ZERO);
2206         }
2207         if (!data)
2208                 return NULL;
2209
2210         data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2211
2212         radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2213         iwm_set_radio_cfg(sc, data, radio_cfg);
2214
2215         sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2216         data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2217         data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2218         data->sku_cap_11n_enable = 0;
2219
2220         data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2221
2222         /* If no valid mac address was found - bail out */
2223         if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2224                 free(data, M_DEVBUF);
2225                 return NULL;
2226         }
2227
2228         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2229                 memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2230                     IWM_NUM_CHANNELS * sizeof(uint16_t));
2231         } else {
2232                 memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2233                     IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2234         }
2235
2236         return data;
2237 }
2238
2239 static void
2240 iwm_free_nvm_data(struct iwm_nvm_data *data)
2241 {
2242         if (data != NULL)
2243                 free(data, M_DEVBUF);
2244 }
2245
2246 static struct iwm_nvm_data *
2247 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2248 {
2249         const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2250
2251         /* Checking for required sections */
2252         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2253                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2254                     !sections[sc->cfg->nvm_hw_section_num].data) {
2255                         device_printf(sc->sc_dev,
2256                             "Can't parse empty OTP/NVM sections\n");
2257                         return NULL;
2258                 }
2259         } else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2260                 /* SW and REGULATORY sections are mandatory */
2261                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2262                     !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2263                         device_printf(sc->sc_dev,
2264                             "Can't parse empty OTP/NVM sections\n");
2265                         return NULL;
2266                 }
2267                 /* MAC_OVERRIDE or at least HW section must exist */
2268                 if (!sections[sc->cfg->nvm_hw_section_num].data &&
2269                     !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2270                         device_printf(sc->sc_dev,
2271                             "Can't parse mac_address, empty sections\n");
2272                         return NULL;
2273                 }
2274
2275                 /* PHY_SKU section is mandatory in B0 */
2276                 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2277                         device_printf(sc->sc_dev,
2278                             "Can't parse phy_sku in B0, empty sections\n");
2279                         return NULL;
2280                 }
2281         } else {
2282                 panic("unknown device family %d\n", sc->cfg->device_family);
2283         }
2284
2285         hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2286         sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2287         calib = (const uint16_t *)
2288             sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2289         regulatory = (const uint16_t *)
2290             sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2291         mac_override = (const uint16_t *)
2292             sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2293         phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2294
2295         return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2296             phy_sku, regulatory);
2297 }
2298
2299 static int
2300 iwm_nvm_init(struct iwm_softc *sc)
2301 {
2302         struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2303         int i, ret, section;
2304         uint32_t size_read = 0;
2305         uint8_t *nvm_buffer, *temp;
2306         uint16_t len;
2307
2308         memset(nvm_sections, 0, sizeof(nvm_sections));
2309
2310         if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2311                 return EINVAL;
2312
2313         /* load NVM values from nic */
2314         /* Read From FW NVM */
2315         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2316
2317         nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2318         if (!nvm_buffer)
2319                 return ENOMEM;
2320         for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2321                 /* we override the constness for initial read */
2322                 ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2323                                            &len, size_read);
2324                 if (ret)
2325                         continue;
2326                 size_read += len;
2327                 temp = malloc(len, M_DEVBUF, M_NOWAIT);
2328                 if (!temp) {
2329                         ret = ENOMEM;
2330                         break;
2331                 }
2332                 memcpy(temp, nvm_buffer, len);
2333
2334                 nvm_sections[section].data = temp;
2335                 nvm_sections[section].length = len;
2336         }
2337         if (!size_read)
2338                 device_printf(sc->sc_dev, "OTP is blank\n");
2339         free(nvm_buffer, M_DEVBUF);
2340
2341         sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2342         if (!sc->nvm_data)
2343                 return EINVAL;
2344         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2345                     "nvm version = %x\n", sc->nvm_data->nvm_version);
2346
2347         for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2348                 if (nvm_sections[i].data != NULL)
2349                         free(nvm_sections[i].data, M_DEVBUF);
2350         }
2351
2352         return 0;
2353 }
2354
/*
 * DMA one firmware section to device memory at section->offset, in chunks
 * of at most IWM_FH_MEM_TB_MAX_LENGTH bytes staged through the pre-allocated
 * sc->fw_dma bounce buffer.  Returns 0 or an errno from the chunk loader.
 */
static int
iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
	const struct iwm_fw_desc *section)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	uint8_t *v_addr;
	bus_addr_t p_addr;
	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: [%d] uCode section being loaded...\n",
		    __func__, section_num);

	v_addr = dma->vaddr;
	p_addr = dma->paddr;

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		uint32_t copy_size, dst_addr;
		int extended_addr = FALSE;

		/* Last chunk may be shorter than chunk_sz. */
		copy_size = MIN(chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		/*
		 * Destinations in the extended SRAM window are only
		 * reachable while the LMPM_CHICK extended-address bit is set.
		 */
		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
			extended_addr = TRUE;

		if (extended_addr)
			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		/* Stage the chunk in the bounce buffer, then DMA it out. */
		memcpy(v_addr, (const uint8_t *)section->data + offset,
		    copy_size);
		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
						   copy_size);

		/* Restore the bit even if the chunk load failed. */
		if (extended_addr)
			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			device_printf(sc->sc_dev,
			    "%s: Could not load the [%d] uCode section\n",
			    __func__, section_num);
			break;
		}
	}

	return ret;
}
2407
2408 /*
2409  * ucode
2410  */
/*
 * Program the FH service channel to DMA one firmware chunk of byte_cnt
 * bytes from host address phy_addr to device SRAM address dst_addr, then
 * sleep until the FH_TX interrupt handler sets sc_fw_chunk_done and wakes
 * us.  Returns 0, EBUSY if the NIC could not be locked, or ETIMEDOUT.
 */
static int
iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
                             bus_addr_t phy_addr, uint32_t byte_cnt)
{
	int ret;

	/* Must be cleared before arming the DMA, to avoid a lost wakeup. */
	sc->sc_fw_chunk_done = 0;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Pause the channel while the transfer buffer is set up. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);

	/* Source address, split into low 32 bits and high bits + length. */
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(phy_addr)
	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	/* Kick off the transfer. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/*
	 * Wait up to 1s (hz ticks per msleep) for this segment to load;
	 * the loop only re-sleeps on wakeups that arrive before
	 * sc_fw_chunk_done is set, while a timeout (EWOULDBLOCK) breaks out.
	 */
	ret = 0;
	while (!sc->sc_fw_chunk_done) {
		ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
		if (ret)
			break;
	}

	if (ret != 0) {
		device_printf(sc->sc_dev,
		    "fw chunk addr 0x%x len %d failed to load\n",
		    dst_addr, byte_cnt);
		return ETIMEDOUT;
	}

	return 0;
}
2464
/*
 * Load the firmware sections for one CPU on 8000-family devices, reporting
 * each loaded section number to the ucode via IWM_FH_UCODE_LOAD_STATUS.
 * *first_ucode_section is the image index to resume from (reset to 0 for
 * CPU 1) and is updated to the index where loading stopped.
 */
static int
iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	uint32_t val, last_read_idx = 0;

	/* CPU 1 status lives in the low 16 bits, CPU 2 in the high 16. */
	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->fw_sect[i].data ||
		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
				    "Break since Data not valid or Empty section, sec = %d\n",
				    i);
			break;
		}
		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
		if (ret)
			return ret;

		/* Notify the ucode of the loaded section number and status */
		if (iwm_nic_lock(sc)) {
			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
			/* Running ones mask: 0x1, 0x3, 0x7, ... */
			sec_num = (sec_num << 1) | 0x1;
			iwm_nic_unlock(sc);
		}
	}

	*first_ucode_section = last_read_idx;

	/*
	 * NOTE(review): interrupts are enabled here, before the final
	 * load-status write, mirroring iwlwifi's 8000-family flow — confirm
	 * against the Linux driver if this ordering is ever changed.
	 */
	iwm_enable_interrupts(sc);

	/* Tell the ucode this CPU's sections are complete. */
	if (iwm_nic_lock(sc)) {
		if (cpu == 1)
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
		else
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
		iwm_nic_unlock(sc);
	}

	return 0;
}
2526
2527 static int
2528 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2529         const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2530 {
2531         int shift_param;
2532         int i, ret = 0;
2533         uint32_t last_read_idx = 0;
2534
2535         if (cpu == 1) {
2536                 shift_param = 0;
2537                 *first_ucode_section = 0;
2538         } else {
2539                 shift_param = 16;
2540                 (*first_ucode_section)++;
2541         }
2542
2543         for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2544                 last_read_idx = i;
2545
2546                 /*
2547                  * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2548                  * CPU1 to CPU2.
2549                  * PAGING_SEPARATOR_SECTION delimiter - separate between
2550                  * CPU2 non paged to CPU2 paging sec.
2551                  */
2552                 if (!image->fw_sect[i].data ||
2553                     image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2554                     image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2555                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2556                                     "Break since Data not valid or Empty section, sec = %d\n",
2557                                      i);
2558                         break;
2559                 }
2560
2561                 ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2562                 if (ret)
2563                         return ret;
2564         }
2565
2566         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2567                 iwm_set_bits_prph(sc,
2568                                   IWM_CSR_UCODE_LOAD_STATUS_ADDR,
2569                                   (IWM_LMPM_CPU_UCODE_LOADING_COMPLETED |
2570                                    IWM_LMPM_CPU_HDRS_LOADING_COMPLETED |
2571                                    IWM_LMPM_CPU_UCODE_LOADING_STARTED) <<
2572                                         shift_param);
2573
2574         *first_ucode_section = last_read_idx;
2575
2576         return 0;
2577
2578 }
2579
2580 static int
2581 iwm_pcie_load_given_ucode(struct iwm_softc *sc,
2582         const struct iwm_fw_sects *image)
2583 {
2584         int ret = 0;
2585         int first_ucode_section;
2586
2587         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2588                      image->is_dual_cpus ? "Dual" : "Single");
2589
2590         /* load to FW the binary non secured sections of CPU1 */
2591         ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2592         if (ret)
2593                 return ret;
2594
2595         if (image->is_dual_cpus) {
2596                 /* set CPU2 header address */
2597                 if (iwm_nic_lock(sc)) {
2598                         iwm_write_prph(sc,
2599                                        IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2600                                        IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2601                         iwm_nic_unlock(sc);
2602                 }
2603
2604                 /* load to FW the binary sections of CPU2 */
2605                 ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2606                                                  &first_ucode_section);
2607                 if (ret)
2608                         return ret;
2609         }
2610
2611         iwm_enable_interrupts(sc);
2612
2613         /* release CPU reset */
2614         IWM_WRITE(sc, IWM_CSR_RESET, 0);
2615
2616         return 0;
2617 }
2618
2619 int
2620 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2621         const struct iwm_fw_sects *image)
2622 {
2623         int ret = 0;
2624         int first_ucode_section;
2625
2626         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2627                     image->is_dual_cpus ? "Dual" : "Single");
2628
2629         /* configure the ucode to be ready to get the secured image */
2630         /* release CPU reset */
2631         if (iwm_nic_lock(sc)) {
2632                 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2633                     IWM_RELEASE_CPU_RESET_BIT);
2634                 iwm_nic_unlock(sc);
2635         }
2636
2637         /* load to FW the binary Secured sections of CPU1 */
2638         ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2639             &first_ucode_section);
2640         if (ret)
2641                 return ret;
2642
2643         /* load to FW the binary sections of CPU2 */
2644         return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2645             &first_ucode_section);
2646 }
2647
2648 /* XXX Get rid of this definition */
2649 static inline void
2650 iwm_enable_fw_load_int(struct iwm_softc *sc)
2651 {
2652         IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
2653         sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
2654         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
2655 }
2656
2657 /* XXX Add proper rfkill support code */
/*
 * Prepare the hardware, initialize the NIC, and load the given firmware
 * image.  On return the firmware is running (or an errno is returned);
 * the caller waits for the ALIVE notification separately.
 */
static int
iwm_start_fw(struct iwm_softc *sc,
	const struct iwm_fw_sects *fw)
{
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwm_prepare_card_hw(sc)) {
		device_printf(sc->sc_dev,
		    "%s: Exit HW not ready\n", __func__);
		ret = EIO;
		goto out;
	}

	/* Ack any pending interrupts before reconfiguring. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

	iwm_disable_interrupts(sc);

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

	ret = iwm_nic_init(sc);
	if (ret) {
		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
		goto out;
	}

	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all the interrupt besides the
	 * FH_TX interrupt which is needed to load the firmware). If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
	iwm_enable_fw_load_int(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
	else
		ret = iwm_pcie_load_given_ucode(sc, fw);

	/* XXX re-check RF-Kill state */

out:
	return ret;
}
2715
2716 static int
2717 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2718 {
2719         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2720                 .valid = htole32(valid_tx_ant),
2721         };
2722
2723         return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2724             IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2725 }
2726
2727 /* iwlwifi: mvm/fw.c */
2728 static int
2729 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2730 {
2731         struct iwm_phy_cfg_cmd phy_cfg_cmd;
2732         enum iwm_ucode_type ucode_type = sc->cur_ucode;
2733
2734         /* Set parameters */
2735         phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
2736         phy_cfg_cmd.calib_control.event_trigger =
2737             sc->sc_default_calib[ucode_type].event_trigger;
2738         phy_cfg_cmd.calib_control.flow_trigger =
2739             sc->sc_default_calib[ucode_type].flow_trigger;
2740
2741         IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2742             "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2743         return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2744             sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2745 }
2746
/*
 * Notification-wait callback for the IWM_MVM_ALIVE firmware response.
 * The three supported response layouts (v1/v2/v3) are distinguished purely
 * by payload size; the matching branch records the firmware's error/log
 * event table pointers and scheduler base address in the softc and reports
 * validity through alive_data->valid.  Returns TRUE to wake the waiter.
 */
static int
iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
{
	struct iwm_mvm_alive_data *alive_data = data;
	struct iwm_mvm_alive_resp_ver1 *palive1;
	struct iwm_mvm_alive_resp_ver2 *palive2;
	struct iwm_mvm_alive_resp *palive;

	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
		/* Version 1 response: no UMAC log support. */
		palive1 = (void *)pkt->data;

		sc->support_umac_log = FALSE;
		sc->error_event_table =
			le32toh(palive1->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive1->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);

		alive_data->valid = le16toh(palive1->status) ==
				    IWM_ALIVE_STATUS_OK;
		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16toh(palive1->status), palive1->ver_type,
			     palive1->ver_subtype, palive1->flags);
	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
		/* Version 2 response adds the UMAC error-info address. */
		palive2 = (void *)pkt->data;
		sc->error_event_table =
			le32toh(palive2->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive2->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
		sc->umac_error_event_table =
			le32toh(palive2->error_info_addr);

		alive_data->valid = le16toh(palive2->status) ==
				    IWM_ALIVE_STATUS_OK;
		/* A non-zero address means the UMAC error log is usable. */
		if (sc->umac_error_event_table)
			sc->support_umac_log = TRUE;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive2->status), palive2->ver_type,
			    palive2->ver_subtype, palive2->flags);

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			    palive2->umac_major, palive2->umac_minor);
	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		/* Version 3 response: UMAC versions widen to 32 bits. */
		palive = (void *)pkt->data;

		sc->error_event_table =
			le32toh(palive->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
		sc->umac_error_event_table =
			le32toh(palive->error_info_addr);

		alive_data->valid = le16toh(palive->status) ==
				    IWM_ALIVE_STATUS_OK;
		if (sc->umac_error_event_table)
			sc->support_umac_log = TRUE;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive->status), palive->ver_type,
			    palive->ver_subtype, palive->flags);

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			    le32toh(palive->umac_major),
			    le32toh(palive->umac_minor));
	}
	/* An unrecognized payload size leaves alive_data->valid untouched. */

	return TRUE;
}
2823
2824 static int
2825 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2826         struct iwm_rx_packet *pkt, void *data)
2827 {
2828         struct iwm_phy_db *phy_db = data;
2829
2830         if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2831                 if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2832                         device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2833                             __func__, pkt->hdr.code);
2834                 }
2835                 return TRUE;
2836         }
2837
2838         if (iwm_phy_db_set_section(phy_db, pkt)) {
2839                 device_printf(sc->sc_dev,
2840                     "%s: iwm_phy_db_set_section failed\n", __func__);
2841         }
2842
2843         return FALSE;
2844 }
2845
/*
 * Read the requested firmware image, start it, and block until the ALIVE
 * notification arrives.  On success sc->cur_ucode is the new image and
 * sc->ucode_loaded is TRUE; on failure sc->cur_ucode is rolled back to
 * the previously running image type.  Called with the IWM lock held; the
 * lock is dropped while waiting for the notification.
 */
static int
iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
	enum iwm_ucode_type ucode_type)
{
	struct iwm_notification_wait alive_wait;
	struct iwm_mvm_alive_data alive_data;
	const struct iwm_fw_sects *fw;
	enum iwm_ucode_type old_type = sc->cur_ucode;
	int error;
	static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };

	if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
		device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
			error);
		return error;
	}
	fw = &sc->sc_fw.fw_sects[ucode_type];
	sc->cur_ucode = ucode_type;
	sc->ucode_loaded = FALSE;

	/* Register for the ALIVE response before starting the firmware. */
	memset(&alive_data, 0, sizeof(alive_data));
	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
				   alive_cmd, nitems(alive_cmd),
				   iwm_alive_fn, &alive_data);

	error = iwm_start_fw(sc, fw);
	if (error) {
		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
		sc->cur_ucode = old_type;
		/* Must drop the registration we just made. */
		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
		return error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	IWM_UNLOCK(sc);
	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
				      IWM_MVM_UCODE_ALIVE_TIMEOUT);
	IWM_LOCK(sc);
	if (error) {
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
			/* 0x5a5a5a5a marks "could not read" in the output. */
			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
			if (iwm_nic_lock(sc)) {
				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
				iwm_nic_unlock(sc);
			}
			device_printf(sc->sc_dev,
			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
			    a, b);
		}
		sc->cur_ucode = old_type;
		return error;
	}

	if (!alive_data.valid) {
		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
		    __func__);
		sc->cur_ucode = old_type;
		return EIO;
	}

	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);

	/*
	 * configure and operate fw paging mechanism.
	 * driver configures the paging flow only once, CPU2 paging image
	 * included in the IWM_UCODE_INIT image.
	 */
	if (fw->paging_mem_size) {
		error = iwm_save_fw_paging(sc, fw);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: failed to save the FW paging image\n",
			    __func__);
			return error;
		}

		error = iwm_send_paging_cmd(sc, fw);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: failed to send the paging cmd\n", __func__);
			iwm_free_fw_paging(sc);
			return error;
		}
	}

	if (!error)
		sc->ucode_loaded = TRUE;
	return error;
}
2939
2940 /*
2941  * mvm misc bits
2942  */
2943
2944 /*
2945  * follows iwlwifi/fw.c
2946  */
/*
 * Boot the INIT ucode and run its calibration flow.  With justnvm set,
 * only the NVM is read (to learn the MAC address and channel data) and no
 * calibration is performed.  Returns 0 or an errno.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	struct iwm_notification_wait calib_wait;
	static const uint16_t init_complete[] = {
		IWM_INIT_COMPLETE_NOTIF,
		IWM_CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		device_printf(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	/* Register the calibration collector before starting the ucode. */
	iwm_init_notification_wait(sc->sc_notif_wait,
				   &calib_wait,
				   init_complete,
				   nitems(init_complete),
				   iwm_wait_phy_db_entry,
				   sc->sc_phy_db);

	/* Will also start the device */
	ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
	if (ret) {
		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
		    ret);
		goto error;
	}

	if (justnvm) {
		/* Read nvm */
		ret = iwm_nvm_init(sc);
		if (ret) {
			device_printf(sc->sc_dev, "failed to read nvm\n");
			goto error;
		}
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
		/*
		 * Intentional even on success: the "error" label only
		 * removes the calibration notification wait, which will
		 * never fire since we stop before calibration here.
		 */
		goto error;
	}

	ret = iwm_send_bt_init_conf(sc);
	if (ret) {
		device_printf(sc->sc_dev,
		    "failed to send bt coex configuration: %d\n", ret);
		goto error;
	}

	/* Init Smart FIFO. */
	ret = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
	if (ret)
		goto error;

	/* Send TX valid antennas before triggering calibrations */
	ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
	if (ret) {
		device_printf(sc->sc_dev,
		    "failed to send antennas before calibration: %d\n", ret);
		goto error;
	}

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	ret = iwm_send_phy_cfg_cmd(sc);
	if (ret) {
		device_printf(sc->sc_dev,
		    "%s: Failed to run INIT calibrations: %d\n",
		    __func__, ret);
		goto error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware.  The lock is dropped while sleeping; on
	 * completion the wait is consumed, so we skip the removal below.
	 */
	IWM_UNLOCK(sc);
	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
	    IWM_MVM_UCODE_CALIB_TIMEOUT);
	IWM_LOCK(sc);


	goto out;

error:
	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
out:
	return ret;
}
3039
3040 /*
3041  * receive side
3042  */
3043
3044 /* (re)stock rx ring, called at init-time and at runtime */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	bus_dmamap_t dmamap;
	bus_dma_segment_t seg;
	int nsegs, error;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
	if (m == NULL)
		return ENOBUFS;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	/*
	 * Load into the spare map first so the slot's current mbuf stays
	 * mapped (and usable) if the load fails.
	 */
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
	    &seg, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: can't map mbuf, error %d\n", __func__, error);
		m_freem(m);
		return error;
	}

	if (data->m != NULL)
		bus_dmamap_unload(ring->data_dmat, data->map);

	/* Swap ring->spare_map with data->map */
	dmamap = data->map;
	data->map = ring->spare_map;
	ring->spare_map = dmamap;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
	data->m = m;

	/* Update RX descriptor. */
	/* Hardware takes the 256-byte-aligned DMA address shifted right 8. */
	KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
	ring->desc[idx] = htole32(seg.ds_addr >> 8);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}
3088
3089 /* iwlwifi: mvm/rx.c */
3090 /*
3091  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3092  * values are reported by the fw as positive values - need to negate
3093  * to obtain their dBM.  Account for missing antennas by replacing 0
3094  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3095  */
3096 static int
3097 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3098 {
3099         int energy_a, energy_b, energy_c, max_energy;
3100         uint32_t val;
3101
3102         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3103         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3104             IWM_RX_INFO_ENERGY_ANT_A_POS;
3105         energy_a = energy_a ? -energy_a : -256;
3106         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3107             IWM_RX_INFO_ENERGY_ANT_B_POS;
3108         energy_b = energy_b ? -energy_b : -256;
3109         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3110             IWM_RX_INFO_ENERGY_ANT_C_POS;
3111         energy_c = energy_c ? -energy_c : -256;
3112         max_energy = MAX(energy_a, energy_b);
3113         max_energy = MAX(max_energy, energy_c);
3114
3115         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3116             "energy In A %d B %d C %d , and max %d\n",
3117             energy_a, energy_b, energy_c, max_energy);
3118
3119         return max_energy;
3120 }
3121
3122 static void
3123 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
3124         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3125 {
3126         struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3127
3128         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3129         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3130
3131         memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3132 }
3133
3134 /*
3135  * Retrieve the average noise (in dBm) among receivers.
3136  */
3137 static int
3138 iwm_get_noise(struct iwm_softc *sc,
3139     const struct iwm_mvm_statistics_rx_non_phy *stats)
3140 {
3141         int i, total, nbant, noise;
3142
3143         total = nbant = noise = 0;
3144         for (i = 0; i < 3; i++) {
3145                 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3146                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3147                     __func__,
3148                     i,
3149                     noise);
3150
3151                 if (noise) {
3152                         total += noise;
3153                         nbant++;
3154                 }
3155         }
3156
3157         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3158             __func__, nbant, total);
3159 #if 0
3160         /* There should be at least one antenna but check anyway. */
3161         return (nbant == 0) ? -127 : (total / nbant) - 107;
3162 #else
3163         /* For now, just hard-code it to -96 to be safe */
3164         return (-96);
3165 #endif
3166 }
3167
3168 /*
3169  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3170  *
3171  * Handles the actual data of the Rx packet from the fw
3172  */
3173 static void
3174 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m)
3175 {
3176         struct ieee80211com *ic = &sc->sc_ic;
3177         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3178         struct ieee80211_frame *wh;
3179         struct ieee80211_node *ni;
3180         struct ieee80211_rx_stats rxs;
3181         struct iwm_rx_phy_info *phy_info;
3182         struct iwm_rx_mpdu_res_start *rx_res;
3183         struct iwm_rx_packet *pkt = mtod(m, struct iwm_rx_packet *);
3184         uint32_t len;
3185         uint32_t rx_pkt_status;
3186         int rssi;
3187
3188         phy_info = &sc->sc_last_phy_info;
3189         rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3190         wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3191         len = le16toh(rx_res->byte_count);
3192         rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3193
3194         if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3195                 device_printf(sc->sc_dev,
3196                     "dsp size out of range [0,20]: %d\n",
3197                     phy_info->cfg_phy_cnt);
3198                 goto fail;
3199         }
3200
3201         if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3202             !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3203                 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3204                     "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3205                 goto fail;
3206         }
3207
3208         rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3209
3210         /* Map it to relative value */
3211         rssi = rssi - sc->sc_noise;
3212
3213         /* replenish ring for the buffer we're going to feed to the sharks */
3214         if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3215                 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3216                     __func__);
3217                 goto fail;
3218         }
3219
3220         m->m_data = pkt->data + sizeof(*rx_res);
3221         m->m_pkthdr.len = m->m_len = len;
3222
3223         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3224             "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3225
3226         ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3227
3228         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3229             "%s: phy_info: channel=%d, flags=0x%08x\n",
3230             __func__,
3231             le16toh(phy_info->channel),
3232             le16toh(phy_info->phy_flags));
3233
3234         /*
3235          * Populate an RX state struct with the provided information.
3236          */
3237         bzero(&rxs, sizeof(rxs));
3238         rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3239         rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3240         rxs.c_ieee = le16toh(phy_info->channel);
3241         if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3242                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3243         } else {
3244                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3245         }
3246
3247         /* rssi is in 1/2db units */
3248         rxs.c_rssi = rssi * 2;
3249         rxs.c_nf = sc->sc_noise;
3250         if (ieee80211_add_rx_params(m, &rxs) == 0) {
3251                 if (ni)
3252                         ieee80211_free_node(ni);
3253                 goto fail;
3254         }
3255
3256         if (ieee80211_radiotap_active_vap(vap)) {
3257                 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3258
3259                 tap->wr_flags = 0;
3260                 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3261                         tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3262                 tap->wr_chan_freq = htole16(rxs.c_freq);
3263                 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3264                 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3265                 tap->wr_dbm_antsignal = (int8_t)rssi;
3266                 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3267                 tap->wr_tsft = phy_info->system_timestamp;
3268                 switch (phy_info->rate) {
3269                 /* CCK rates. */
3270                 case  10: tap->wr_rate =   2; break;
3271                 case  20: tap->wr_rate =   4; break;
3272                 case  55: tap->wr_rate =  11; break;
3273                 case 110: tap->wr_rate =  22; break;
3274                 /* OFDM rates. */
3275                 case 0xd: tap->wr_rate =  12; break;
3276                 case 0xf: tap->wr_rate =  18; break;
3277                 case 0x5: tap->wr_rate =  24; break;
3278                 case 0x7: tap->wr_rate =  36; break;
3279                 case 0x9: tap->wr_rate =  48; break;
3280                 case 0xb: tap->wr_rate =  72; break;
3281                 case 0x1: tap->wr_rate =  96; break;
3282                 case 0x3: tap->wr_rate = 108; break;
3283                 /* Unknown rate: should not happen. */
3284                 default:  tap->wr_rate =   0;
3285                 }
3286         }
3287
3288         IWM_UNLOCK(sc);
3289         if (ni != NULL) {
3290                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3291                 ieee80211_input_mimo(ni, m);
3292                 ieee80211_free_node(ni);
3293         } else {
3294                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3295                 ieee80211_input_mimo_all(ic, m);
3296         }
3297         IWM_LOCK(sc);
3298
3299         return;
3300
3301 fail:
3302         counter_u64_add(ic->ic_ierrors, 1);
3303 }
3304
3305 static int
3306 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3307         struct iwm_node *in)
3308 {
3309         struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3310         struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
3311         struct ieee80211_node *ni = &in->in_ni;
3312         int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3313
3314         KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3315
3316         /* Update rate control statistics. */
3317         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3318             __func__,
3319             (int) le16toh(tx_resp->status.status),
3320             (int) le16toh(tx_resp->status.sequence),
3321             tx_resp->frame_count,
3322             tx_resp->bt_kill_count,
3323             tx_resp->failure_rts,
3324             tx_resp->failure_frame,
3325             le32toh(tx_resp->initial_rate),
3326             (int) le16toh(tx_resp->wireless_media_time));
3327
3328         txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
3329                      IEEE80211_RATECTL_STATUS_LONG_RETRY;
3330         txs->short_retries = tx_resp->failure_rts;
3331         txs->long_retries = tx_resp->failure_frame;
3332         if (status != IWM_TX_STATUS_SUCCESS &&
3333             status != IWM_TX_STATUS_DIRECT_DONE) {
3334                 switch (status) {
3335                 case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
3336                         txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
3337                         break;
3338                 case IWM_TX_STATUS_FAIL_LONG_LIMIT:
3339                         txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
3340                         break;
3341                 case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
3342                         txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
3343                         break;
3344                 default:
3345                         txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
3346                         break;
3347                 }
3348         } else {
3349                 txs->status = IEEE80211_RATECTL_TX_SUCCESS;
3350         }
3351         ieee80211_ratectl_tx_complete(ni, txs);
3352
3353         return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
3354 }
3355
/*
 * Handle a TX command response: complete the matching TX ring slot,
 * report the outcome to rate control, release the frame, and restart
 * transmission if the ring drains below the low-water mark.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
        struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
        struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
        int idx = cmd_hdr->idx;
        int qid = cmd_hdr->qid;
        struct iwm_tx_ring *ring = &sc->txq[qid];
        struct iwm_tx_data *txd = &ring->data[idx];
        struct iwm_node *in = txd->in;
        struct mbuf *m = txd->m;
        int status;

        KASSERT(txd->done == 0, ("txd not done"));
        KASSERT(txd->in != NULL, ("txd without node"));
        KASSERT(txd->m != NULL, ("txd without mbuf"));

        bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);

        /* A frame completed: reset the TX watchdog. */
        sc->sc_tx_timer = 0;

        status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

        /* Unmap and free mbuf. */
        bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(ring->data_dmat, txd->map);

        IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
            "free txd %p, in %p\n", txd, txd->in);
        txd->done = 1;
        txd->m = NULL;
        txd->in = NULL;

        /* Hand the completed frame back to net80211 (consumes m). */
        ieee80211_tx_complete(&in->in_ni, m, status);

        /* Clear the queue-full bit and kick TX once no queue is full. */
        if (--ring->queued < IWM_TX_RING_LOMARK) {
                sc->qfullmsk &= ~(1 << ring->qid);
                if (sc->qfullmsk == 0) {
                        iwm_start(sc);
                }
        }
}
3398
3399 /*
3400  * transmit side
3401  */
3402
3403 /*
3404  * Process a "command done" firmware notification.  This is where we wakeup
3405  * processes waiting for a synchronous command completion.
3406  * from if_iwn
3407  */
3408 static void
3409 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3410 {
3411         struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3412         struct iwm_tx_data *data;
3413
3414         if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3415                 return; /* Not a command ack. */
3416         }
3417
3418         /* XXX wide commands? */
3419         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3420             "cmd notification type 0x%x qid %d idx %d\n",
3421             pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3422
3423         data = &ring->data[pkt->hdr.idx];
3424
3425         /* If the command was mapped in an mbuf, free it. */
3426         if (data->m != NULL) {
3427                 bus_dmamap_sync(ring->data_dmat, data->map,
3428                     BUS_DMASYNC_POSTWRITE);
3429                 bus_dmamap_unload(ring->data_dmat, data->map);
3430                 m_freem(data->m);
3431                 data->m = NULL;
3432         }
3433         wakeup(&ring->desc[pkt->hdr.idx]);
3434
3435         if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3436                 device_printf(sc->sc_dev,
3437                     "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3438                     __func__, pkt->hdr.idx, ring->queued, ring->cur);
3439                 /* XXX call iwm_force_nmi() */
3440         }
3441
3442         KASSERT(ring->queued > 0, ("ring->queued is empty?"));
3443         ring->queued--;
3444         if (ring->queued == 0)
3445                 iwm_pcie_clear_cmd_in_flight(sc);
3446 }
3447
#if 0
/*
 * necessary only for block ack mode
 *
 * Mirror a frame's byte count (tagged with the station id) into the
 * TX scheduler's byte-count table in sched_dma so the firmware
 * scheduler can see TFD sizes.  Currently compiled out.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
        uint16_t len)
{
        struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
        uint16_t w_val;

        scd_bc_tbl = sc->sched_dma.vaddr;

        len += 8; /* magic numbers came naturally from paris */
        if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
                len = roundup(len, 4) / 4;

        /* Entry format: station id in the top 4 bits, length below. */
        w_val = htole16(sta_id << 12 | len);

        /* Update TX scheduler. */
        scd_bc_tbl[qid].tfd_offset[idx] = w_val;
        bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
            BUS_DMASYNC_PREWRITE);

        /* I really wonder what this is ?!? */
        /* NOTE(review): appears to duplicate low entries past the ring
         * end — presumably for hardware read-ahead; confirm against the
         * iwlwifi reference driver. */
        if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
                scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
                bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
                    BUS_DMASYNC_PREWRITE);
        }
}
#endif
3480
3481 /*
3482  * Take an 802.11 (non-n) rate, find the relevant rate
3483  * table entry.  return the index into in_ridx[].
3484  *
3485  * The caller then uses that index back into in_ridx
3486  * to figure out the rate index programmed /into/
3487  * the firmware for this given node.
3488  */
3489 static int
3490 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3491     uint8_t rate)
3492 {
3493         int i;
3494         uint8_t r;
3495
3496         for (i = 0; i < nitems(in->in_ridx); i++) {
3497                 r = iwm_rates[in->in_ridx[i]].rate;
3498                 if (rate == r)
3499                         return (i);
3500         }
3501
3502         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3503             "%s: couldn't find an entry for rate=%d\n",
3504             __func__,
3505             rate);
3506
3507         /* XXX Return the first */
3508         /* XXX TODO: have it return the /lowest/ */
3509         return (0);
3510 }
3511
3512 static int
3513 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3514 {
3515         int i;
3516
3517         for (i = 0; i < nitems(iwm_rates); i++) {
3518                 if (iwm_rates[i].rate == rate)
3519                         return (i);
3520         }
3521         /* XXX error? */
3522         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3523             "%s: couldn't find an entry for rate=%d\n",
3524             __func__,
3525             rate);
3526         return (0);
3527 }
3528
3529 /*
3530  * Fill in the rate related information for a transmit command.
3531  */
3532 static const struct iwm_rate *
3533 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3534         struct mbuf *m, struct iwm_tx_cmd *tx)
3535 {
3536         struct ieee80211_node *ni = &in->in_ni;
3537         struct ieee80211_frame *wh;
3538         const struct ieee80211_txparam *tp = ni->ni_txparms;
3539         const struct iwm_rate *rinfo;
3540         int type;
3541         int ridx, rate_flags;
3542
3543         wh = mtod(m, struct ieee80211_frame *);
3544         type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3545
3546         tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3547         tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3548
3549         if (type == IEEE80211_FC0_TYPE_MGT ||
3550             type == IEEE80211_FC0_TYPE_CTL ||
3551             (m->m_flags & M_EAPOL) != 0) {
3552                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3553                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3554                     "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3555         } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3556                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3557                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3558                     "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3559         } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3560                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3561                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3562                     "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3563         } else {
3564                 int i;
3565
3566                 /* for data frames, use RS table */
3567                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3568                 /* XXX pass pktlen */
3569                 (void) ieee80211_ratectl_rate(ni, NULL, 0);
3570                 i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3571                 ridx = in->in_ridx[i];
3572
3573                 /* This is the index into the programmed table */
3574                 tx->initial_rate_index = i;
3575                 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3576
3577                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3578                     "%s: start with i=%d, txrate %d\n",
3579                     __func__, i, iwm_rates[ridx].rate);
3580         }
3581
3582         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3583             "%s: frame type=%d txrate %d\n",
3584                 __func__, type, iwm_rates[ridx].rate);
3585
3586         rinfo = &iwm_rates[ridx];
3587
3588         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3589             __func__, ridx,
3590             rinfo->rate,
3591             !! (IWM_RIDX_IS_CCK(ridx))
3592             );
3593
3594         /* XXX TODO: hard-coded TX antenna? */
3595         rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3596         if (IWM_RIDX_IS_CCK(ridx))
3597                 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3598         tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3599
3600         return rinfo;
3601 }
3602
#define TB0_SIZE 16
/*
 * Queue one frame for transmission on AC 'ac'.
 *
 * Builds the iwm_tx_cmd (rate, flags, power-save timeout, a copy of the
 * 802.11 header), optionally software-encrypts the frame, DMA-maps the
 * payload, fills the TFD with the command and payload segments, and
 * kicks the hardware write pointer.  Consumes 'm' on failure as well.
 * Returns 0 or an errno.  Caller holds the IWM lock.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
        struct ieee80211com *ic = &sc->sc_ic;
        struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
        struct iwm_node *in = IWM_NODE(ni);
        struct iwm_tx_ring *ring;
        struct iwm_tx_data *data;
        struct iwm_tfd *desc;
        struct iwm_device_cmd *cmd;
        struct iwm_tx_cmd *tx;
        struct ieee80211_frame *wh;
        struct ieee80211_key *k = NULL;
        struct mbuf *m1;
        const struct iwm_rate *rinfo;
        uint32_t flags;
        u_int hdrlen;
        bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
        int nsegs;
        uint8_t tid, type;
        int i, totlen, error, pad;

        wh = mtod(m, struct ieee80211_frame *);
        hdrlen = ieee80211_anyhdrsize(wh);
        type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
        tid = 0;
        ring = &sc->txq[ac];
        desc = &ring->desc[ring->cur];
        memset(desc, 0, sizeof(*desc));
        data = &ring->data[ring->cur];

        /* Fill out iwm_tx_cmd to send to the firmware */
        cmd = &ring->cmd[ring->cur];
        cmd->hdr.code = IWM_TX_CMD;
        cmd->hdr.flags = 0;
        cmd->hdr.qid = ring->qid;
        cmd->hdr.idx = ring->cur;

        tx = (void *)cmd->data;
        memset(tx, 0, sizeof(*tx));

        rinfo = iwm_tx_fill_cmd(sc, in, m, tx);

        /* Encrypt the frame if need be. */
        if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
                /* Retrieve key for TX && do software encryption. */
                k = ieee80211_crypto_encap(ni, m);
                if (k == NULL) {
                        m_freem(m);
                        return (ENOBUFS);
                }
                /* 802.11 header may have moved. */
                wh = mtod(m, struct ieee80211_frame *);
        }

        if (ieee80211_radiotap_active_vap(vap)) {
                struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

                tap->wt_flags = 0;
                tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
                tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
                tap->wt_rate = rinfo->rate;
                if (k != NULL)
                        tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
                ieee80211_radiotap_tx(vap, m);
        }


        totlen = m->m_pkthdr.len;

        flags = 0;
        /* Unicast frames expect an ACK from the peer. */
        if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
                flags |= IWM_TX_CMD_FLG_ACK;
        }

        /* Request RTS/CTS protection for long unicast data frames. */
        if (type == IEEE80211_FC0_TYPE_DATA
            && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
            && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
                flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
        }

        /* Non-peer traffic goes out via the auxiliary station. */
        if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
            type != IEEE80211_FC0_TYPE_DATA)
                tx->sta_id = sc->sc_aux_sta.sta_id;
        else
                tx->sta_id = IWM_STATION_ID;

        /* Power-save frame timeout depends on the management subtype. */
        if (type == IEEE80211_FC0_TYPE_MGT) {
                uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

                if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
                    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
                        tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
                } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
                        tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
                } else {
                        tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
                }
        } else {
                tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
        }

        if (hdrlen & 3) {
                /* First segment length must be a multiple of 4. */
                flags |= IWM_TX_CMD_FLG_MH_PAD;
                pad = 4 - (hdrlen & 3);
        } else
                pad = 0;

        tx->driver_txop = 0;
        tx->next_frame_len = 0;

        tx->len = htole16(totlen);
        tx->tid_tspec = tid;
        tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

        /* Set physical address of "scratch area". */
        tx->dram_lsb_ptr = htole32(data->scratch_paddr);
        tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

        /* Copy 802.11 header in TX command. */
        memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

        flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

        tx->sec_ctl = 0;
        tx->tx_flags |= htole32(flags);

        /* Trim 802.11 header. */
        /* The header travels in the command; only the payload is mapped. */
        m_adj(m, hdrlen);
        error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
            segs, &nsegs, BUS_DMA_NOWAIT);
        if (error != 0) {
                if (error != EFBIG) {
                        device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
                            error);
                        m_freem(m);
                        return error;
                }
                /* Too many DMA segments, linearize mbuf. */
                m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
                if (m1 == NULL) {
                        device_printf(sc->sc_dev,
                            "%s: could not defrag mbuf\n", __func__);
                        m_freem(m);
                        return (ENOBUFS);
                }
                m = m1;

                error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
                    segs, &nsegs, BUS_DMA_NOWAIT);
                if (error != 0) {
                        device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
                            error);
                        m_freem(m);
                        return error;
                }
        }
        data->m = m;
        data->in = in;
        data->done = 0;

        IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
            "sending txd %p, in %p\n", data, data->in);
        KASSERT(data->in != NULL, ("node is NULL"));

        IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
            "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
            ring->qid, ring->cur, totlen, nsegs,
            le32toh(tx->tx_flags),
            le32toh(tx->rate_n_flags),
            tx->initial_rate_index
            );

        /* Fill TX descriptor. */
        /* TB0/TB1 cover the command header, tx command and 802.11 header. */
        desc->num_tbs = 2 + nsegs;

        desc->tbs[0].lo = htole32(data->cmd_paddr);
        desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
            (TB0_SIZE << 4);
        desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
        desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
            ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
              + hdrlen + pad - TB0_SIZE) << 4);

        /* Other DMA segments are for data payload. */
        for (i = 0; i < nsegs; i++) {
                seg = &segs[i];
                desc->tbs[i+2].lo = htole32(seg->ds_addr);
                desc->tbs[i+2].hi_n_len = \
                    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
                    | ((seg->ds_len) << 4);
        }

        /* Flush payload, command and descriptor before the doorbell. */
        bus_dmamap_sync(ring->data_dmat, data->map,
            BUS_DMASYNC_PREWRITE);
        bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
            BUS_DMASYNC_PREWRITE);
        bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
            BUS_DMASYNC_PREWRITE);

#if 0
        iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

        /* Kick TX ring. */
        ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
        IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

        /* Mark TX ring as full if we reach a certain threshold. */
        if (++ring->queued > IWM_TX_RING_HIMARK) {
                sc->qfullmsk |= 1 << ring->qid;
        }

        return 0;
}
3820
3821 static int
3822 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3823     const struct ieee80211_bpf_params *params)
3824 {
3825         struct ieee80211com *ic = ni->ni_ic;
3826         struct iwm_softc *sc = ic->ic_softc;
3827         int error = 0;
3828
3829         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3830             "->%s begin\n", __func__);
3831
3832         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3833                 m_freem(m);
3834                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3835                     "<-%s not RUNNING\n", __func__);
3836                 return (ENETDOWN);
3837         }
3838
3839         IWM_LOCK(sc);
3840         /* XXX fix this */
3841         if (params == NULL) {
3842                 error = iwm_tx(sc, m, ni, 0);
3843         } else {
3844                 error = iwm_tx(sc, m, ni, 0);
3845         }
3846         sc->sc_tx_timer = 5;
3847         IWM_UNLOCK(sc);
3848
3849         return (error);
3850 }
3851
3852 /*
3853  * mvm/tx.c
3854  */
3855
3856 /*
3857  * Note that there are transports that buffer frames before they reach
3858  * the firmware. This means that after flush_tx_path is called, the
3859  * queue might not be empty. The race-free way to handle this is to:
3860  * 1) set the station as draining
3861  * 2) flush the Tx path
3862  * 3) wait for the transport queues to be empty
3863  */
3864 int
3865 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3866 {
3867         int ret;
3868         struct iwm_tx_path_flush_cmd flush_cmd = {
3869                 .queues_ctl = htole32(tfd_msk),
3870                 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3871         };
3872
3873         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3874             sizeof(flush_cmd), &flush_cmd);
3875         if (ret)
3876                 device_printf(sc->sc_dev,
3877                     "Flushing tx queue failed: %d\n", ret);
3878         return ret;
3879 }
3880
3881 /*
3882  * BEGIN mvm/sta.c
3883  */
3884
3885 static int
3886 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3887         struct iwm_mvm_add_sta_cmd *cmd, int *status)
3888 {
3889         return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
3890             cmd, status);
3891 }
3892
3893 /* send station add/update command to firmware */
3894 static int
3895 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3896 {
3897         struct iwm_mvm_add_sta_cmd add_sta_cmd;
3898         int ret;
3899         uint32_t status;
3900
3901         memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3902
3903         add_sta_cmd.sta_id = IWM_STATION_ID;
3904         add_sta_cmd.mac_id_n_color
3905             = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3906                 IWM_DEFAULT_COLOR));
3907         if (!update) {
3908                 int ac;
3909                 for (ac = 0; ac < WME_NUM_AC; ac++) {
3910                         add_sta_cmd.tfd_queue_msk |=
3911                             htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
3912                 }
3913                 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3914         }
3915         add_sta_cmd.add_modify = update ? 1 : 0;
3916         add_sta_cmd.station_flags_msk
3917             |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3918         add_sta_cmd.tid_disable_tx = htole16(0xffff);
3919         if (update)
3920                 add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
3921
3922         status = IWM_ADD_STA_SUCCESS;
3923         ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3924         if (ret)
3925                 return ret;
3926
3927         switch (status) {
3928         case IWM_ADD_STA_SUCCESS:
3929                 break;
3930         default:
3931                 ret = EIO;
3932                 device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3933                 break;
3934         }
3935
3936         return ret;
3937 }
3938
/* Add the BSS station to the firmware for the first time. */
static int
iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return (iwm_mvm_sta_send_to_fw(sc, in, 0));
}
3944
/* Update an already-added BSS station in the firmware. */
static int
iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return (iwm_mvm_sta_send_to_fw(sc, in, 1));
}
3950
3951 static int
3952 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3953         const uint8_t *addr, uint16_t mac_id, uint16_t color)
3954 {
3955         struct iwm_mvm_add_sta_cmd cmd;
3956         int ret;
3957         uint32_t status;
3958
3959         memset(&cmd, 0, sizeof(cmd));
3960         cmd.sta_id = sta->sta_id;
3961         cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3962
3963         cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3964         cmd.tid_disable_tx = htole16(0xffff);
3965
3966         if (addr)
3967                 IEEE80211_ADDR_COPY(cmd.addr, addr);
3968
3969         ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3970         if (ret)
3971                 return ret;
3972
3973         switch (status) {
3974         case IWM_ADD_STA_SUCCESS:
3975                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3976                     "%s: Internal station added.\n", __func__);
3977                 return 0;
3978         default:
3979                 device_printf(sc->sc_dev,
3980                     "%s: Add internal station failed, status=0x%x\n",
3981                     __func__, status);
3982                 ret = EIO;
3983                 break;
3984         }
3985         return ret;
3986 }
3987
3988 static int
3989 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3990 {
3991         int ret;
3992
3993         sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
3994         sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
3995
3996         ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
3997         if (ret)
3998                 return ret;
3999
4000         ret = iwm_mvm_add_int_sta_common(sc,
4001             &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
4002
4003         if (ret)
4004                 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
4005         return ret;
4006 }
4007
4008 /*
4009  * END mvm/sta.c
4010  */
4011
4012 /*
4013  * BEGIN mvm/quota.c
4014  */
4015
4016 static int
4017 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
4018 {
4019         struct iwm_time_quota_cmd cmd;
4020         int i, idx, ret, num_active_macs, quota, quota_rem;
4021         int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
4022         int n_ifs[IWM_MAX_BINDINGS] = {0, };
4023         uint16_t id;
4024
4025         memset(&cmd, 0, sizeof(cmd));
4026
4027         /* currently, PHY ID == binding ID */
4028         if (in) {
4029                 id = in->in_phyctxt->id;
4030                 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
4031                 colors[id] = in->in_phyctxt->color;
4032
4033                 if (1)
4034                         n_ifs[id] = 1;
4035         }
4036
4037         /*
4038          * The FW's scheduling session consists of
4039          * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
4040          * equally between all the bindings that require quota
4041          */
4042         num_active_macs = 0;
4043         for (i = 0; i < IWM_MAX_BINDINGS; i++) {
4044                 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
4045                 num_active_macs += n_ifs[i];
4046         }
4047
4048         quota = 0;
4049         quota_rem = 0;
4050         if (num_active_macs) {
4051                 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
4052                 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
4053         }
4054
4055         for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
4056                 if (colors[i] < 0)
4057                         continue;
4058
4059                 cmd.quotas[idx].id_and_color =
4060                         htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
4061
4062                 if (n_ifs[i] <= 0) {
4063                         cmd.quotas[idx].quota = htole32(0);
4064                         cmd.quotas[idx].max_duration = htole32(0);
4065                 } else {
4066                         cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
4067                         cmd.quotas[idx].max_duration = htole32(0);
4068                 }
4069                 idx++;
4070         }
4071
4072         /* Give the remainder of the session to the first binding */
4073         cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
4074
4075         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
4076             sizeof(cmd), &cmd);
4077         if (ret)
4078                 device_printf(sc->sc_dev,
4079                     "%s: Failed to send quota: %d\n", __func__, ret);
4080         return ret;
4081 }
4082
4083 /*
4084  * END mvm/quota.c
4085  */
4086
4087 /*
4088  * ieee80211 routines
4089  */
4090
4091 /*
4092  * Change to AUTH state in 80211 state machine.  Roughly matches what
4093  * Linux does in bss_info_changed().
4094  */
4095 static int
4096 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
4097 {
4098         struct ieee80211_node *ni;
4099         struct iwm_node *in;
4100         struct iwm_vap *iv = IWM_VAP(vap);
4101         uint32_t duration;
4102         int error;
4103
4104         /*
4105          * XXX i have a feeling that the vap node is being
4106          * freed from underneath us. Grr.
4107          */
4108         ni = ieee80211_ref_node(vap->iv_bss);
4109         in = IWM_NODE(ni);
4110         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4111             "%s: called; vap=%p, bss ni=%p\n",
4112             __func__,
4113             vap,
4114             ni);
4115
4116         in->in_assoc = 0;
4117
4118         error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
4119         if (error != 0)
4120                 return error;
4121
4122         error = iwm_allow_mcast(vap, sc);
4123         if (error) {
4124                 device_printf(sc->sc_dev,
4125                     "%s: failed to set multicast\n", __func__);
4126                 goto out;
4127         }
4128
4129         /*
4130          * This is where it deviates from what Linux does.
4131          *
4132          * Linux iwlwifi doesn't reset the nic each time, nor does it
4133          * call ctxt_add() here.  Instead, it adds it during vap creation,
4134          * and always does a mac_ctx_changed().
4135          *
4136          * The openbsd port doesn't attempt to do that - it reset things
4137          * at odd states and does the add here.
4138          *
4139          * So, until the state handling is fixed (ie, we never reset
4140          * the NIC except for a firmware failure, which should drag
4141          * the NIC back to IDLE, re-setup and re-add all the mac/phy
4142          * contexts that are required), let's do a dirty hack here.
4143          */
4144         if (iv->is_uploaded) {
4145                 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4146                         device_printf(sc->sc_dev,
4147                             "%s: failed to update MAC\n", __func__);
4148                         goto out;
4149                 }
4150                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4151                     in->in_ni.ni_chan, 1, 1)) != 0) {
4152                         device_printf(sc->sc_dev,
4153                             "%s: failed update phy ctxt\n", __func__);
4154                         goto out;
4155                 }
4156                 in->in_phyctxt = &sc->sc_phyctxt[0];
4157
4158                 if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
4159                         device_printf(sc->sc_dev,
4160                             "%s: binding update cmd\n", __func__);
4161                         goto out;
4162                 }
4163                 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4164                         device_printf(sc->sc_dev,
4165                             "%s: failed to update sta\n", __func__);
4166                         goto out;
4167                 }
4168         } else {
4169                 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4170                         device_printf(sc->sc_dev,
4171                             "%s: failed to add MAC\n", __func__);
4172                         goto out;
4173                 }
4174                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4175                     in->in_ni.ni_chan, 1, 1)) != 0) {
4176                         device_printf(sc->sc_dev,
4177                             "%s: failed add phy ctxt!\n", __func__);
4178                         error = ETIMEDOUT;
4179                         goto out;
4180                 }
4181                 in->in_phyctxt = &sc->sc_phyctxt[0];
4182
4183                 if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
4184                         device_printf(sc->sc_dev,
4185                             "%s: binding add cmd\n", __func__);
4186                         goto out;
4187                 }
4188                 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4189                         device_printf(sc->sc_dev,
4190                             "%s: failed to add sta\n", __func__);
4191                         goto out;
4192                 }
4193         }
4194
4195         /*
4196          * Prevent the FW from wandering off channel during association
4197          * by "protecting" the session with a time event.
4198          */
4199         /* XXX duration is in units of TU, not MS */
4200         duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4201         iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
4202         DELAY(100);
4203
4204         error = 0;
4205 out:
4206         ieee80211_free_node(ni);
4207         return (error);
4208 }
4209
4210 static int
4211 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
4212 {
4213         struct iwm_node *in = IWM_NODE(vap->iv_bss);
4214         int error;
4215
4216         if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4217                 device_printf(sc->sc_dev,
4218                     "%s: failed to update STA\n", __func__);
4219                 return error;
4220         }
4221
4222         in->in_assoc = 1;
4223         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4224                 device_printf(sc->sc_dev,
4225                     "%s: failed to update MAC\n", __func__);
4226                 return error;
4227         }
4228
4229         return 0;
4230 }
4231
4232 static int
4233 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
4234 {
4235         uint32_t tfd_msk;
4236
4237         /*
4238          * Ok, so *technically* the proper set of calls for going
4239          * from RUN back to SCAN is:
4240          *
4241          * iwm_mvm_power_mac_disable(sc, in);
4242          * iwm_mvm_mac_ctxt_changed(sc, in);
4243          * iwm_mvm_rm_sta(sc, in);
4244          * iwm_mvm_update_quotas(sc, NULL);
4245          * iwm_mvm_mac_ctxt_changed(sc, in);
4246          * iwm_mvm_binding_remove_vif(sc, in);
4247          * iwm_mvm_mac_ctxt_remove(sc, in);
4248          *
4249          * However, that freezes the device not matter which permutations
4250          * and modifications are attempted.  Obviously, this driver is missing
4251          * something since it works in the Linux driver, but figuring out what
4252          * is missing is a little more complicated.  Now, since we're going
4253          * back to nothing anyway, we'll just do a complete device reset.
4254          * Up your's, device!
4255          */
4256         /*
4257          * Just using 0xf for the queues mask is fine as long as we only
4258          * get here from RUN state.
4259          */
4260         tfd_msk = 0xf;
4261         mbufq_drain(&sc->sc_snd);
4262         iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
4263         /*
4264          * We seem to get away with just synchronously sending the
4265          * IWM_TXPATH_FLUSH command.
4266          */
4267 //      iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
4268         iwm_stop_device(sc);
4269         iwm_init_hw(sc);
4270         if (in)
4271                 in->in_assoc = 0;
4272         return 0;
4273
4274 #if 0
4275         int error;
4276
4277         iwm_mvm_power_mac_disable(sc, in);
4278
4279         if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4280                 device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
4281                 return error;
4282         }
4283
4284         if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
4285                 device_printf(sc->sc_dev, "sta remove fail %d\n", error);
4286                 return error;
4287         }
4288         error = iwm_mvm_rm_sta(sc, in);
4289         in->in_assoc = 0;
4290         iwm_mvm_update_quotas(sc, NULL);
4291         if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4292                 device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
4293                 return error;
4294         }
4295         iwm_mvm_binding_remove_vif(sc, in);
4296
4297         iwm_mvm_mac_ctxt_remove(sc, in);
4298
4299         return error;
4300 #endif
4301 }
4302
4303 static struct ieee80211_node *
4304 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4305 {
4306         return malloc(sizeof (struct iwm_node), M_80211_NODE,
4307             M_NOWAIT | M_ZERO);
4308 }
4309
4310 uint8_t
4311 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4312 {
4313         int i;
4314         uint8_t rval;
4315
4316         for (i = 0; i < rs->rs_nrates; i++) {
4317                 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4318                 if (rval == iwm_rates[ridx].rate)
4319                         return rs->rs_rates[i];
4320         }
4321
4322         return 0;
4323 }
4324
/*
 * Build the firmware link-quality (rate selection) state for a node:
 * populates in->in_ridx[] (802.11 rate -> hardware rate index map)
 * and fills in->in_lq; the caller sends the IWM_LQ_CMD separately.
 */
static void
iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
//	int txant = 0;

	/* Bail out rather than overflow lq->rs_table below. */
	if (nrates > nitems(lq->rs_table)) {
		device_printf(sc->sc_dev,
		    "%s: node supports %d rates, driver handles "
		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
		return;
	}
	if (nrates == 0) {
		device_printf(sc->sc_dev,
		    "%s: node supports 0 rates, odd!\n", __func__);
		return;
	}

	/*
	 * XXX .. and most of iwm_node is not initialised explicitly;
	 * it's all just 0x0 passed to the firmware.
	 */

	/* first figure out which rates we should support */
	/* XXX TODO: this isn't 11n aware /at all/ */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
	    "%s: nrates=%d\n", __func__, nrates);

	/*
	 * Loop over nrates and populate in_ridx from the highest
	 * rate to the lowest rate.  Remember, in_ridx[] has
	 * IEEE80211_RATE_MAXSIZE entries!
	 */
	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX) {
			/* in_ridx[i] stays -1 from the memset above. */
			device_printf(sc->sc_dev,
			    "%s: WARNING: device rate for %d not found!\n",
			    __func__, rate);
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
			    __func__,
			    i,
			    rate,
			    ridx);
			in->in_ridx[i] = ridx;
		}
	}

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/* For HT, always enable RTS/CTS to avoid excessive retries. */
	if (ni->ni_flags & IEEE80211_NODE_HT)
		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	/*
	 * XXX TODO: this should be looping over the min of nrates
	 * and LQ_MAX_RETRY_NUM.  Sigh.
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

#if 0
		if (txant == 0)
			txant = iwm_mvm_get_valid_tx_ant(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;
#else
		/* Antenna rotation disabled: always use the full valid mask. */
		nextant = iwm_mvm_get_valid_tx_ant(sc);
#endif
		/*
		 * Map the rate id into a rate index into
		 * our hardware table containing the
		 * configuration to use for this rate.
		 */
		ridx = in->in_ridx[i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "station rate i=%d, rate=%d, hw=%x\n",
		    i, iwm_rates[ridx].rate, tab);
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	for (i = nrates; i < nitems(lq->rs_table); i++) {
		KASSERT(tab != 0, ("invalid tab"));
		lq->rs_table[i] = htole32(tab);
	}
}
4443
4444 static int
4445 iwm_media_change(struct ifnet *ifp)
4446 {
4447         struct ieee80211vap *vap = ifp->if_softc;
4448         struct ieee80211com *ic = vap->iv_ic;
4449         struct iwm_softc *sc = ic->ic_softc;
4450         int error;
4451
4452         error = ieee80211_media_change(ifp);
4453         if (error != ENETRESET)
4454                 return error;
4455
4456         IWM_LOCK(sc);
4457         if (ic->ic_nrunning > 0) {
4458                 iwm_stop(sc);
4459                 iwm_init(sc);
4460         }
4461         IWM_UNLOCK(sc);
4462         return error;
4463 }
4464
4465
/*
 * net80211 state-change handler.  Runs with the IEEE80211 lock held on
 * entry/exit but drops it in favour of the IWM lock for the body; the
 * RUN-exit paths temporarily swap the locks back to recurse into
 * iv_newstate().  Returns the result of the saved net80211 handler.
 */
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_node *in;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
	    "switching state %s -> %s\n",
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate]);
	IEEE80211_UNLOCK(ic);
	IWM_LOCK(sc);

	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
		iwm_led_blink_stop(sc);

	/* disable beacon filtering if we're hopping out of RUN */
	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
		iwm_mvm_disable_beacon_filter(sc);

		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
			in->in_assoc = 0;

		if (nstate == IEEE80211_S_INIT) {
			/*
			 * Run net80211's handler first (under its lock),
			 * then reset the device via iwm_release().
			 */
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			error = ivp->iv_newstate(vap, nstate, arg);
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
			iwm_release(sc, NULL);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return error;
		}

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 *
		 * XXX TODO: fix this for FreeBSD!
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Force transition to INIT; MGT=%d\n", arg);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			/* Always pass arg as -1 since we can't Tx right now. */
			/*
			 * XXX arg is just ignored anyway when transitioning
			 *     to IEEE80211_S_INIT.
			 */
			vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Going INIT->SCAN\n");
			nstate = IEEE80211_S_SCAN;
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state: %d\n",
			    __func__, error);
			break;
		}
		break;

	case IEEE80211_S_ASSOC:
		/*
		 * EBS may be disabled due to previous failures reported by FW.
		 * Reset EBS status here assuming environment has been changed.
		 */
		sc->last_ebs_successful = TRUE;
		if ((error = iwm_assoc(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to associate: %d\n", __func__,
			    error);
			break;
		}
		break;

	case IEEE80211_S_RUN:
	{
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		/* Update the association state, now we have it all */
		/* (e.g. associd comes in at this point) */
		error = iwm_assoc(vap, sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update association state: %d\n",
			    __func__,
			    error);
			break;
		}

		in = IWM_NODE(vap->iv_bss);
		iwm_mvm_power_mac_update_mode(sc, in);
		iwm_mvm_enable_beacon_filter(sc, in);
		iwm_mvm_update_quotas(sc, in);
		iwm_setrates(sc, in);

		/* Push the rate table built by iwm_setrates() to the fw. */
		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: IWM_LQ_CMD failed\n", __func__);
		}

		iwm_mvm_led_enable(sc);
		break;
	}

	default:
		break;
	}
	IWM_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	return (ivp->iv_newstate(vap, nstate, arg));
}
4608
4609 void
4610 iwm_endscan_cb(void *arg, int pending)
4611 {
4612         struct iwm_softc *sc = arg;
4613         struct ieee80211com *ic = &sc->sc_ic;
4614
4615         IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4616             "%s: scan ended\n",
4617             __func__);
4618
4619         ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4620 }
4621
4622 /*
4623  * Aging and idle timeouts for the different possible scenarios
4624  * in default configuration
4625  */
static const uint32_t
iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	/* Each row is one scenario: { aging timer, idle timer }. */
	{
		/* single unicast */
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
	},
	{
		/* aggregated unicast */
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
	},
	{
		/* multicast */
		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
	},
	{
		/* block ack */
		htole32(IWM_SF_BA_AGING_TIMER_DEF),
		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
	},
	{
		/* TX response */
		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
	},
};
4649
4650 /*
4651  * Aging and idle timeouts for the different possible scenarios
4652  * in single BSS MAC configuration.
4653  */
static const uint32_t
iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	/* Each row is one scenario: { aging timer, idle timer }. */
	{
		/* single unicast */
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
	},
	{
		/* aggregated unicast */
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
	},
	{
		/* multicast */
		htole32(IWM_SF_MCAST_AGING_TIMER),
		htole32(IWM_SF_MCAST_IDLE_TIMER)
	},
	{
		/* block ack */
		htole32(IWM_SF_BA_AGING_TIMER),
		htole32(IWM_SF_BA_IDLE_TIMER)
	},
	{
		/* TX response */
		htole32(IWM_SF_TX_RE_AGING_TIMER),
		htole32(IWM_SF_TX_RE_IDLE_TIMER)
	},
};
4677
/*
 * Fill in a Smart Fifo configuration command: pick the FULL_ON
 * watermark from the peer's capabilities and load the aging/idle
 * timeout tables.  ni may be NULL when not associated.
 */
static void
iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
    struct ieee80211_node *ni)
{
	int i, j, watermark;

	/* Scanning always uses the scan watermark. */
	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);

	/*
	 * If we are in association flow - check antenna configuration
	 * capabilities of the AP station, and choose the watermark accordingly.
	 */
	if (ni) {
		if (ni->ni_flags & IEEE80211_NODE_HT) {
#ifdef notyet
			/* MIMO watermarks, once rx MCS info is available. */
			if (ni->ni_rxmcs[2] != 0)
				watermark = IWM_SF_W_MARK_MIMO3;
			else if (ni->ni_rxmcs[1] != 0)
				watermark = IWM_SF_W_MARK_MIMO2;
			else
#endif
				watermark = IWM_SF_W_MARK_SISO;
		} else {
			watermark = IWM_SF_W_MARK_LEGACY;
		}
	/* default watermark value for unassociated mode. */
	} else {
		watermark = IWM_SF_W_MARK_MIMO2;
	}
	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);

	/* Long-delay timeouts use one fixed value for all scenarios/types. */
	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
			sf_cmd->long_delay_timeouts[i][j] =
					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
		}
	}

	/* Full-on timeouts differ between associated and idle states. */
	if (ni) {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
		       sizeof(iwm_sf_full_timeout));
	} else {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
		       sizeof(iwm_sf_full_timeout_def));
	}
}
4724
4725 static int
4726 iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4727 {
4728         struct ieee80211com *ic = &sc->sc_ic;
4729         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4730         struct iwm_sf_cfg_cmd sf_cmd = {
4731                 .state = htole32(IWM_SF_FULL_ON),
4732         };
4733         int ret = 0;
4734
4735         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4736                 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4737
4738         switch (new_state) {
4739         case IWM_SF_UNINIT:
4740         case IWM_SF_INIT_OFF:
4741                 iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4742                 break;
4743         case IWM_SF_FULL_ON:
4744                 iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4745                 break;
4746         default:
4747                 IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4748                     "Invalid state: %d. not sending Smart Fifo cmd\n",
4749                           new_state);
4750                 return EINVAL;
4751         }
4752
4753         ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4754                                    sizeof(sf_cmd), &sf_cmd);
4755         return ret;
4756 }
4757
4758 static int
4759 iwm_send_bt_init_conf(struct iwm_softc *sc)
4760 {
4761         struct iwm_bt_coex_cmd bt_cmd;
4762
4763         bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4764         bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4765
4766         return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4767             &bt_cmd);
4768 }
4769
/*
 * Send an IWM_MCC_UPDATE_CMD to set the regulatory domain ("mobile
 * country code") in the firmware.  'alpha2' is a two-character country
 * code (the caller in this file passes "ZZ").  In debug builds the
 * firmware's reply (domain and channel count) is logged.
 *
 * Returns 0 on success or an errno from iwm_send_cmd().
 */
static int
iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
{
	struct iwm_mcc_update_cmd mcc_cmd;
	struct iwm_host_cmd hcmd = {
		.id = IWM_MCC_UPDATE_CMD,
		/* WANT_SKB: keep the response packet for parsing below. */
		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
		.data = { &mcc_cmd },
	};
	int ret;
#ifdef IWM_DEBUG
	struct iwm_rx_packet *pkt;
	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
	struct iwm_mcc_update_resp *mcc_resp;
	int n_channels;
	uint16_t mcc;
#endif
	/* v2 responses are selected by the LAR_SUPPORT_V2 capability. */
	int resp_v2 = fw_has_capa(&sc->ucode_capa,
	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	/* Pack the two ASCII characters into one 16-bit code. */
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if (fw_has_api(&sc->ucode_capa, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
	    fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;

	/* Command length depends on the command version the FW speaks. */
	if (resp_v2)
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
	else
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);

	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
	    "send MCC update to FW with '%c%c' src = %d\n",
	    alpha2[0], alpha2[1], mcc_cmd.source_id);

	ret = iwm_send_cmd(sc, &hcmd);
	if (ret)
		return ret;

#ifdef IWM_DEBUG
	pkt = hcmd.resp_pkt;

	/* Extract MCC response */
	if (resp_v2) {
		mcc_resp = (void *)pkt->data;
		mcc = mcc_resp->mcc;
		n_channels =  le32toh(mcc_resp->n_channels);
	} else {
		mcc_resp_v1 = (void *)pkt->data;
		mcc = mcc_resp_v1->mcc;
		n_channels =  le32toh(mcc_resp_v1->n_channels);
	}

	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
	if (mcc == 0)
		mcc = 0x3030;  /* "00" - world */

	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
	    "regulatory domain '%c%c' (%d channels available)\n",
	    mcc >> 8, mcc & 0xff, n_channels);
#endif
	/* Release the response buffer held due to IWM_CMD_WANT_SKB. */
	iwm_free_resp(sc, &hcmd);

	return 0;
}
4837
4838 static void
4839 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4840 {
4841         struct iwm_host_cmd cmd = {
4842                 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4843                 .len = { sizeof(uint32_t), },
4844                 .data = { &backoff, },
4845         };
4846
4847         if (iwm_send_cmd(sc, &cmd) != 0) {
4848                 device_printf(sc->sc_dev,
4849                     "failed to change thermal tx backoff\n");
4850         }
4851 }
4852
/*
 * Bring the hardware fully up: run the INIT image, reload the regular
 * firmware, then configure coexistence, antennas, PHY, stations, power
 * and Tx queues.  On any failure after the regular firmware load the
 * device is stopped again.  Returns 0 on success or an errno.
 */
static int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int error, i, ac;

	if ((error = iwm_start_hw(sc)) != 0) {
		printf("iwm_start_hw: failed %d\n", error);
		return error;
	}

	/* Boot the INIT firmware image first. */
	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
		printf("iwm_run_init_mvm_ucode: failed %d\n", error);
		return error;
	}

	/*
	 * should stop and start HW since that INIT
	 * image just loaded
	 */
	iwm_stop_device(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* restart, this time with the regular firmware */
	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
	if (error) {
		device_printf(sc->sc_dev, "could not load firmware\n");
		goto error;
	}

	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
		device_printf(sc->sc_dev, "bt init conf failed\n");
		goto error;
	}

	error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
	if (error != 0) {
		device_printf(sc->sc_dev, "antenna config failed\n");
		goto error;
	}

	/* Send phy db control command and then phy db calibration */
	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
		goto error;

	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
		goto error;
	}

	/* Add auxiliary station for scanning */
	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
		device_printf(sc->sc_dev, "add_aux_sta failed\n");
		goto error;
	}

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		/* NOTE(review): index 1, not 0 — confirm this is intended. */
		if ((error = iwm_mvm_phy_ctxt_add(sc,
		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
			goto error;
	}

	/* Initialize tx backoffs to the minimum. */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_mvm_tt_tx_backoff(sc, 0);

	error = iwm_mvm_power_update_device(sc);
	if (error)
		goto error;

	/* Set an initial regulatory domain if the firmware supports LAR. */
	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
		if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
			goto error;
	}

	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
			goto error;
	}

	/* Enable Tx queues. */
	for (ac = 0; ac < WME_NUM_AC; ac++) {
		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
		    iwm_mvm_ac_to_tx_fifo[ac]);
		if (error)
			goto error;
	}

	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
		goto error;
	}

	return 0;

 error:
	iwm_stop_device(sc);
	return error;
}
4960
4961 /* Allow multicast from our BSSID. */
4962 static int
4963 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4964 {
4965         struct ieee80211_node *ni = vap->iv_bss;
4966         struct iwm_mcast_filter_cmd *cmd;
4967         size_t size;
4968         int error;
4969
4970         size = roundup(sizeof(*cmd), 4);
4971         cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4972         if (cmd == NULL)
4973                 return ENOMEM;
4974         cmd->filter_own = 1;
4975         cmd->port_id = 0;
4976         cmd->count = 0;
4977         cmd->pass_all = 1;
4978         IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4979
4980         error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4981             IWM_CMD_SYNC, size, cmd);
4982         free(cmd, M_DEVBUF);
4983
4984         return (error);
4985 }
4986
4987 /*
4988  * ifnet interfaces
4989  */
4990
4991 static void
4992 iwm_init(struct iwm_softc *sc)
4993 {
4994         int error;
4995
4996         if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4997                 return;
4998         }
4999         sc->sc_generation++;
5000         sc->sc_flags &= ~IWM_FLAG_STOPPED;
5001
5002         if ((error = iwm_init_hw(sc)) != 0) {
5003                 printf("iwm_init_hw failed %d\n", error);
5004                 iwm_stop(sc);
5005                 return;
5006         }
5007
5008         /*
5009          * Ok, firmware loaded and we are jogging
5010          */
5011         sc->sc_flags |= IWM_FLAG_HW_INITED;
5012         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
5013 }
5014
5015 static int
5016 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
5017 {
5018         struct iwm_softc *sc;
5019         int error;
5020
5021         sc = ic->ic_softc;
5022
5023         IWM_LOCK(sc);
5024         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
5025                 IWM_UNLOCK(sc);
5026                 return (ENXIO);
5027         }
5028         error = mbufq_enqueue(&sc->sc_snd, m);
5029         if (error) {
5030                 IWM_UNLOCK(sc);
5031                 return (error);
5032         }
5033         iwm_start(sc);
5034         IWM_UNLOCK(sc);
5035         return (0);
5036 }
5037
5038 /*
5039  * Dequeue packets from sendq and call send.
5040  */
5041 static void
5042 iwm_start(struct iwm_softc *sc)
5043 {
5044         struct ieee80211_node *ni;
5045         struct mbuf *m;
5046         int ac = 0;
5047
5048         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
5049         while (sc->qfullmsk == 0 &&
5050                 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
5051                 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
5052                 if (iwm_tx(sc, m, ni, ac) != 0) {
5053                         if_inc_counter(ni->ni_vap->iv_ifp,
5054                             IFCOUNTER_OERRORS, 1);
5055                         ieee80211_free_node(ni);
5056                         continue;
5057                 }
5058                 sc->sc_tx_timer = 15;
5059         }
5060         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
5061 }
5062
5063 static void
5064 iwm_stop(struct iwm_softc *sc)
5065 {
5066
5067         sc->sc_flags &= ~IWM_FLAG_HW_INITED;
5068         sc->sc_flags |= IWM_FLAG_STOPPED;
5069         sc->sc_generation++;
5070         iwm_led_blink_stop(sc);
5071         sc->sc_tx_timer = 0;
5072         iwm_stop_device(sc);
5073         sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5074 }
5075
5076 static void
5077 iwm_watchdog(void *arg)
5078 {
5079         struct iwm_softc *sc = arg;
5080         struct ieee80211com *ic = &sc->sc_ic;
5081
5082         if (sc->sc_tx_timer > 0) {
5083                 if (--sc->sc_tx_timer == 0) {
5084                         device_printf(sc->sc_dev, "device timeout\n");
5085 #ifdef IWM_DEBUG
5086                         iwm_nic_error(sc);
5087 #endif
5088                         ieee80211_restart_all(ic);
5089                         counter_u64_add(sc->sc_ic.ic_oerrors, 1);
5090                         return;
5091                 }
5092         }
5093         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
5094 }
5095
5096 static void
5097 iwm_parent(struct ieee80211com *ic)
5098 {
5099         struct iwm_softc *sc = ic->ic_softc;
5100         int startall = 0;
5101
5102         IWM_LOCK(sc);
5103         if (ic->ic_nrunning > 0) {
5104                 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5105                         iwm_init(sc);
5106                         startall = 1;
5107                 }
5108         } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5109                 iwm_stop(sc);
5110         IWM_UNLOCK(sc);
5111         if (startall)
5112                 ieee80211_start_all(ic);
5113 }
5114
5115 /*
5116  * The interrupt side of things
5117  */
5118
5119 /*
5120  * error dumping routines are from iwlwifi/mvm/utils.c
5121  */
5122
5123 /*
5124  * Note: This structure is read from the device with IO accesses,
5125  * and the reading already does the endian conversion. As it is
5126  * read with uint32_t-sized accesses, any members with a different size
5127  * need to be ordered correctly though!
5128  */
struct iwm_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;		/* type of error */
	uint32_t trm_hw_status0;	/* TRM HW status */
	uint32_t trm_hw_status1;	/* TRM HW status */
	uint32_t blink2;		/* branch link */
	uint32_t ilink1;		/* interrupt link */
	uint32_t ilink2;		/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;		/* beacon timer */
	uint32_t tsf_low;		/* network timestamp function timer,
					 * low 32 bits */
	uint32_t tsf_hi;		/* network timestamp function timer,
					 * high 32 bits */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t fw_rev_type;	/* firmware revision type */
	uint32_t major;		/* uCode version major */
	uint32_t minor;		/* uCode version minor */
	uint32_t hw_ver;		/* HW Silicon version */
	uint32_t brd_ver;		/* HW board version */
	uint32_t log_pc;		/* log program counter */
	uint32_t frame_ptr;		/* frame pointer */
	uint32_t stack_ptr;		/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
	uint32_t wait_event;		/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* indicate when the date and time of the
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5176
5177 /*
5178  * UMAC error struct - relevant starting from family 8000 chip.
5179  * Note: This structure is read from the device with IO accesses,
5180  * and the reading already does the endian conversion. As it is
5181  * read with u32-sized accesses, any members with a different size
5182  * need to be ordered correctly though!
5183  */
struct iwm_umac_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t umac_major;	/* UMAC uCode version major */
	uint32_t umac_minor;	/* UMAC uCode version minor */
	uint32_t frame_pointer; /* core register 27*/
	uint32_t stack_pointer; /* core register 28 */
	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
	uint32_t nic_isr_pref;	/* ISR status register */
} __packed;
5201
5202 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
5203 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5204
5205 #ifdef IWM_DEBUG
/*
 * Firmware error-id values and their symbolic names, used by
 * iwm_desc_lookup().  Made 'static const': the table is private to
 * this file and never modified, so it should not occupy the kernel's
 * global namespace nor live in writable data.
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },	/* catch-all; must remain last */
};
5227
5228 static const char *
5229 iwm_desc_lookup(uint32_t num)
5230 {
5231         int i;
5232
5233         for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5234                 if (advanced_lookup[i].num == num)
5235                         return advanced_lookup[i].name;
5236
5237         /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5238         return advanced_lookup[i].name;
5239 }
5240
/*
 * Dump the UMAC error-event table (read from sc->umac_error_event_table)
 * to the console.  Output format intentionally mirrors the LMAC dump in
 * iwm_nic_error().
 */
static void
iwm_nic_umac_error(struct iwm_softc *sc)
{
	struct iwm_umac_error_event_table table;
	uint32_t base;

	base = sc->umac_error_event_table;

	if (base < 0x800000) {
		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
		    base);
		return;
	}

	/* Length is in 32-bit words; the table is read dword-by-dword. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	/* Effectively: only print the header when valid (count) >= 1. */
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
		iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
	    table.ilink1);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
	    table.ilink2);
	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
	    table.frame_pointer);
	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
	    table.stack_pointer);
	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
	    table.nic_isr_pref);
}
5287
5288 /*
5289  * Support for dumping the error log seemed like a good idea ...
5290  * but it's mostly hex junk and the only sensible thing is the
5291  * hw/ucode revision (which we know anyway).  Since it's here,
5292  * I'll just leave it in, just in case e.g. the Intel guys want to
5293  * help us decipher some "ADVANCED_SYSASSERT" later.
5294  */
/*
 * Dump the LMAC error-event table (read from sc->error_event_table) to
 * the console, then the UMAC table if the firmware reported one.
 */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table table;
	uint32_t base;

	device_printf(sc->sc_dev, "dumping device error log\n");
	base = sc->error_event_table;
	if (base < 0x800000) {
		device_printf(sc->sc_dev,
		    "Invalid error log pointer 0x%08x\n", base);
		return;
	}

	/* Length is in 32-bit words; the table is read dword-by-dword. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (!table.valid) {
		device_printf(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	/* Effectively: only print the header when valid (count) >= 1. */
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
	    iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
	    table.trm_hw_status0);
	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
	    table.trm_hw_status1);
	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
	    table.fw_rev_type);
	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);

	if (sc->umac_error_event_table)
		iwm_nic_umac_error(sc);
}
5367 #endif
5368
/*
 * Advance the RX ring read index, wrapping at IWM_RX_RING_COUNT.
 * The argument is parenthesized and the trailing semicolon removed
 * from the expansion so the macro is safe in unbraced if/else bodies
 * (call sites already supply their own semicolon).
 */
#define ADVANCE_RXQ(sc) ((sc)->rxq.cur = ((sc)->rxq.cur + 1) % IWM_RX_RING_COUNT)
5370
5371 /*
5372  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5373  * Basic structure from if_iwn
5374  */
5375 static void
5376 iwm_notif_intr(struct iwm_softc *sc)
5377 {
5378         struct ieee80211com *ic = &sc->sc_ic;
5379         uint16_t hw;
5380
5381         bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5382             BUS_DMASYNC_POSTREAD);
5383
5384         hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5385
5386         /*
5387          * Process responses
5388          */
5389         while (sc->rxq.cur != hw) {
5390                 struct iwm_rx_ring *ring = &sc->rxq;
5391                 struct iwm_rx_data *data = &ring->data[ring->cur];
5392                 struct iwm_rx_packet *pkt;
5393                 struct iwm_cmd_response *cresp;
5394                 int qid, idx, code;
5395
5396                 bus_dmamap_sync(ring->data_dmat, data->map,
5397                     BUS_DMASYNC_POSTREAD);
5398                 pkt = mtod(data->m, struct iwm_rx_packet *);
5399
5400                 qid = pkt->hdr.qid & ~0x80;
5401                 idx = pkt->hdr.idx;
5402
5403                 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5404                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5405                     "rx packet qid=%d idx=%d type=%x %d %d\n",
5406                     pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, ring->cur, hw);
5407
5408                 /*
5409                  * randomly get these from the firmware, no idea why.
5410                  * they at least seem harmless, so just ignore them for now
5411                  */
5412                 if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
5413                     || pkt->len_n_flags == htole32(0x55550000))) {
5414                         ADVANCE_RXQ(sc);
5415                         continue;
5416                 }
5417
5418                 iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5419
5420                 switch (code) {
5421                 case IWM_REPLY_RX_PHY_CMD:
5422                         iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
5423                         break;
5424
5425                 case IWM_REPLY_RX_MPDU_CMD:
5426                         iwm_mvm_rx_rx_mpdu(sc, data->m);
5427                         break;
5428
5429                 case IWM_TX_CMD:
5430                         iwm_mvm_rx_tx_cmd(sc, pkt, data);
5431                         break;
5432
5433                 case IWM_MISSED_BEACONS_NOTIFICATION: {
5434                         struct iwm_missed_beacons_notif *resp;
5435                         int missed;
5436
5437                         /* XXX look at mac_id to determine interface ID */
5438                         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5439
5440                         resp = (void *)pkt->data;
5441                         missed = le32toh(resp->consec_missed_beacons);
5442
5443                         IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5444                             "%s: MISSED_BEACON: mac_id=%d, "
5445                             "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5446                             "num_rx=%d\n",
5447                             __func__,
5448                             le32toh(resp->mac_id),
5449                             le32toh(resp->consec_missed_beacons_since_last_rx),
5450                             le32toh(resp->consec_missed_beacons),
5451                             le32toh(resp->num_expected_beacons),
5452                             le32toh(resp->num_recvd_beacons));
5453
5454                         /* Be paranoid */
5455                         if (vap == NULL)
5456                                 break;
5457
5458                         /* XXX no net80211 locking? */
5459                         if (vap->iv_state == IEEE80211_S_RUN &&
5460                             (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5461                                 if (missed > vap->iv_bmissthreshold) {
5462                                         /* XXX bad locking; turn into task */
5463                                         IWM_UNLOCK(sc);
5464                                         ieee80211_beacon_miss(ic);
5465                                         IWM_LOCK(sc);
5466                                 }
5467                         }
5468
5469                         break;
5470                 }
5471
5472                 case IWM_MFUART_LOAD_NOTIFICATION:
5473                         break;
5474
5475                 case IWM_MVM_ALIVE:
5476                         break;
5477
5478                 case IWM_CALIB_RES_NOTIF_PHY_DB:
5479                         break;
5480
5481                 case IWM_STATISTICS_NOTIFICATION: {
5482                         struct iwm_notif_statistics *stats;
5483                         stats = (void *)pkt->data;
5484                         memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5485                         sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
5486                         break;
5487                 }
5488
5489                 case IWM_NVM_ACCESS_CMD:
5490                 case IWM_MCC_UPDATE_CMD:
5491                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
5492                                 memcpy(sc->sc_cmd_resp,
5493                                     pkt, sizeof(sc->sc_cmd_resp));
5494                         }
5495                         break;
5496
5497                 case IWM_MCC_CHUB_UPDATE_CMD: {
5498                         struct iwm_mcc_chub_notif *notif;
5499                         notif = (void *)pkt->data;
5500
5501                         sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5502                         sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5503                         sc->sc_fw_mcc[2] = '\0';
5504                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
5505                             "fw source %d sent CC '%s'\n",
5506                             notif->source_id, sc->sc_fw_mcc);
5507                         break;
5508                 }
5509
5510                 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5511                 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5512                                  IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5513                         struct iwm_dts_measurement_notif_v1 *notif;
5514
5515                         if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5516                                 device_printf(sc->sc_dev,
5517                                     "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5518                                 break;
5519                         }
5520                         notif = (void *)pkt->data;
5521                         IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5522                             "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5523                             notif->temp);
5524                         break;
5525                 }
5526
5527                 case IWM_PHY_CONFIGURATION_CMD:
5528                 case IWM_TX_ANT_CONFIGURATION_CMD:
5529                 case IWM_ADD_STA:
5530                 case IWM_MAC_CONTEXT_CMD:
5531                 case IWM_REPLY_SF_CFG_CMD:
5532                 case IWM_POWER_TABLE_CMD:
5533                 case IWM_PHY_CONTEXT_CMD:
5534                 case IWM_BINDING_CONTEXT_CMD:
5535                 case IWM_TIME_EVENT_CMD:
5536                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5537                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5538                 case IWM_SCAN_ABORT_UMAC:
5539                 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5540                 case IWM_SCAN_OFFLOAD_ABORT_CMD:
5541                 case IWM_REPLY_BEACON_FILTERING_CMD:
5542                 case IWM_MAC_PM_POWER_TABLE:
5543                 case IWM_TIME_QUOTA_CMD:
5544                 case IWM_REMOVE_STA:
5545                 case IWM_TXPATH_FLUSH:
5546                 case IWM_LQ_CMD:
5547                 case IWM_FW_PAGING_BLOCK_CMD:
5548                 case IWM_BT_CONFIG:
5549                 case IWM_REPLY_THERMAL_MNG_BACKOFF:
5550                         cresp = (void *)pkt->data;
5551                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
5552                                 memcpy(sc->sc_cmd_resp,
5553                                     pkt, sizeof(*pkt)+sizeof(*cresp));
5554                         }
5555                         break;
5556
5557                 /* ignore */
5558                 case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
5559                         break;
5560
5561                 case IWM_INIT_COMPLETE_NOTIF:
5562                         break;
5563
5564                 case IWM_SCAN_OFFLOAD_COMPLETE:
5565                         iwm_mvm_rx_lmac_scan_complete_notif(sc, pkt);
5566                         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5567                                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5568                                 ieee80211_runtask(ic, &sc->sc_es_task);
5569                         }
5570                         break;
5571
5572                 case IWM_SCAN_ITERATION_COMPLETE: {
5573                         struct iwm_lmac_scan_complete_notif *notif;
5574                         notif = (void *)pkt->data;
5575                         break;
5576                 }
5577
5578                 case IWM_SCAN_COMPLETE_UMAC:
5579                         iwm_mvm_rx_umac_scan_complete_notif(sc, pkt);
5580                         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5581                                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5582                                 ieee80211_runtask(ic, &sc->sc_es_task);
5583                         }
5584                         break;
5585
5586                 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5587                         struct iwm_umac_scan_iter_complete_notif *notif;
5588                         notif = (void *)pkt->data;
5589
5590                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5591                             "complete, status=0x%x, %d channels scanned\n",
5592                             notif->status, notif->scanned_channels);
5593                         break;
5594                 }
5595
5596                 case IWM_REPLY_ERROR: {
5597                         struct iwm_error_resp *resp;
5598                         resp = (void *)pkt->data;
5599
5600                         device_printf(sc->sc_dev,
5601                             "firmware error 0x%x, cmd 0x%x\n",
5602                             le32toh(resp->error_type),
5603                             resp->cmd_id);
5604                         break;
5605                 }
5606
5607                 case IWM_TIME_EVENT_NOTIFICATION: {
5608                         struct iwm_time_event_notif *notif;
5609                         notif = (void *)pkt->data;
5610
5611                         IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5612                             "TE notif status = 0x%x action = 0x%x\n",
5613                             notif->status, notif->action);
5614                         break;
5615                 }
5616
5617                 case IWM_MCAST_FILTER_CMD:
5618                         break;
5619
5620                 case IWM_SCD_QUEUE_CFG: {
5621                         struct iwm_scd_txq_cfg_rsp *rsp;
5622                         rsp = (void *)pkt->data;
5623
5624                         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5625                             "queue cfg token=0x%x sta_id=%d "
5626                             "tid=%d scd_queue=%d\n",
5627                             rsp->token, rsp->sta_id, rsp->tid,
5628                             rsp->scd_queue);
5629                         break;
5630                 }
5631
5632                 default:
5633                         device_printf(sc->sc_dev,
5634                             "frame %d/%d %x UNHANDLED (this should "
5635                             "not happen)\n", qid, idx,
5636                             pkt->len_n_flags);
5637                         break;
5638                 }
5639
5640                 /*
5641                  * Why test bit 0x80?  The Linux driver:
5642                  *
5643                  * There is one exception:  uCode sets bit 15 when it
5644                  * originates the response/notification, i.e. when the
5645                  * response/notification is not a direct response to a
5646                  * command sent by the driver.  For example, uCode issues
5647                  * IWM_REPLY_RX when it sends a received frame to the driver;
5648                  * it is not a direct response to any driver command.
5649                  *
5650                  * Ok, so since when is 7 == 15?  Well, the Linux driver
5651                  * uses a slightly different format for pkt->hdr, and "qid"
5652                  * is actually the upper byte of a two-byte field.
5653                  */
5654                 if (!(pkt->hdr.qid & (1 << 7))) {
5655                         iwm_cmd_done(sc, pkt);
5656                 }
5657
5658                 ADVANCE_RXQ(sc);
5659         }
5660
5661         /*
5662          * Tell the firmware what we have processed.
5663          * Seems like the hardware gets upset unless we align
5664          * the write by 8??
5665          */
5666         hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5667         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
5668 }
5669
/*
 * Primary interrupt handler for the device.
 *
 * Reads the pending interrupt causes either from the in-memory ICT
 * table (when IWM_FLAG_USE_ICT is set) or directly from the CSR_INT /
 * FH_INT_STATUS registers, acknowledges them, and dispatches:
 * firmware SW errors (dump state + VAP restart), fatal HW errors
 * (stop device), firmware-chunk-load completion, rfkill, periodic RX,
 * and RX notifications (iwm_notif_intr).  Interrupts are masked on
 * entry and re-enabled via iwm_restore_interrupts() on the way out.
 */
static void
iwm_intr(void *arg)
{
        struct iwm_softc *sc = arg;
        int handled = 0;
        /* NOTE(review): rv is assigned below but never read (void fn). */
        int r1, r2, rv = 0;
        int isperiodic = 0;

        IWM_LOCK(sc);
        /* Mask all interrupts while we service this one. */
        IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

        if (sc->sc_flags & IWM_FLAG_USE_ICT) {
                uint32_t *ict = sc->ict_dma.vaddr;
                int tmp;

                tmp = htole32(ict[sc->ict_cur]);
                if (!tmp)
                        goto out_ena;   /* nothing pending; re-enable */

                /*
                 * ok, there was something.  keep plowing until we have all.
                 */
                r1 = r2 = 0;
                while (tmp) {
                        r1 |= tmp;
                        /* Consume the slot and advance the ring cursor. */
                        ict[sc->ict_cur] = 0;
                        sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
                        tmp = htole32(ict[sc->ict_cur]);
                }

                /* this is where the fun begins.  don't ask */
                if (r1 == 0xffffffff)
                        r1 = 0;

                /* i am not expected to understand this */
                if (r1 & 0xc0000)
                        r1 |= 0x8000;
                /* Spread the packed ICT byte pair into CSR_INT bit layout. */
                r1 = (0xff & r1) | ((0xff00 & r1) << 16);
        } else {
                r1 = IWM_READ(sc, IWM_CSR_INT);
                /* "hardware gone" (where, fishing?) */
                if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
                        goto out;
                r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
        }
        if (r1 == 0 && r2 == 0) {
                goto out_ena;   /* spurious interrupt */
        }

        /* Acknowledge the causes we are about to handle. */
        IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

        /* Safely ignore these bits for debug checks below */
        r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);

        if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
                int i;
                struct ieee80211com *ic = &sc->sc_ic;
                struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

#ifdef IWM_DEBUG
                iwm_nic_error(sc);
#endif
                /* Dump driver status (TX and RX rings) while we're here. */
                device_printf(sc->sc_dev, "driver status:\n");
                for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
                        struct iwm_tx_ring *ring = &sc->txq[i];
                        device_printf(sc->sc_dev,
                            "  tx ring %2d: qid=%-2d cur=%-3d "
                            "queued=%-3d\n",
                            i, ring->qid, ring->cur, ring->queued);
                }
                device_printf(sc->sc_dev,
                    "  rx ring: cur=%d\n", sc->rxq.cur);
                device_printf(sc->sc_dev,
                    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);

                /* Don't stop the device; just do a VAP restart */
                IWM_UNLOCK(sc);

                if (vap == NULL) {
                        printf("%s: null vap\n", __func__);
                        return;
                }

                device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
                    "restarting\n", __func__, vap->iv_state);

                /* XXX TODO: turn this into a callout/taskqueue */
                ieee80211_restart_all(ic);
                return;
        }

        if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
                handled |= IWM_CSR_INT_BIT_HW_ERR;
                device_printf(sc->sc_dev, "hardware error, stopping device\n");
                iwm_stop(sc);
                rv = 1;
                goto out;
        }

        /* firmware chunk loaded */
        if (r1 & IWM_CSR_INT_BIT_FH_TX) {
                IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
                handled |= IWM_CSR_INT_BIT_FH_TX;
                /* Wake up iwm_load_firmware_chunk() waiters. */
                sc->sc_fw_chunk_done = 1;
                wakeup(&sc->sc_fw);
        }

        if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
                handled |= IWM_CSR_INT_BIT_RF_KILL;
                if (iwm_check_rfkill(sc)) {
                        device_printf(sc->sc_dev,
                            "%s: rfkill switch, disabling interface\n",
                            __func__);
                        iwm_stop(sc);
                }
        }

        /*
         * The Linux driver uses periodic interrupts to avoid races.
         * We cargo-cult like it's going out of fashion.
         */
        if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
                handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
                IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
                /* Only disable periodic RX when no real RX bit is pending. */
                if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
                        IWM_WRITE_1(sc,
                            IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
                isperiodic = 1;
        }

        if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
                handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
                IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

                /* Process the RX ring / firmware notifications. */
                iwm_notif_intr(sc);

                /* enable periodic interrupt, see above */
                if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
                        IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
                            IWM_CSR_INT_PERIODIC_ENA);
        }

        if (__predict_false(r1 & ~handled))
                IWM_DPRINTF(sc, IWM_DEBUG_INTR,
                    "%s: unhandled interrupts: %x\n", __func__, r1);
        rv = 1;

 out_ena:
        iwm_restore_interrupts(sc);
 out:
        IWM_UNLOCK(sc);
        return;
}
5824
/*
 * Autoconf glue-sniffing
 */
/* PCI IDs of the adapters this driver attaches to. */
#define PCI_VENDOR_INTEL                0x8086
#define PCI_PRODUCT_INTEL_WL_3160_1     0x08b3
#define PCI_PRODUCT_INTEL_WL_3160_2     0x08b4
#define PCI_PRODUCT_INTEL_WL_3165_1     0x3165
#define PCI_PRODUCT_INTEL_WL_3165_2     0x3166
#define PCI_PRODUCT_INTEL_WL_7260_1     0x08b1
#define PCI_PRODUCT_INTEL_WL_7260_2     0x08b2
#define PCI_PRODUCT_INTEL_WL_7265_1     0x095a
#define PCI_PRODUCT_INTEL_WL_7265_2     0x095b
#define PCI_PRODUCT_INTEL_WL_8260_1     0x24f3
#define PCI_PRODUCT_INTEL_WL_8260_2     0x24f4

/*
 * Map of supported PCI device IDs to their per-chipset configuration.
 * Used by iwm_probe() and iwm_dev_check() below.  Note that 7265D
 * shares the 7265 IDs and is special-cased in iwm_attach() by HW rev.
 */
static const struct iwm_devices {
        uint16_t                device;         /* PCI device ID */
        const struct iwm_cfg    *cfg;           /* chipset configuration */
} iwm_devices[] = {
        { PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
        { PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
        { PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
        { PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
        { PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
        { PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
        { PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
        { PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
        { PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
        { PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
};
5855
5856 static int
5857 iwm_probe(device_t dev)
5858 {
5859         int i;
5860
5861         for (i = 0; i < nitems(iwm_devices); i++) {
5862                 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5863                     pci_get_device(dev) == iwm_devices[i].device) {
5864                         device_set_desc(dev, iwm_devices[i].cfg->name);
5865                         return (BUS_PROBE_DEFAULT);
5866                 }
5867         }
5868
5869         return (ENXIO);
5870 }
5871
5872 static int
5873 iwm_dev_check(device_t dev)
5874 {
5875         struct iwm_softc *sc;
5876         uint16_t devid;
5877         int i;
5878
5879         sc = device_get_softc(dev);
5880
5881         devid = pci_get_device(dev);
5882         for (i = 0; i < nitems(iwm_devices); i++) {
5883                 if (iwm_devices[i].device == devid) {
5884                         sc->cfg = iwm_devices[i].cfg;
5885                         return (0);
5886                 }
5887         }
5888         device_printf(dev, "unknown adapter type\n");
5889         return ENXIO;
5890 }
5891
5892 /* PCI registers */
5893 #define PCI_CFG_RETRY_TIMEOUT   0x041
5894
5895 static int
5896 iwm_pci_attach(device_t dev)
5897 {
5898         struct iwm_softc *sc;
5899         int count, error, rid;
5900         uint16_t reg;
5901
5902         sc = device_get_softc(dev);
5903
5904         /* We disable the RETRY_TIMEOUT register (0x41) to keep
5905          * PCI Tx retries from interfering with C3 CPU state */
5906         pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5907
5908         /* Enable bus-mastering and hardware bug workaround. */
5909         pci_enable_busmaster(dev);
5910         reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5911         /* if !MSI */
5912         if (reg & PCIM_STATUS_INTxSTATE) {
5913                 reg &= ~PCIM_STATUS_INTxSTATE;
5914         }
5915         pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5916
5917         rid = PCIR_BAR(0);
5918         sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5919             RF_ACTIVE);
5920         if (sc->sc_mem == NULL) {
5921                 device_printf(sc->sc_dev, "can't map mem space\n");
5922                 return (ENXIO);
5923         }
5924         sc->sc_st = rman_get_bustag(sc->sc_mem);
5925         sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5926
5927         /* Install interrupt handler. */
5928         count = 1;
5929         rid = 0;
5930         if (pci_alloc_msi(dev, &count) == 0)
5931                 rid = 1;
5932         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5933             (rid != 0 ? 0 : RF_SHAREABLE));
5934         if (sc->sc_irq == NULL) {
5935                 device_printf(dev, "can't map interrupt\n");
5936                         return (ENXIO);
5937         }
5938         error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5939             NULL, iwm_intr, sc, &sc->sc_ih);
5940         if (sc->sc_ih == NULL) {
5941                 device_printf(dev, "can't establish interrupt");
5942                         return (ENXIO);
5943         }
5944         sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5945
5946         return (0);
5947 }
5948
/*
 * Release the PCI resources acquired by iwm_pci_attach(): tear down
 * the interrupt handler, release the IRQ resource and MSI allocation,
 * then release the BAR0 memory mapping.  Safe to call with partially
 * attached state (each resource is checked for NULL).
 */
static void
iwm_pci_detach(device_t dev)
{
        struct iwm_softc *sc = device_get_softc(dev);

        if (sc->sc_irq != NULL) {
                /* Teardown must precede releasing the IRQ resource. */
                bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
                bus_release_resource(dev, SYS_RES_IRQ,
                    rman_get_rid(sc->sc_irq), sc->sc_irq);
                pci_release_msi(dev);
        }
        if (sc->sc_mem != NULL)
                bus_release_resource(dev, SYS_RES_MEMORY,
                    rman_get_rid(sc->sc_mem), sc->sc_mem);
}
5964
5965
5966
/*
 * Newbus attach method.  Initializes softc state, attaches at the PCI
 * level, identifies the chipset, allocates all DMA rings and tables,
 * and registers iwm_preinit() as a config intrhook to finish firmware
 * bring-up once interrupts are available.  On any failure, jumps to
 * fail: which tears everything down via iwm_detach_local().
 */
static int
iwm_attach(device_t dev)
{
        struct iwm_softc *sc = device_get_softc(dev);
        struct ieee80211com *ic = &sc->sc_ic;
        int error;
        int txq_i, i;

        sc->sc_dev = dev;
        sc->sc_attached = 1;
        IWM_LOCK_INIT(sc);
        mbufq_init(&sc->sc_snd, ifqmaxlen);
        callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
        callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
        TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);

        sc->sc_notif_wait = iwm_notification_wait_init(sc);
        if (sc->sc_notif_wait == NULL) {
                device_printf(dev, "failed to init notification wait struct\n");
                goto fail;
        }

        /* Init phy db */
        sc->sc_phy_db = iwm_phy_db_init(sc);
        if (!sc->sc_phy_db) {
                device_printf(dev, "Cannot init phy_db\n");
                goto fail;
        }

        /* Set EBS as successful as long as not stated otherwise by the FW. */
        sc->last_ebs_successful = TRUE;

        /* PCI attach */
        error = iwm_pci_attach(dev);
        if (error != 0)
                goto fail;

        /* No synchronous command response outstanding yet. */
        sc->sc_wantresp = -1;

        /* Check device type */
        error = iwm_dev_check(dev);
        if (error != 0)
                goto fail;

        sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
        /*
         * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
         * changed, and now the revision step also includes bit 0-1 (no more
         * "dash" value). To keep hw_rev backwards compatible - we'll store it
         * in the old format.
         */
        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
                sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
                                (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

        if (iwm_prepare_card_hw(sc) != 0) {
                device_printf(dev, "could not initialize hardware\n");
                goto fail;
        }

        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
                int ret;
                uint32_t hw_step;

                /*
                 * In order to recognize C step the driver should read the
                 * chip version id located at the AUX bus MISC address.
                 */
                IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
                            IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
                DELAY(2);

                /* Wait for the MAC clock before touching the AUX bus. */
                ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
                                   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
                                   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
                                   25000);
                if (!ret) {
                        device_printf(sc->sc_dev,
                            "Failed to wake up the nic\n");
                        goto fail;
                }

                if (iwm_nic_lock(sc)) {
                        hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
                        hw_step |= IWM_ENABLE_WFPM;
                        iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
                        hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
                        hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
                        /* Patch the stored HW rev when C step is detected. */
                        if (hw_step == 0x3)
                                sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
                                                (IWM_SILICON_C_STEP << 2);
                        iwm_nic_unlock(sc);
                } else {
                        device_printf(sc->sc_dev, "Failed to lock the nic\n");
                        goto fail;
                }
        }

        /* special-case 7265D, it has the same PCI IDs. */
        if (sc->cfg == &iwm7265_cfg &&
            (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
                sc->cfg = &iwm7265d_cfg;
        }

        /* Allocate DMA memory for firmware transfers. */
        if ((error = iwm_alloc_fwmem(sc)) != 0) {
                device_printf(dev, "could not allocate memory for firmware\n");
                goto fail;
        }

        /* Allocate "Keep Warm" page. */
        if ((error = iwm_alloc_kw(sc)) != 0) {
                device_printf(dev, "could not allocate keep warm page\n");
                goto fail;
        }

        /* We use ICT interrupts */
        if ((error = iwm_alloc_ict(sc)) != 0) {
                device_printf(dev, "could not allocate ICT table\n");
                goto fail;
        }

        /* Allocate TX scheduler "rings". */
        if ((error = iwm_alloc_sched(sc)) != 0) {
                device_printf(dev, "could not allocate TX scheduler rings\n");
                goto fail;
        }

        /* Allocate TX rings */
        for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
                if ((error = iwm_alloc_tx_ring(sc,
                    &sc->txq[txq_i], txq_i)) != 0) {
                        device_printf(dev,
                            "could not allocate TX ring %d\n",
                            txq_i);
                        goto fail;
                }
        }

        /* Allocate RX ring. */
        if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
                device_printf(dev, "could not allocate RX ring\n");
                goto fail;
        }

        /* Clear pending interrupts. */
        IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

        ic->ic_softc = sc;
        ic->ic_name = device_get_nameunit(sc->sc_dev);
        ic->ic_phytype = IEEE80211_T_OFDM;      /* not only, but not used */
        ic->ic_opmode = IEEE80211_M_STA;        /* default to BSS mode */

        /* Set device capabilities. */
        ic->ic_caps =
            IEEE80211_C_STA |
            IEEE80211_C_WPA |           /* WPA/RSN */
            IEEE80211_C_WME |
            IEEE80211_C_SHSLOT |        /* short slot time supported */
            IEEE80211_C_SHPREAMBLE      /* short preamble supported */
//          IEEE80211_C_BGSCAN          /* capable of bg scanning */
            ;
        /* Advertise full-offload scanning */
        ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
        for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
                sc->sc_phyctxt[i].id = i;
                sc->sc_phyctxt[i].color = 0;
                sc->sc_phyctxt[i].ref = 0;
                sc->sc_phyctxt[i].channel = NULL;
        }

        /* Default noise floor */
        sc->sc_noise = -96;

        /* Max RSSI */
        sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;

        /* Defer firmware load until interrupts are enabled. */
        sc->sc_preinit_hook.ich_func = iwm_preinit;
        sc->sc_preinit_hook.ich_arg = sc;
        if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
                device_printf(dev, "config_intrhook_establish failed\n");
                goto fail;
        }

#ifdef IWM_DEBUG
        SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
            CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

        IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
            "<-%s\n", __func__);

        return 0;

        /* Free allocated memory if something failed during attachment. */
fail:
        iwm_detach_local(sc, 0);

        return ENXIO;
}
6168
6169 static int
6170 iwm_is_valid_ether_addr(uint8_t *addr)
6171 {
6172         char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6173
6174         if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6175                 return (FALSE);
6176
6177         return (TRUE);
6178 }
6179
6180 static int
6181 iwm_update_edca(struct ieee80211com *ic)
6182 {
6183         struct iwm_softc *sc = ic->ic_softc;
6184
6185         device_printf(sc->sc_dev, "%s: called\n", __func__);
6186         return (0);
6187 }
6188
/*
 * Deferred attach stage, run as a config intrhook once interrupts are
 * live.  Starts the hardware, runs the init firmware once to read the
 * NVM (then stops the device again), builds the channel map, and
 * completes net80211 attachment.  On failure, tears down the driver
 * state via iwm_detach_local().
 */
static void
iwm_preinit(void *arg)
{
        struct iwm_softc *sc = arg;
        device_t dev = sc->sc_dev;
        struct ieee80211com *ic = &sc->sc_ic;
        int error;

        IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
            "->%s\n", __func__);

        IWM_LOCK(sc);
        if ((error = iwm_start_hw(sc)) != 0) {
                device_printf(dev, "could not initialize hardware\n");
                IWM_UNLOCK(sc);
                goto fail;
        }

        /* Run the init ucode once (to populate NVM data), then stop. */
        error = iwm_run_init_mvm_ucode(sc, 1);
        iwm_stop_device(sc);
        if (error) {
                IWM_UNLOCK(sc);
                goto fail;
        }
        device_printf(dev,
            "hw rev 0x%x, fw ver %s, address %s\n",
            sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
            sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));

        /* not all hardware can do 5GHz band */
        if (!sc->nvm_data->sku_cap_band_52GHz_enable)
                memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
                    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
        IWM_UNLOCK(sc);

        iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
            ic->ic_channels);

        /*
         * At this point we've committed - if we fail to do setup,
         * we now also have to tear down the net80211 state.
         */
        ieee80211_ifattach(ic);
        /* Install driver methods into the ieee80211com. */
        ic->ic_vap_create = iwm_vap_create;
        ic->ic_vap_delete = iwm_vap_delete;
        ic->ic_raw_xmit = iwm_raw_xmit;
        ic->ic_node_alloc = iwm_node_alloc;
        ic->ic_scan_start = iwm_scan_start;
        ic->ic_scan_end = iwm_scan_end;
        ic->ic_update_mcast = iwm_update_mcast;
        ic->ic_getradiocaps = iwm_init_channel_map;
        ic->ic_set_channel = iwm_set_channel;
        ic->ic_scan_curchan = iwm_scan_curchan;
        ic->ic_scan_mindwell = iwm_scan_mindwell;
        ic->ic_wme.wme_update = iwm_update_edca;
        ic->ic_parent = iwm_parent;
        ic->ic_transmit = iwm_transmit;
        iwm_radiotap_attach(sc);
        if (bootverbose)
                ieee80211_announce(ic);

        IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
            "<-%s\n", __func__);
        config_intrhook_disestablish(&sc->sc_preinit_hook);

        return;
fail:
        config_intrhook_disestablish(&sc->sc_preinit_hook);
        iwm_detach_local(sc, 0);
}
6259
6260 /*
6261  * Attach the interface to 802.11 radiotap.
6262  */
6263 static void
6264 iwm_radiotap_attach(struct iwm_softc *sc)
6265 {
6266         struct ieee80211com *ic = &sc->sc_ic;
6267
6268         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6269             "->%s begin\n", __func__);
6270         ieee80211_radiotap_attach(ic,
6271             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6272                 IWM_TX_RADIOTAP_PRESENT,
6273             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6274                 IWM_RX_RADIOTAP_PRESENT);
6275         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6276             "->%s end\n", __func__);
6277 }
6278
6279 static struct ieee80211vap *
6280 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6281     enum ieee80211_opmode opmode, int flags,
6282     const uint8_t bssid[IEEE80211_ADDR_LEN],
6283     const uint8_t mac[IEEE80211_ADDR_LEN])
6284 {
6285         struct iwm_vap *ivp;
6286         struct ieee80211vap *vap;
6287
6288         if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6289                 return NULL;
6290         ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6291         vap = &ivp->iv_vap;
6292         ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6293         vap->iv_bmissthreshold = 10;            /* override default */
6294         /* Override with driver methods. */
6295         ivp->iv_newstate = vap->iv_newstate;
6296         vap->iv_newstate = iwm_newstate;
6297
6298         ieee80211_ratectl_init(vap);
6299         /* Complete setup. */
6300         ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6301             mac);
6302         ic->ic_opmode = opmode;
6303
6304         return vap;
6305 }
6306
/*
 * net80211 vap-delete callback: undo iwm_vap_create().
 * The vap is embedded in the iwm_vap allocation, so grab the container
 * pointer before detaching, then free it last.
 */
static void
iwm_vap_delete(struct ieee80211vap *vap)
{
	struct iwm_vap *ivp = IWM_VAP(vap);

	ieee80211_ratectl_deinit(vap);
	ieee80211_vap_detach(vap);
	free(ivp, M_80211_VAP);
}
6316
6317 static void
6318 iwm_scan_start(struct ieee80211com *ic)
6319 {
6320         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6321         struct iwm_softc *sc = ic->ic_softc;
6322         int error;
6323
6324         IWM_LOCK(sc);
6325         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6326                 /* This should not be possible */
6327                 device_printf(sc->sc_dev,
6328                     "%s: Previous scan not completed yet\n", __func__);
6329         }
6330         if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6331                 error = iwm_mvm_umac_scan(sc);
6332         else
6333                 error = iwm_mvm_lmac_scan(sc);
6334         if (error != 0) {
6335                 device_printf(sc->sc_dev, "could not initiate scan\n");
6336                 IWM_UNLOCK(sc);
6337                 ieee80211_cancel_scan(vap);
6338         } else {
6339                 sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6340                 iwm_led_blink_start(sc);
6341                 IWM_UNLOCK(sc);
6342         }
6343 }
6344
/*
 * net80211 scan-end callback: stop the scan LED blink, restore the
 * steady LED if we are associated, and tell the firmware to stop
 * scanning if a scan is still marked as running.
 */
static void
iwm_scan_end(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;

	IWM_LOCK(sc);
	iwm_led_blink_stop(sc);
	if (vap->iv_state == IEEE80211_S_RUN)
		iwm_mvm_led_enable(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/*
		 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
		 * taskqueue.
		 */
		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
		iwm_mvm_scan_stop_wait(sc);
	}
	IWM_UNLOCK(sc);

	/*
	 * Make sure we don't race, if sc_es_task is still enqueued here.
	 * This is to make sure that it won't call ieee80211_scan_done
	 * when we have already started the next scan.
	 */
	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
}
6373
/*
 * net80211 multicast-update callback; intentionally a no-op in this
 * driver (no multicast filter programming is done here).
 */
static void
iwm_update_mcast(struct ieee80211com *ic)
{
}
6378
/*
 * net80211 set-channel callback; intentionally a no-op — channel
 * changes are handled through the firmware scan/state machinery,
 * not via this hook.
 */
static void
iwm_set_channel(struct ieee80211com *ic)
{
}
6383
/*
 * net80211 per-channel scan callback; intentionally a no-op — the
 * firmware performs the channel dwell itself.
 */
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
6388
/*
 * net80211 minimum-dwell callback; intentionally a no-op — the
 * firmware manages dwell times on its own.
 */
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
6394
/*
 * Restart the device: stop it, then bring it back up if any interface
 * is still running.  Serializes against other restart/power paths via
 * the IWM_FLAG_BUSY flag and the sleep/wakeup handshake on sc_flags.
 */
void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;

	IWM_LOCK(sc);
	/* Wait until no other thread owns the BUSY flag, then claim it. */
	while (sc->sc_flags & IWM_FLAG_BUSY)
		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
	sc->sc_flags |= IWM_FLAG_BUSY;
	iwm_stop(sc);
	if (sc->sc_ic.ic_nrunning > 0)
		iwm_init(sc);
	/* Release the BUSY flag and wake any waiters. */
	sc->sc_flags &= ~IWM_FLAG_BUSY;
	wakeup(&sc->sc_flags);
	IWM_UNLOCK(sc);
}
6411
6412 static int
6413 iwm_resume(device_t dev)
6414 {
6415         struct iwm_softc *sc = device_get_softc(dev);
6416         int do_reinit = 0;
6417
6418         /*
6419          * We disable the RETRY_TIMEOUT register (0x41) to keep
6420          * PCI Tx retries from interfering with C3 CPU state.
6421          */
6422         pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6423         iwm_init_task(device_get_softc(dev));
6424
6425         IWM_LOCK(sc);
6426         if (sc->sc_flags & IWM_FLAG_SCANNING) {
6427                 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6428                 do_reinit = 1;
6429         }
6430         IWM_UNLOCK(sc);
6431
6432         if (do_reinit)
6433                 ieee80211_resume_all(&sc->sc_ic);
6434
6435         return 0;
6436 }
6437
6438 static int
6439 iwm_suspend(device_t dev)
6440 {
6441         int do_stop = 0;
6442         struct iwm_softc *sc = device_get_softc(dev);
6443
6444         do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6445
6446         ieee80211_suspend_all(&sc->sc_ic);
6447
6448         if (do_stop) {
6449                 IWM_LOCK(sc);
6450                 iwm_stop(sc);
6451                 sc->sc_flags |= IWM_FLAG_SCANNING;
6452                 IWM_UNLOCK(sc);
6453         }
6454
6455         return (0);
6456 }
6457
/*
 * Common teardown path, used both by the attach failure path (with
 * do_net80211 == 0, before net80211 state exists) and by device
 * detach (with do_net80211 == 1).  Safe to call more than once; the
 * sc_attached flag makes repeat calls a no-op.  Always returns 0.
 */
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	device_t dev = sc->sc_dev;
	int i;

	if (!sc->sc_attached)
		return 0;
	sc->sc_attached = 0;

	if (do_net80211)
		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);

	/* Quiesce timers and the hardware before freeing resources. */
	callout_drain(&sc->sc_led_blink_to);
	callout_drain(&sc->sc_watchdog_to);
	iwm_stop_device(sc);
	if (do_net80211) {
		ieee80211_ifdetach(&sc->sc_ic);
	}

	iwm_phy_db_free(sc->sc_phy_db);
	sc->sc_phy_db = NULL;

	iwm_free_nvm_data(sc->nvm_data);

	/* Free descriptor rings */
	iwm_free_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwm_free_tx_ring(sc, &sc->txq[i]);

	/* Free firmware */
	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/* Free scheduler */
	iwm_dma_contig_free(&sc->sched_dma);
	iwm_dma_contig_free(&sc->ict_dma);
	iwm_dma_contig_free(&sc->kw_dma);
	iwm_dma_contig_free(&sc->fw_dma);

	iwm_free_fw_paging(sc);

	/* Finished with the hardware - detach things */
	iwm_pci_detach(dev);

	if (sc->sc_notif_wait != NULL) {
		iwm_notification_wait_free(sc->sc_notif_wait);
		sc->sc_notif_wait = NULL;
	}

	/* Drop any queued transmit mbufs, then tear down the lock last. */
	mbufq_drain(&sc->sc_snd);
	IWM_LOCK_DESTROY(sc);

	return (0);
}
6514
6515 static int
6516 iwm_detach(device_t dev)
6517 {
6518         struct iwm_softc *sc = device_get_softc(dev);
6519
6520         return (iwm_detach_local(sc, 1));
6521 }
6522
/* New-bus method table for the iwm PCI device. */
static device_method_t iwm_pci_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         iwm_probe),
        DEVMETHOD(device_attach,        iwm_attach),
        DEVMETHOD(device_detach,        iwm_detach),
        DEVMETHOD(device_suspend,       iwm_suspend),
        DEVMETHOD(device_resume,        iwm_resume),

        DEVMETHOD_END
};

/* Driver declaration; softc size tells new-bus how much state to allocate. */
static driver_t iwm_pci_driver = {
        "iwm",
        iwm_pci_methods,
        sizeof (struct iwm_softc)
};

static devclass_t iwm_devclass;

/* Register on the PCI bus and declare module dependencies. */
DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);