/* sys/dev/iwm/if_iwm.c — FreeBSD/FreeBSD.git, MFC r318232 */
1 /*      $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $     */
2
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 #include <sys/cdefs.h>
106 __FBSDID("$FreeBSD$");
107
108 #include "opt_wlan.h"
109
110 #include <sys/param.h>
111 #include <sys/bus.h>
112 #include <sys/conf.h>
113 #include <sys/endian.h>
114 #include <sys/firmware.h>
115 #include <sys/kernel.h>
116 #include <sys/malloc.h>
117 #include <sys/mbuf.h>
118 #include <sys/mutex.h>
119 #include <sys/module.h>
120 #include <sys/proc.h>
121 #include <sys/rman.h>
122 #include <sys/socket.h>
123 #include <sys/sockio.h>
124 #include <sys/sysctl.h>
125 #include <sys/linker.h>
126
127 #include <machine/bus.h>
128 #include <machine/endian.h>
129 #include <machine/resource.h>
130
131 #include <dev/pci/pcivar.h>
132 #include <dev/pci/pcireg.h>
133
134 #include <net/bpf.h>
135
136 #include <net/if.h>
137 #include <net/if_var.h>
138 #include <net/if_arp.h>
139 #include <net/if_dl.h>
140 #include <net/if_media.h>
141 #include <net/if_types.h>
142
143 #include <netinet/in.h>
144 #include <netinet/in_systm.h>
145 #include <netinet/if_ether.h>
146 #include <netinet/ip.h>
147
148 #include <net80211/ieee80211_var.h>
149 #include <net80211/ieee80211_regdomain.h>
150 #include <net80211/ieee80211_ratectl.h>
151 #include <net80211/ieee80211_radiotap.h>
152
153 #include <dev/iwm/if_iwmreg.h>
154 #include <dev/iwm/if_iwmvar.h>
155 #include <dev/iwm/if_iwm_config.h>
156 #include <dev/iwm/if_iwm_debug.h>
157 #include <dev/iwm/if_iwm_notif_wait.h>
158 #include <dev/iwm/if_iwm_util.h>
159 #include <dev/iwm/if_iwm_binding.h>
160 #include <dev/iwm/if_iwm_phy_db.h>
161 #include <dev/iwm/if_iwm_mac_ctxt.h>
162 #include <dev/iwm/if_iwm_phy_ctxt.h>
163 #include <dev/iwm/if_iwm_time_event.h>
164 #include <dev/iwm/if_iwm_power.h>
165 #include <dev/iwm/if_iwm_scan.h>
166 #include <dev/iwm/if_iwm_sta.h>
167
168 #include <dev/iwm/if_iwm_pcie_trans.h>
169 #include <dev/iwm/if_iwm_led.h>
170 #include <dev/iwm/if_iwm_fw.h>
171
172 /* From DragonflyBSD */
173 #define mtodoff(m, t, off)      ((t)((m)->m_data + (off)))
174
175 const uint8_t iwm_nvm_channels[] = {
176         /* 2.4 GHz */
177         1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
178         /* 5 GHz */
179         36, 40, 44, 48, 52, 56, 60, 64,
180         100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
181         149, 153, 157, 161, 165
182 };
183 _Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
184     "IWM_NUM_CHANNELS is too small");
185
186 const uint8_t iwm_nvm_channels_8000[] = {
187         /* 2.4 GHz */
188         1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
189         /* 5 GHz */
190         36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
191         96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
192         149, 153, 157, 161, 165, 169, 173, 177, 181
193 };
194 _Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
195     "IWM_NUM_CHANNELS_8000 is too small");
196
197 #define IWM_NUM_2GHZ_CHANNELS   14
198 #define IWM_N_HW_ADDR_MASK      0xF
199
200 /*
201  * XXX For now, there's simply a fixed set of rate table entries
202  * that are populated.
203  */
204 const struct iwm_rate {
205         uint8_t rate;
206         uint8_t plcp;
207 } iwm_rates[] = {
208         {   2,  IWM_RATE_1M_PLCP  },
209         {   4,  IWM_RATE_2M_PLCP  },
210         {  11,  IWM_RATE_5M_PLCP  },
211         {  22,  IWM_RATE_11M_PLCP },
212         {  12,  IWM_RATE_6M_PLCP  },
213         {  18,  IWM_RATE_9M_PLCP  },
214         {  24,  IWM_RATE_12M_PLCP },
215         {  36,  IWM_RATE_18M_PLCP },
216         {  48,  IWM_RATE_24M_PLCP },
217         {  72,  IWM_RATE_36M_PLCP },
218         {  96,  IWM_RATE_48M_PLCP },
219         { 108,  IWM_RATE_54M_PLCP },
220 };
221 #define IWM_RIDX_CCK    0
222 #define IWM_RIDX_OFDM   4
223 #define IWM_RIDX_MAX    (nitems(iwm_rates)-1)
224 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
225 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
226
227 struct iwm_nvm_section {
228         uint16_t length;
229         uint8_t *data;
230 };
231
232 #define IWM_MVM_UCODE_ALIVE_TIMEOUT     hz
233 #define IWM_MVM_UCODE_CALIB_TIMEOUT     (2*hz)
234
235 struct iwm_mvm_alive_data {
236         int valid;
237         uint32_t scd_base_addr;
238 };
239
240 static int      iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
241 static int      iwm_firmware_store_section(struct iwm_softc *,
242                                            enum iwm_ucode_type,
243                                            const uint8_t *, size_t);
244 static int      iwm_set_default_calib(struct iwm_softc *, const void *);
245 static void     iwm_fw_info_free(struct iwm_fw_info *);
246 static int      iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
247 static int      iwm_alloc_fwmem(struct iwm_softc *);
248 static int      iwm_alloc_sched(struct iwm_softc *);
249 static int      iwm_alloc_kw(struct iwm_softc *);
250 static int      iwm_alloc_ict(struct iwm_softc *);
251 static int      iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
252 static void     iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
253 static void     iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
254 static int      iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
255                                   int);
256 static void     iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
257 static void     iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
258 static void     iwm_enable_interrupts(struct iwm_softc *);
259 static void     iwm_restore_interrupts(struct iwm_softc *);
260 static void     iwm_disable_interrupts(struct iwm_softc *);
261 static void     iwm_ict_reset(struct iwm_softc *);
262 static int      iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
263 static void     iwm_stop_device(struct iwm_softc *);
264 static void     iwm_mvm_nic_config(struct iwm_softc *);
265 static int      iwm_nic_rx_init(struct iwm_softc *);
266 static int      iwm_nic_tx_init(struct iwm_softc *);
267 static int      iwm_nic_init(struct iwm_softc *);
268 static int      iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
269 static int      iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
270                                    uint16_t, uint8_t *, uint16_t *);
271 static int      iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
272                                      uint16_t *, uint32_t);
273 static uint32_t iwm_eeprom_channel_flags(uint16_t);
274 static void     iwm_add_channel_band(struct iwm_softc *,
275                     struct ieee80211_channel[], int, int *, int, size_t,
276                     const uint8_t[]);
277 static void     iwm_init_channel_map(struct ieee80211com *, int, int *,
278                     struct ieee80211_channel[]);
279 static struct iwm_nvm_data *
280         iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
281                            const uint16_t *, const uint16_t *,
282                            const uint16_t *, const uint16_t *,
283                            const uint16_t *);
284 static void     iwm_free_nvm_data(struct iwm_nvm_data *);
285 static void     iwm_set_hw_address_family_8000(struct iwm_softc *,
286                                                struct iwm_nvm_data *,
287                                                const uint16_t *,
288                                                const uint16_t *);
289 static int      iwm_get_sku(const struct iwm_softc *, const uint16_t *,
290                             const uint16_t *);
291 static int      iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
292 static int      iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
293                                   const uint16_t *);
294 static int      iwm_get_n_hw_addrs(const struct iwm_softc *,
295                                    const uint16_t *);
296 static void     iwm_set_radio_cfg(const struct iwm_softc *,
297                                   struct iwm_nvm_data *, uint32_t);
298 static struct iwm_nvm_data *
299         iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
300 static int      iwm_nvm_init(struct iwm_softc *);
301 static int      iwm_pcie_load_section(struct iwm_softc *, uint8_t,
302                                       const struct iwm_fw_desc *);
303 static int      iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
304                                              bus_addr_t, uint32_t);
305 static int      iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
306                                                 const struct iwm_fw_sects *,
307                                                 int, int *);
308 static int      iwm_pcie_load_cpu_sections(struct iwm_softc *,
309                                            const struct iwm_fw_sects *,
310                                            int, int *);
311 static int      iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
312                                                const struct iwm_fw_sects *);
313 static int      iwm_pcie_load_given_ucode(struct iwm_softc *,
314                                           const struct iwm_fw_sects *);
315 static int      iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *);
316 static int      iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
317 static int      iwm_send_phy_cfg_cmd(struct iwm_softc *);
318 static int      iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
319                                               enum iwm_ucode_type);
320 static int      iwm_run_init_mvm_ucode(struct iwm_softc *, int);
321 static int      iwm_rx_addbuf(struct iwm_softc *, int, int);
322 static int      iwm_mvm_get_signal_strength(struct iwm_softc *,
323                                             struct iwm_rx_phy_info *);
324 static void     iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
325                                       struct iwm_rx_packet *);
326 static int      iwm_get_noise(struct iwm_softc *sc,
327                     const struct iwm_mvm_statistics_rx_non_phy *);
328 static boolean_t iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *,
329                                     uint32_t, boolean_t);
330 static int      iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
331                                          struct iwm_rx_packet *,
332                                          struct iwm_node *);
333 static void     iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
334 static void     iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
335 #if 0
336 static void     iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
337                                  uint16_t);
338 #endif
339 static const struct iwm_rate *
340         iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
341                         struct mbuf *, struct iwm_tx_cmd *);
342 static int      iwm_tx(struct iwm_softc *, struct mbuf *,
343                        struct ieee80211_node *, int);
344 static int      iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
345                              const struct ieee80211_bpf_params *);
346 static int      iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_vap *);
347 static int      iwm_auth(struct ieee80211vap *, struct iwm_softc *);
348 static int      iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
349 static int      iwm_release(struct iwm_softc *, struct iwm_node *);
350 static struct ieee80211_node *
351                 iwm_node_alloc(struct ieee80211vap *,
352                                const uint8_t[IEEE80211_ADDR_LEN]);
353 static void     iwm_setrates(struct iwm_softc *, struct iwm_node *);
354 static int      iwm_media_change(struct ifnet *);
355 static int      iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
356 static void     iwm_endscan_cb(void *, int);
357 static void     iwm_mvm_fill_sf_command(struct iwm_softc *,
358                                         struct iwm_sf_cfg_cmd *,
359                                         struct ieee80211_node *);
360 static int      iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
361 static int      iwm_send_bt_init_conf(struct iwm_softc *);
362 static int      iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
363 static void     iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
364 static int      iwm_init_hw(struct iwm_softc *);
365 static void     iwm_init(struct iwm_softc *);
366 static void     iwm_start(struct iwm_softc *);
367 static void     iwm_stop(struct iwm_softc *);
368 static void     iwm_watchdog(void *);
369 static void     iwm_parent(struct ieee80211com *);
370 #ifdef IWM_DEBUG
371 static const char *
372                 iwm_desc_lookup(uint32_t);
373 static void     iwm_nic_error(struct iwm_softc *);
374 static void     iwm_nic_umac_error(struct iwm_softc *);
375 #endif
376 static void     iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
377 static void     iwm_notif_intr(struct iwm_softc *);
378 static void     iwm_intr(void *);
379 static int      iwm_attach(device_t);
380 static int      iwm_is_valid_ether_addr(uint8_t *);
381 static void     iwm_preinit(void *);
382 static int      iwm_detach_local(struct iwm_softc *sc, int);
383 static void     iwm_init_task(void *);
384 static void     iwm_radiotap_attach(struct iwm_softc *);
385 static struct ieee80211vap *
386                 iwm_vap_create(struct ieee80211com *,
387                                const char [IFNAMSIZ], int,
388                                enum ieee80211_opmode, int,
389                                const uint8_t [IEEE80211_ADDR_LEN],
390                                const uint8_t [IEEE80211_ADDR_LEN]);
391 static void     iwm_vap_delete(struct ieee80211vap *);
392 static void     iwm_scan_start(struct ieee80211com *);
393 static void     iwm_scan_end(struct ieee80211com *);
394 static void     iwm_update_mcast(struct ieee80211com *);
395 static void     iwm_set_channel(struct ieee80211com *);
396 static void     iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
397 static void     iwm_scan_mindwell(struct ieee80211_scan_state *);
398 static int      iwm_detach(device_t);
399
400 /*
401  * Firmware parser.
402  */
403
404 static int
405 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
406 {
407         const struct iwm_fw_cscheme_list *l = (const void *)data;
408
409         if (dlen < sizeof(*l) ||
410             dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
411                 return EINVAL;
412
413         /* we don't actually store anything for now, always use s/w crypto */
414
415         return 0;
416 }
417
418 static int
419 iwm_firmware_store_section(struct iwm_softc *sc,
420     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
421 {
422         struct iwm_fw_sects *fws;
423         struct iwm_fw_desc *fwone;
424
425         if (type >= IWM_UCODE_TYPE_MAX)
426                 return EINVAL;
427         if (dlen < sizeof(uint32_t))
428                 return EINVAL;
429
430         fws = &sc->sc_fw.fw_sects[type];
431         if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
432                 return EINVAL;
433
434         fwone = &fws->fw_sect[fws->fw_count];
435
436         /* first 32bit are device load offset */
437         memcpy(&fwone->offset, data, sizeof(uint32_t));
438
439         /* rest is data */
440         fwone->data = data + sizeof(uint32_t);
441         fwone->len = dlen - sizeof(uint32_t);
442
443         fws->fw_count++;
444
445         return 0;
446 }
447
448 #define IWM_DEFAULT_SCAN_CHANNELS 40
449
450 /* iwlwifi: iwl-drv.c */
451 struct iwm_tlv_calib_data {
452         uint32_t ucode_type;
453         struct iwm_tlv_calib_ctrl calib;
454 } __packed;
455
456 static int
457 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
458 {
459         const struct iwm_tlv_calib_data *def_calib = data;
460         uint32_t ucode_type = le32toh(def_calib->ucode_type);
461
462         if (ucode_type >= IWM_UCODE_TYPE_MAX) {
463                 device_printf(sc->sc_dev,
464                     "Wrong ucode_type %u for default "
465                     "calibration.\n", ucode_type);
466                 return EINVAL;
467         }
468
469         sc->sc_default_calib[ucode_type].flow_trigger =
470             def_calib->calib.flow_trigger;
471         sc->sc_default_calib[ucode_type].event_trigger =
472             def_calib->calib.event_trigger;
473
474         return 0;
475 }
476
477 static int
478 iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
479                         struct iwm_ucode_capabilities *capa)
480 {
481         const struct iwm_ucode_api *ucode_api = (const void *)data;
482         uint32_t api_index = le32toh(ucode_api->api_index);
483         uint32_t api_flags = le32toh(ucode_api->api_flags);
484         int i;
485
486         if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
487                 device_printf(sc->sc_dev,
488                     "api flags index %d larger than supported by driver\n",
489                     api_index);
490                 /* don't return an error so we can load FW that has more bits */
491                 return 0;
492         }
493
494         for (i = 0; i < 32; i++) {
495                 if (api_flags & (1U << i))
496                         setbit(capa->enabled_api, i + 32 * api_index);
497         }
498
499         return 0;
500 }
501
502 static int
503 iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
504                            struct iwm_ucode_capabilities *capa)
505 {
506         const struct iwm_ucode_capa *ucode_capa = (const void *)data;
507         uint32_t api_index = le32toh(ucode_capa->api_index);
508         uint32_t api_flags = le32toh(ucode_capa->api_capa);
509         int i;
510
511         if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
512                 device_printf(sc->sc_dev,
513                     "capa flags index %d larger than supported by driver\n",
514                     api_index);
515                 /* don't return an error so we can load FW that has more bits */
516                 return 0;
517         }
518
519         for (i = 0; i < 32; i++) {
520                 if (api_flags & (1U << i))
521                         setbit(capa->enabled_capa, i + 32 * api_index);
522         }
523
524         return 0;
525 }
526
527 static void
528 iwm_fw_info_free(struct iwm_fw_info *fw)
529 {
530         firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
531         fw->fw_fp = NULL;
532         /* don't touch fw->fw_status */
533         memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
534 }
535
536 static int
537 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
538 {
539         struct iwm_fw_info *fw = &sc->sc_fw;
540         const struct iwm_tlv_ucode_header *uhdr;
541         const struct iwm_ucode_tlv *tlv;
542         struct iwm_ucode_capabilities *capa = &sc->ucode_capa;
543         enum iwm_ucode_tlv_type tlv_type;
544         const struct firmware *fwp;
545         const uint8_t *data;
546         uint32_t tlv_len;
547         uint32_t usniffer_img;
548         const uint8_t *tlv_data;
549         uint32_t paging_mem_size;
550         int num_of_cpus;
551         int error = 0;
552         size_t len;
553
554         if (fw->fw_status == IWM_FW_STATUS_DONE &&
555             ucode_type != IWM_UCODE_INIT)
556                 return 0;
557
558         while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
559                 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
560         fw->fw_status = IWM_FW_STATUS_INPROGRESS;
561
562         if (fw->fw_fp != NULL)
563                 iwm_fw_info_free(fw);
564
565         /*
566          * Load firmware into driver memory.
567          * fw_fp will be set.
568          */
569         IWM_UNLOCK(sc);
570         fwp = firmware_get(sc->cfg->fw_name);
571         IWM_LOCK(sc);
572         if (fwp == NULL) {
573                 device_printf(sc->sc_dev,
574                     "could not read firmware %s (error %d)\n",
575                     sc->cfg->fw_name, error);
576                 goto out;
577         }
578         fw->fw_fp = fwp;
579
580         /* (Re-)Initialize default values. */
581         capa->flags = 0;
582         capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
583         capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
584         memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
585         memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
586         memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
587
588         /*
589          * Parse firmware contents
590          */
591
592         uhdr = (const void *)fw->fw_fp->data;
593         if (*(const uint32_t *)fw->fw_fp->data != 0
594             || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
595                 device_printf(sc->sc_dev, "invalid firmware %s\n",
596                     sc->cfg->fw_name);
597                 error = EINVAL;
598                 goto out;
599         }
600
601         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
602             IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
603             IWM_UCODE_MINOR(le32toh(uhdr->ver)),
604             IWM_UCODE_API(le32toh(uhdr->ver)));
605         data = uhdr->data;
606         len = fw->fw_fp->datasize - sizeof(*uhdr);
607
608         while (len >= sizeof(*tlv)) {
609                 len -= sizeof(*tlv);
610                 tlv = (const void *)data;
611
612                 tlv_len = le32toh(tlv->length);
613                 tlv_type = le32toh(tlv->type);
614                 tlv_data = tlv->data;
615
616                 if (len < tlv_len) {
617                         device_printf(sc->sc_dev,
618                             "firmware too short: %zu bytes\n",
619                             len);
620                         error = EINVAL;
621                         goto parse_out;
622                 }
623                 len -= roundup2(tlv_len, 4);
624                 data += sizeof(tlv) + roundup2(tlv_len, 4);
625
626                 switch ((int)tlv_type) {
627                 case IWM_UCODE_TLV_PROBE_MAX_LEN:
628                         if (tlv_len != sizeof(uint32_t)) {
629                                 device_printf(sc->sc_dev,
630                                     "%s: PROBE_MAX_LEN (%d) != sizeof(uint32_t)\n",
631                                     __func__,
632                                     (int) tlv_len);
633                                 error = EINVAL;
634                                 goto parse_out;
635                         }
636                         capa->max_probe_length =
637                             le32_to_cpup((const uint32_t *)tlv_data);
638                         /* limit it to something sensible */
639                         if (capa->max_probe_length >
640                             IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
641                                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
642                                     "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
643                                     "ridiculous\n", __func__);
644                                 error = EINVAL;
645                                 goto parse_out;
646                         }
647                         break;
648                 case IWM_UCODE_TLV_PAN:
649                         if (tlv_len) {
650                                 device_printf(sc->sc_dev,
651                                     "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
652                                     __func__,
653                                     (int) tlv_len);
654                                 error = EINVAL;
655                                 goto parse_out;
656                         }
657                         capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
658                         break;
659                 case IWM_UCODE_TLV_FLAGS:
660                         if (tlv_len < sizeof(uint32_t)) {
661                                 device_printf(sc->sc_dev,
662                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
663                                     __func__,
664                                     (int) tlv_len);
665                                 error = EINVAL;
666                                 goto parse_out;
667                         }
668                         if (tlv_len % sizeof(uint32_t)) {
669                                 device_printf(sc->sc_dev,
670                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) %% sizeof(uint32_t)\n",
671                                     __func__,
672                                     (int) tlv_len);
673                                 error = EINVAL;
674                                 goto parse_out;
675                         }
676                         /*
677                          * Apparently there can be many flags, but Linux driver
678                          * parses only the first one, and so do we.
679                          *
680                          * XXX: why does this override IWM_UCODE_TLV_PAN?
681                          * Intentional or a bug?  Observations from
682                          * current firmware file:
683                          *  1) TLV_PAN is parsed first
684                          *  2) TLV_FLAGS contains TLV_FLAGS_PAN
685                          * ==> this resets TLV_PAN to itself... hnnnk
686                          */
687                         capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
688                         break;
689                 case IWM_UCODE_TLV_CSCHEME:
690                         if ((error = iwm_store_cscheme(sc,
691                             tlv_data, tlv_len)) != 0) {
692                                 device_printf(sc->sc_dev,
693                                     "%s: iwm_store_cscheme(): returned %d\n",
694                                     __func__,
695                                     error);
696                                 goto parse_out;
697                         }
698                         break;
699                 case IWM_UCODE_TLV_NUM_OF_CPU:
700                         if (tlv_len != sizeof(uint32_t)) {
701                                 device_printf(sc->sc_dev,
702                                     "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
703                                     __func__,
704                                     (int) tlv_len);
705                                 error = EINVAL;
706                                 goto parse_out;
707                         }
708                         num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
709                         if (num_of_cpus == 2) {
710                                 fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus =
711                                         TRUE;
712                                 fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus =
713                                         TRUE;
714                                 fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus =
715                                         TRUE;
716                         } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
717                                 device_printf(sc->sc_dev,
718                                     "%s: Driver supports only 1 or 2 CPUs\n",
719                                     __func__);
720                                 error = EINVAL;
721                                 goto parse_out;
722                         }
723                         break;
724                 case IWM_UCODE_TLV_SEC_RT:
725                         if ((error = iwm_firmware_store_section(sc,
726                             IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
727                                 device_printf(sc->sc_dev,
728                                     "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
729                                     __func__,
730                                     error);
731                                 goto parse_out;
732                         }
733                         break;
734                 case IWM_UCODE_TLV_SEC_INIT:
735                         if ((error = iwm_firmware_store_section(sc,
736                             IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
737                                 device_printf(sc->sc_dev,
738                                     "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
739                                     __func__,
740                                     error);
741                                 goto parse_out;
742                         }
743                         break;
744                 case IWM_UCODE_TLV_SEC_WOWLAN:
745                         if ((error = iwm_firmware_store_section(sc,
746                             IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
747                                 device_printf(sc->sc_dev,
748                                     "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
749                                     __func__,
750                                     error);
751                                 goto parse_out;
752                         }
753                         break;
754                 case IWM_UCODE_TLV_DEF_CALIB:
755                         if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
756                                 device_printf(sc->sc_dev,
757                                     "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
758                                     __func__,
759                                     (int) tlv_len,
760                                     (int) sizeof(struct iwm_tlv_calib_data));
761                                 error = EINVAL;
762                                 goto parse_out;
763                         }
764                         if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
765                                 device_printf(sc->sc_dev,
766                                     "%s: iwm_set_default_calib() failed: %d\n",
767                                     __func__,
768                                     error);
769                                 goto parse_out;
770                         }
771                         break;
772                 case IWM_UCODE_TLV_PHY_SKU:
773                         if (tlv_len != sizeof(uint32_t)) {
774                                 error = EINVAL;
775                                 device_printf(sc->sc_dev,
776                                     "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
777                                     __func__,
778                                     (int) tlv_len);
779                                 goto parse_out;
780                         }
781                         sc->sc_fw.phy_config =
782                             le32_to_cpup((const uint32_t *)tlv_data);
783                         sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
784                                                   IWM_FW_PHY_CFG_TX_CHAIN) >>
785                                                   IWM_FW_PHY_CFG_TX_CHAIN_POS;
786                         sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
787                                                   IWM_FW_PHY_CFG_RX_CHAIN) >>
788                                                   IWM_FW_PHY_CFG_RX_CHAIN_POS;
789                         break;
790
791                 case IWM_UCODE_TLV_API_CHANGES_SET: {
792                         if (tlv_len != sizeof(struct iwm_ucode_api)) {
793                                 error = EINVAL;
794                                 goto parse_out;
795                         }
796                         if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
797                                 error = EINVAL;
798                                 goto parse_out;
799                         }
800                         break;
801                 }
802
803                 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
804                         if (tlv_len != sizeof(struct iwm_ucode_capa)) {
805                                 error = EINVAL;
806                                 goto parse_out;
807                         }
808                         if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
809                                 error = EINVAL;
810                                 goto parse_out;
811                         }
812                         break;
813                 }
814
815                 case 48: /* undocumented TLV */
816                 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
817                 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
818                         /* ignore, not used by current driver */
819                         break;
820
821                 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
822                         if ((error = iwm_firmware_store_section(sc,
823                             IWM_UCODE_REGULAR_USNIFFER, tlv_data,
824                             tlv_len)) != 0)
825                                 goto parse_out;
826                         break;
827
828                 case IWM_UCODE_TLV_PAGING:
829                         if (tlv_len != sizeof(uint32_t)) {
830                                 error = EINVAL;
831                                 goto parse_out;
832                         }
833                         paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);
834
835                         IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
836                             "%s: Paging: paging enabled (size = %u bytes)\n",
837                             __func__, paging_mem_size);
838                         if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
839                                 device_printf(sc->sc_dev,
840                                         "%s: Paging: driver supports up to %u bytes for paging image\n",
841                                         __func__, IWM_MAX_PAGING_IMAGE_SIZE);
842                                 error = EINVAL;
843                                 goto out;
844                         }
845                         if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
846                                 device_printf(sc->sc_dev,
847                                     "%s: Paging: image isn't multiple %u\n",
848                                     __func__, IWM_FW_PAGING_SIZE);
849                                 error = EINVAL;
850                                 goto out;
851                         }
852
853                         sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
854                             paging_mem_size;
855                         usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
856                         sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
857                             paging_mem_size;
858                         break;
859
860                 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
861                         if (tlv_len != sizeof(uint32_t)) {
862                                 error = EINVAL;
863                                 goto parse_out;
864                         }
865                         capa->n_scan_channels =
866                             le32_to_cpup((const uint32_t *)tlv_data);
867                         break;
868
869                 case IWM_UCODE_TLV_FW_VERSION:
870                         if (tlv_len != sizeof(uint32_t) * 3) {
871                                 error = EINVAL;
872                                 goto parse_out;
873                         }
874                         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
875                             "%d.%d.%d",
876                             le32toh(((const uint32_t *)tlv_data)[0]),
877                             le32toh(((const uint32_t *)tlv_data)[1]),
878                             le32toh(((const uint32_t *)tlv_data)[2]));
879                         break;
880
881                 case IWM_UCODE_TLV_FW_MEM_SEG:
882                         break;
883
884                 default:
885                         device_printf(sc->sc_dev,
886                             "%s: unknown firmware section %d, abort\n",
887                             __func__, tlv_type);
888                         error = EINVAL;
889                         goto parse_out;
890                 }
891         }
892
893         KASSERT(error == 0, ("unhandled error"));
894
895  parse_out:
896         if (error) {
897                 device_printf(sc->sc_dev, "firmware parse error %d, "
898                     "section type %d\n", error, tlv_type);
899         }
900
901  out:
902         if (error) {
903                 fw->fw_status = IWM_FW_STATUS_NONE;
904                 if (fw->fw_fp != NULL)
905                         iwm_fw_info_free(fw);
906         } else
907                 fw->fw_status = IWM_FW_STATUS_DONE;
908         wakeup(&sc->sc_fw);
909
910         return error;
911 }
912
913 /*
914  * DMA resource routines
915  */
916
917 /* fwmem is used to load firmware onto the card */
918 static int
919 iwm_alloc_fwmem(struct iwm_softc *sc)
920 {
921         /* Must be aligned on a 16-byte boundary. */
922         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
923             IWM_FH_MEM_TB_MAX_LENGTH, 16);
924 }
925
926 /* tx scheduler rings.  not used? */
927 static int
928 iwm_alloc_sched(struct iwm_softc *sc)
929 {
930         /* TX scheduler rings must be aligned on a 1KB boundary. */
931         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
932             nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
933 }
934
935 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
936 static int
937 iwm_alloc_kw(struct iwm_softc *sc)
938 {
939         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
940 }
941
942 /* interrupt cause table */
943 static int
944 iwm_alloc_ict(struct iwm_softc *sc)
945 {
946         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
947             IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
948 }
949
/*
 * Allocate and initialize the RX ring: descriptor and status DMA memory,
 * a buffer DMA tag, a spare DMA map, and one mapped RX buffer per slot.
 * On any failure, everything allocated so far is released via
 * iwm_free_rx_ring().  Returns 0 on success or a bus_dma error code.
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/*
	 * Allocate RX descriptors (256-byte aligned).
	 * Each descriptor is one 32-bit word.
	 */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/*
	 * Create RX buffer DMA tag: single segment of up to IWM_RBUF_SIZE
	 * bytes, restricted to 32-bit DMA addresses.
	 */
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}
	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		/* Populate slot i with a receive buffer via iwm_rx_addbuf(). */
		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}
1020
1021 static void
1022 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1023 {
1024         /* Reset the ring state */
1025         ring->cur = 0;
1026
1027         /*
1028          * The hw rx ring index in shared memory must also be cleared,
1029          * otherwise the discrepancy can cause reprocessing chaos.
1030          */
1031         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1032 }
1033
1034 static void
1035 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1036 {
1037         int i;
1038
1039         iwm_dma_contig_free(&ring->desc_dma);
1040         iwm_dma_contig_free(&ring->stat_dma);
1041
1042         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1043                 struct iwm_rx_data *data = &ring->data[i];
1044
1045                 if (data->m != NULL) {
1046                         bus_dmamap_sync(ring->data_dmat, data->map,
1047                             BUS_DMASYNC_POSTREAD);
1048                         bus_dmamap_unload(ring->data_dmat, data->map);
1049                         m_freem(data->m);
1050                         data->m = NULL;
1051                 }
1052                 if (data->map != NULL) {
1053                         bus_dmamap_destroy(ring->data_dmat, data->map);
1054                         data->map = NULL;
1055                 }
1056         }
1057         if (ring->spare_map != NULL) {
1058                 bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1059                 ring->spare_map = NULL;
1060         }
1061         if (ring->data_dmat != NULL) {
1062                 bus_dma_tag_destroy(ring->data_dmat);
1063                 ring->data_dmat = NULL;
1064         }
1065 }
1066
/*
 * Allocate and initialize TX ring 'qid': DMA memory for the TFD
 * descriptors and, for rings up to and including the command queue,
 * the command buffers, a buffer DMA tag and one DMA map per slot.
 * On failure everything allocated so far is released via
 * iwm_free_tx_ring().  Returns 0 on success or a bus_dma error code.
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_MVM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

	/* Buffer DMA tag, sized per the queue type chosen above. */
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	/*
	 * Precompute per-slot physical addresses of each slot's command
	 * header and of the scratch field inside its iwm_tx_cmd, walking
	 * the contiguous command buffer block.
	 */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	/* After the walk, paddr must sit exactly at the end of the block. */
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
1146
/*
 * Return a TX ring to its empty post-init state without freeing its DMA
 * resources: drop any queued mbufs, zero the descriptors and reset the
 * software indices.
 */
static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			/* Complete outstanding DMA before freeing the mbuf. */
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	/* This queue can no longer be full. */
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;

	/* Command queue: drop the "keep NIC awake" request if one is held. */
	if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
		iwm_pcie_clear_cmd_in_flight(sc);
}
1174
1175 static void
1176 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1177 {
1178         int i;
1179
1180         iwm_dma_contig_free(&ring->desc_dma);
1181         iwm_dma_contig_free(&ring->cmd_dma);
1182
1183         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1184                 struct iwm_tx_data *data = &ring->data[i];
1185
1186                 if (data->m != NULL) {
1187                         bus_dmamap_sync(ring->data_dmat, data->map,
1188                             BUS_DMASYNC_POSTWRITE);
1189                         bus_dmamap_unload(ring->data_dmat, data->map);
1190                         m_freem(data->m);
1191                         data->m = NULL;
1192                 }
1193                 if (data->map != NULL) {
1194                         bus_dmamap_destroy(ring->data_dmat, data->map);
1195                         data->map = NULL;
1196                 }
1197         }
1198         if (ring->data_dmat != NULL) {
1199                 bus_dma_tag_destroy(ring->data_dmat);
1200                 ring->data_dmat = NULL;
1201         }
1202 }
1203
1204 /*
1205  * High-level hardware frobbing routines
1206  */
1207
1208 static void
1209 iwm_enable_interrupts(struct iwm_softc *sc)
1210 {
1211         sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1212         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1213 }
1214
/* Re-apply the interrupt mask last saved by iwm_enable_interrupts(). */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1220
/*
 * Mask all interrupts, then acknowledge (clear) anything pending in
 * both the main and the flow-handler (FH) interrupt status registers.
 */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}
1231
/*
 * Reset and re-arm the interrupt cause table (ICT): zero the table,
 * point the device at it, switch the driver into ICT interrupt mode
 * and re-enable interrupts.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts, acknowledging any stale causes first. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1255
1256 /* iwlwifi pcie/trans.c */
1257
1258 /*
1259  * Since this .. hard-resets things, it's time to actually
1260  * mark the first vap (if any) as having no mac context.
1261  * It's annoying, but since the driver is potentially being
1262  * stop/start'ed whilst active (thanks openbsd port!) we
1263  * have to correctly track this.
1264  */
/*
 * Full hardware shutdown: quiesce interrupts and DMA, reset all rings,
 * power the device down and leave only the RF-kill interrupt armed.
 * See the comment block above for why the first vap's MAC context is
 * also invalidated here.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, qid;
	uint32_t mask = 0;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->phy_ctxt = NULL;
		iv->is_uploaded = 0;
	}

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	if (iwm_nic_lock(sc)) {
		/* Deactivate the TX scheduler. */
		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

		/* Stop each Tx DMA channel */
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
		}

		/* Wait for DMA channels to be idle */
		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
		    5000)) {
			device_printf(sc->sc_dev,
			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
		}
		iwm_nic_unlock(sc);
	}
	iwm_pcie_rx_stop(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		/* Power-down device's busmaster DMA clocks */
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
			iwm_nic_unlock(sc);
		}
		DELAY(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);
	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
1351
1352 /* iwlwifi: mvm/ops.c */
/*
 * Program the hardware interface configuration register from the MAC
 * hardware revision and the radio configuration advertised by the
 * firmware's PHY config, then apply the 7000-family early-power-off
 * workaround.
 */
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;
	uint32_t phy_config = iwm_mvm_get_phy_config(sc);

	/* Extract radio type/step/dash fields from the PHY config word. */
	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}
1395
/*
 * Program the RX DMA engine: clear the shared status area, point the
 * hardware at the descriptor ring and status area, configure and
 * enable channel 0, and set interrupt coalescing.  Returns EBUSY if
 * the NIC lock cannot be taken, 0 otherwise.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* Stop Rx DMA */
	iwm_pcie_rx_stop(sc);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* reset and flush pointers */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable Rx DMA
	 * XXX 5000 HW isn't supported by the iwm(4) driver.
	 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->cfg->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1459
/*
 * Program the TX side: deactivate the scheduler, set the keep-warm
 * page address, point the hardware at each TX ring's descriptors and
 * enable auto-active mode in the scheduler.  Returns EBUSY if the NIC
 * lock cannot be taken, 0 otherwise.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}

	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}
1494
/*
 * Core NIC initialization: bring up the APM (power management) block,
 * apply family-specific power settings, run the MVM NIC configuration,
 * then set up RX and TX DMA state and enable shadow registers.
 *
 * Returns 0 on success or the error from RX/TX initialization.
 */
static int
iwm_nic_init(struct iwm_softc *sc)
{
        int error;

        iwm_apm_init(sc);
        /* Power settings only applied here on 7000-family devices. */
        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
                iwm_set_pwr(sc);

        iwm_mvm_nic_config(sc);

        if ((error = iwm_nic_rx_init(sc)) != 0)
                return error;

        /*
         * Ditto for TX, from iwn
         */
        if ((error = iwm_nic_tx_init(sc)) != 0)
                return error;

        IWM_DPRINTF(sc, IWM_DEBUG_RESET,
            "%s: shadow registers enabled\n", __func__);
        IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

        return 0;
}
1521
/*
 * Activate TX queue 'qid' and bind it to hardware FIFO 'fifo'.
 *
 * The command queue (IWM_MVM_CMD_QUEUE) is configured directly through
 * scheduler (SCD) PRPH registers and SRAM; all other queues are set up
 * by sending an IWM_SCD_QUEUE_CFG host command to the firmware.  The
 * NIC lock is dropped and re-acquired around several intermediate steps
 * (the called helpers manage the lock themselves).
 *
 * Returns 0 on success, EBUSY if the NIC lock could not be obtained, or
 * the error from the host command.
 */
int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
        if (!iwm_nic_lock(sc)) {
                device_printf(sc->sc_dev,
                    "%s: cannot enable txq %d\n",
                    __func__,
                    qid);
                return EBUSY;
        }

        /* Reset the hardware write pointer for this queue. */
        IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

        if (qid == IWM_MVM_CMD_QUEUE) {
                /* Deactivate the queue before (re)configuring it. */
                iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
                    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
                    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

                iwm_nic_unlock(sc);

                /* Take the queue out of aggregation mode. */
                iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

                if (!iwm_nic_lock(sc)) {
                        device_printf(sc->sc_dev,
                            "%s: cannot enable txq %d\n", __func__, qid);
                        return EBUSY;
                }
                /* Reset the scheduler's read pointer. */
                iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
                iwm_nic_unlock(sc);

                /* Clear the queue's scheduler context in SRAM. */
                iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
                /* Set scheduler window size and frame limit. */
                iwm_write_mem32(sc,
                    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
                    sizeof(uint32_t),
                    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
                    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
                    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
                    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

                if (!iwm_nic_lock(sc)) {
                        device_printf(sc->sc_dev,
                            "%s: cannot enable txq %d\n", __func__, qid);
                        return EBUSY;
                }
                /* Mark the queue active and bind it to the target FIFO. */
                iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
                    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
                    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
                    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
                    IWM_SCD_QUEUE_STTS_REG_MSK);
        } else {
                struct iwm_scd_txq_cfg_cmd cmd;
                int error;

                iwm_nic_unlock(sc);

                /* Let the firmware configure the queue for us. */
                memset(&cmd, 0, sizeof(cmd));
                cmd.scd_queue = qid;
                cmd.enable = 1;
                cmd.sta_id = sta_id;
                cmd.tx_fifo = fifo;
                cmd.aggregate = 0;
                cmd.window = IWM_FRAME_LIMIT;

                error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
                    sizeof(cmd), &cmd);
                if (error) {
                        device_printf(sc->sc_dev,
                            "cannot enable txq %d\n", qid);
                        return error;
                }

                if (!iwm_nic_lock(sc))
                        return EBUSY;
        }

        /*
         * NOTE(review): this ORs the queue *number* (not a 1 << qid bit)
         * into IWM_SCD_EN_CTRL -- verify against the iwlwifi reference.
         */
        iwm_write_prph(sc, IWM_SCD_EN_CTRL,
            iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

        iwm_nic_unlock(sc);

        IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
            __func__, qid, fifo);

        return 0;
}
1609
/*
 * Post-"alive" transport setup: reset the ICT table, read (and verify
 * against the firmware-reported 'scd_base_addr', if non-zero) the
 * scheduler SRAM base address, clear the scheduler context/translation
 * memory, enable the command queue, and enable all FH TX DMA channels.
 *
 * A base-address mismatch is logged but not treated as fatal.
 * Returns 0 on success or an errno value.
 */
static int
iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
{
        int error, chnl;

        int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
            IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);

        if (!iwm_nic_lock(sc))
                return EBUSY;

        iwm_ict_reset(sc);

        sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
        if (scd_base_addr != 0 &&
            scd_base_addr != sc->scd_base_addr) {
                device_printf(sc->sc_dev,
                    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
                    __func__, sc->scd_base_addr, scd_base_addr);
        }

        iwm_nic_unlock(sc);

        /* reset context data, TX status and translation data */
        error = iwm_write_mem(sc,
            sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
            NULL, clear_dwords);
        if (error)
                return EBUSY;

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* Set physical address of TX scheduler rings (1KB aligned). */
        iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

        iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

        iwm_nic_unlock(sc);

        /* enable command channel */
        error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
        if (error)
                return error;

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* Activate all TX scheduler FIFOs. */
        iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

        /* Enable DMA channels. */
        for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
                IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
                    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
                    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
        }

        IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
            IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

        iwm_nic_unlock(sc);

        /* Enable L1-Active */
        if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
                iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
                    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
        }

        /* 'error' is 0 here on the success path. */
        return error;
}
1680
1681 /*
1682  * NVM read access and content parsing.  We do not support
1683  * external NVM or writing NVM.
1684  * iwlwifi/mvm/nvm.c
1685  */
1686
1687 /* Default NVM size to read */
1688 #define IWM_NVM_DEFAULT_CHUNK_SIZE      (2*1024)
1689
1690 #define IWM_NVM_WRITE_OPCODE 1
1691 #define IWM_NVM_READ_OPCODE 0
1692
/* load nvm chunk response (firmware status codes, see iwm_nvm_read_chunk) */
enum {
        IWM_READ_NVM_CHUNK_SUCCEED = 0,
        /* Returned when the requested read address is not valid. */
        IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
};
1698
/*
 * Read one chunk of NVM section 'section' via the IWM_NVM_ACCESS_CMD
 * firmware command.
 *
 * The chunk is described by 'offset'/'length'; the response payload is
 * copied into data + offset and *len is set to the number of bytes
 * actually returned.  *len is set to 0 (and 0 is returned) when the
 * firmware reports NOT_VALID_ADDRESS at a non-zero offset, which marks
 * the end of a section whose size is a multiple of 2K.
 *
 * Returns 0 on success or an errno value.
 */
static int
iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
        uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
{
        struct iwm_nvm_access_cmd nvm_access_cmd = {
                .offset = htole16(offset),
                .length = htole16(length),
                .type = htole16(section),
                .op_code = IWM_NVM_READ_OPCODE,
        };
        struct iwm_nvm_access_resp *nvm_resp;
        struct iwm_rx_packet *pkt;
        struct iwm_host_cmd cmd = {
                .id = IWM_NVM_ACCESS_CMD,
                .flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
                .data = { &nvm_access_cmd, },
        };
        int ret, bytes_read, offset_read;
        uint8_t *resp_data;

        cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);

        ret = iwm_send_cmd(sc, &cmd);
        if (ret) {
                device_printf(sc->sc_dev,
                    "Could not send NVM_ACCESS command (error=%d)\n", ret);
                return ret;
        }

        pkt = cmd.resp_pkt;

        /* Extract NVM response */
        nvm_resp = (void *)pkt->data;
        ret = le16toh(nvm_resp->status);
        bytes_read = le16toh(nvm_resp->length);
        offset_read = le16toh(nvm_resp->offset);
        resp_data = nvm_resp->data;
        if (ret) {
                if ((offset != 0) &&
                    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
                        /*
                         * meaning of NOT_VALID_ADDRESS:
                         * driver try to read chunk from address that is
                         * multiple of 2K and got an error since addr is empty.
                         * meaning of (offset != 0): driver already
                         * read valid data from another chunk so this case
                         * is not an error.
                         */
                        IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
                                    "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
                                    offset);
                        *len = 0;
                        ret = 0;
                } else {
                        IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
                                    "NVM access command failed with status %d\n", ret);
                        ret = EIO;
                }
                goto exit;
        }

        /* Sanity-check the response against what was requested. */
        if (offset_read != offset) {
                device_printf(sc->sc_dev,
                    "NVM ACCESS response with invalid offset %d\n",
                    offset_read);
                ret = EINVAL;
                goto exit;
        }

        if (bytes_read > length) {
                device_printf(sc->sc_dev,
                    "NVM ACCESS response with too much data "
                    "(%d bytes requested, %d bytes received)\n",
                    length, bytes_read);
                ret = EINVAL;
                goto exit;
        }

        /* Write data to NVM */
        memcpy(data + offset, resp_data, bytes_read);
        *len = bytes_read;

 exit:
        /* The response packet must be released on every path. */
        iwm_free_resp(sc, &cmd);
        return ret;
}
1785
1786 /*
1787  * Reads an NVM section completely.
1788  * NICs prior to 7000 family don't have a real NVM, but just read
1789  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1790  * by uCode, we need to manually check in this case that we don't
1791  * overflow and try to read more than the EEPROM size.
1792  * For 7000 family NICs, we supply the maximal size we can read, and
1793  * the uCode fills the response with as much data as we can,
1794  * without overflowing, so no check is needed.
1795  */
1796 static int
1797 iwm_nvm_read_section(struct iwm_softc *sc,
1798         uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1799 {
1800         uint16_t seglen, length, offset = 0;
1801         int ret;
1802
1803         /* Set nvm section read length */
1804         length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1805
1806         seglen = length;
1807
1808         /* Read the NVM until exhausted (reading less than requested) */
1809         while (seglen == length) {
1810                 /* Check no memory assumptions fail and cause an overflow */
1811                 if ((size_read + offset + length) >
1812                     sc->cfg->eeprom_size) {
1813                         device_printf(sc->sc_dev,
1814                             "EEPROM size is too small for NVM\n");
1815                         return ENOBUFS;
1816                 }
1817
1818                 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1819                 if (ret) {
1820                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1821                                     "Cannot read NVM from section %d offset %d, length %d\n",
1822                                     section, offset, length);
1823                         return ret;
1824                 }
1825                 offset += seglen;
1826         }
1827
1828         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1829                     "NVM section %d read completed\n", section);
1830         *len = offset;
1831         return 0;
1832 }
1833
1834 /*
1835  * BEGIN IWM_NVM_PARSE
1836  */
1837
1838 /* iwlwifi/iwl-nvm-parse.c */
1839
1840 /* NVM offsets (in words) definitions */
enum iwm_nvm_offsets {
        /* NVM HW-Section offset (in words) definitions */
        IWM_HW_ADDR = 0x15,

/* NVM SW-Section offset (in words) definitions */
        IWM_NVM_SW_SECTION = 0x1C0,
        IWM_NVM_VERSION = 0,
        IWM_RADIO_CFG = 1,
        IWM_SKU = 2,
        IWM_N_HW_ADDRS = 3,
        /* Channel list offset, relative to the start of the SW section. */
        IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

/* NVM calibration section offset (in words) definitions */
        IWM_NVM_CALIB_SECTION = 0x2B8,
        /* XTAL calibration offset, relative to the calibration section. */
        IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
1857
enum iwm_8000_nvm_offsets {
        /* NVM HW-Section offset (in words) definitions */
        IWM_HW_ADDR0_WFPM_8000 = 0x12,
        IWM_HW_ADDR1_WFPM_8000 = 0x16,
        IWM_HW_ADDR0_PCIE_8000 = 0x8A,
        IWM_HW_ADDR1_PCIE_8000 = 0x8E,
        /* Offset of the MAC address within the MAC_OVERRIDE section. */
        IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,

        /* NVM SW-Section offset (in words) definitions */
        IWM_NVM_SW_SECTION_8000 = 0x1C0,
        IWM_NVM_VERSION_8000 = 0,
        IWM_RADIO_CFG_8000 = 0,
        IWM_SKU_8000 = 2,
        IWM_N_HW_ADDRS_8000 = 3,

        /* NVM REGULATORY -Section offset (in words) definitions */
        IWM_NVM_CHANNELS_8000 = 0,
        IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
        IWM_NVM_LAR_OFFSET_8000 = 0x507,
        IWM_NVM_LAR_ENABLED_8000 = 0x7,

        /* NVM calibration section offset (in words) definitions */
        IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
        /* XTAL calibration offset, relative to the calibration section. */
        IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
};
1883
1884 /* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
        /* Each bit, when set, enables the corresponding capability. */
        IWM_NVM_SKU_CAP_BAND_24GHZ      = (1 << 0),
        IWM_NVM_SKU_CAP_BAND_52GHZ      = (1 << 1),
        IWM_NVM_SKU_CAP_11N_ENABLE      = (1 << 2),
        IWM_NVM_SKU_CAP_11AC_ENABLE     = (1 << 3),
};
1891
/* radio config bits (actual values from NVM definition) */
/*
 * Each macro argument is fully parenthesized so the macros expand
 * correctly when passed compound expressions (e.g. "a | b"); with the
 * previous forms, "&" bound tighter than the caller's operator.
 */
#define IWM_NVM_RF_CFG_DASH_MSK(x)   ((x) & 0x3)           /* bits 0-1   */
#define IWM_NVM_RF_CFG_STEP_MSK(x)   (((x) >> 2)  & 0x3)   /* bits 2-3   */
#define IWM_NVM_RF_CFG_TYPE_MSK(x)   (((x) >> 4)  & 0x3)   /* bits 4-5   */
#define IWM_NVM_RF_CFG_PNUM_MSK(x)   (((x) >> 6)  & 0x3)   /* bits 6-7   */
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) (((x) >> 8)  & 0xF)   /* bits 8-11  */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) (((x) >> 12) & 0xF)   /* bits 12-15 */

#define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)       ((x) & 0xF)
#define IWM_NVM_RF_CFG_DASH_MSK_8000(x)         (((x) >> 4) & 0xF)
#define IWM_NVM_RF_CFG_STEP_MSK_8000(x)         (((x) >> 8) & 0xF)
#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)         (((x) >> 12) & 0xFFF)
#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)       (((x) >> 24) & 0xF)
#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)       (((x) >> 28) & 0xF)

/* Default max TX power value used when the NVM provides no limit. */
#define DEFAULT_MAX_TX_POWER 16
1908
1909 /**
1910  * enum iwm_nvm_channel_flags - channel flags in NVM
1911  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1912  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1913  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1914  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1915  * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1916  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1917  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1918  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1919  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1920  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1921  */
1922 enum iwm_nvm_channel_flags {
1923         IWM_NVM_CHANNEL_VALID = (1 << 0),
1924         IWM_NVM_CHANNEL_IBSS = (1 << 1),
1925         IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1926         IWM_NVM_CHANNEL_RADAR = (1 << 4),
1927         IWM_NVM_CHANNEL_DFS = (1 << 7),
1928         IWM_NVM_CHANNEL_WIDE = (1 << 8),
1929         IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1930         IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1931         IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1932 };
1933
1934 /*
1935  * Translate EEPROM flags to net80211.
1936  */
1937 static uint32_t
1938 iwm_eeprom_channel_flags(uint16_t ch_flags)
1939 {
1940         uint32_t nflags;
1941
1942         nflags = 0;
1943         if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1944                 nflags |= IEEE80211_CHAN_PASSIVE;
1945         if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1946                 nflags |= IEEE80211_CHAN_NOADHOC;
1947         if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1948                 nflags |= IEEE80211_CHAN_DFS;
1949                 /* Just in case. */
1950                 nflags |= IEEE80211_CHAN_NOADHOC;
1951         }
1952
1953         return (nflags);
1954 }
1955
/*
 * Add the valid channels in nvm_ch_flags[ch_idx..ch_num) to the
 * net80211 channel array 'chans', updating *nchans.
 *
 * 'bands' selects which PHY modes the channels are registered for.
 * Channels whose NVM flags lack IWM_NVM_CHANNEL_VALID are skipped;
 * the loop stops early if ieee80211_add_channel() returns an error.
 */
static void
iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
    int maxchans, int *nchans, int ch_idx, size_t ch_num,
    const uint8_t bands[])
{
        const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
        uint32_t nflags;
        uint16_t ch_flags;
        uint8_t ieee;
        int error;

        for (; ch_idx < ch_num; ch_idx++) {
                ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
                /* IEEE channel number tables differ by device family. */
                if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
                        ieee = iwm_nvm_channels[ch_idx];
                else
                        ieee = iwm_nvm_channels_8000[ch_idx];

                if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
                        IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
                            "Ch. %d Flags %x [%sGHz] - No traffic\n",
                            ieee, ch_flags,
                            (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
                            "5.2" : "2.4");
                        continue;
                }

                nflags = iwm_eeprom_channel_flags(ch_flags);
                error = ieee80211_add_channel(chans, maxchans, nchans,
                    ieee, 0, 0, nflags, bands);
                if (error != 0)
                        break;

                IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
                    "Ch. %d Flags %x [%sGHz] - Added\n",
                    ieee, ch_flags,
                    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
                    "5.2" : "2.4");
        }
}
1996
/*
 * net80211 callback: build the device channel list from the parsed NVM
 * channel flags.  2GHz channels 1-13 are added as 11b/11g, channel 14
 * as 11b only, and - if the SKU enables the 5GHz band - the remaining
 * NVM channels as 11a.
 */
static void
iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
    struct ieee80211_channel chans[])
{
        struct iwm_softc *sc = ic->ic_softc;
        struct iwm_nvm_data *data = sc->nvm_data;
        uint8_t bands[IEEE80211_MODE_BYTES];
        size_t ch_num;

        memset(bands, 0, sizeof(bands));
        /* 1-13: 11b/g channels. */
        setbit(bands, IEEE80211_MODE_11B);
        setbit(bands, IEEE80211_MODE_11G);
        iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
            IWM_NUM_2GHZ_CHANNELS - 1, bands);

        /* 14: 11b channel only. */
        clrbit(bands, IEEE80211_MODE_11G);
        iwm_add_channel_band(sc, chans, maxchans, nchans,
            IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);

        if (data->sku_cap_band_52GHz_enable) {
                /* Channel table size depends on the device family. */
                if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
                        ch_num = nitems(iwm_nvm_channels);
                else
                        ch_num = nitems(iwm_nvm_channels_8000);
                memset(bands, 0, sizeof(bands));
                setbit(bands, IEEE80211_MODE_11A);
                iwm_add_channel_band(sc, chans, maxchans, nchans,
                    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
        }
}
2029
/*
 * Determine the MAC address on family-8000 devices.
 *
 * Preference order: (1) the MAC_OVERRIDE NVM section, unless it holds
 * the reserved/broadcast/multicast or otherwise invalid address;
 * (2) the WFMP_MAC_ADDR PRPH registers.  If neither source yields an
 * address, data->hw_addr is zeroed and an error is logged.
 */
static void
iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
        const uint16_t *mac_override, const uint16_t *nvm_hw)
{
        const uint8_t *hw_addr;

        if (mac_override) {
                static const uint8_t reserved_mac[] = {
                        0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
                };

                hw_addr = (const uint8_t *)(mac_override +
                                 IWM_MAC_ADDRESS_OVERRIDE_8000);

                /*
                 * Store the MAC address from MAO section.
                 * No byte swapping is required in MAO section
                 */
                IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);

                /*
                 * Force the use of the OTP MAC address in case of reserved MAC
                 * address in the NVM, or if address is given but invalid.
                 */
                if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
                    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
                    iwm_is_valid_ether_addr(data->hw_addr) &&
                    !IEEE80211_IS_MULTICAST(data->hw_addr))
                        return;

                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                    "%s: mac address from nvm override section invalid\n",
                    __func__);
        }

        if (nvm_hw) {
                /* read the mac address from WFMP registers */
                uint32_t mac_addr0 =
                    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
                uint32_t mac_addr1 =
                    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

                /* The registers hold the address bytes in reversed order. */
                hw_addr = (const uint8_t *)&mac_addr0;
                data->hw_addr[0] = hw_addr[3];
                data->hw_addr[1] = hw_addr[2];
                data->hw_addr[2] = hw_addr[1];
                data->hw_addr[3] = hw_addr[0];

                hw_addr = (const uint8_t *)&mac_addr1;
                data->hw_addr[4] = hw_addr[1];
                data->hw_addr[5] = hw_addr[0];

                return;
        }

        device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
        memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
2088
2089 static int
2090 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2091             const uint16_t *phy_sku)
2092 {
2093         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2094                 return le16_to_cpup(nvm_sw + IWM_SKU);
2095
2096         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2097 }
2098
2099 static int
2100 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2101 {
2102         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2103                 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2104         else
2105                 return le32_to_cpup((const uint32_t *)(nvm_sw +
2106                                                 IWM_NVM_VERSION_8000));
2107 }
2108
2109 static int
2110 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2111                   const uint16_t *phy_sku)
2112 {
2113         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2114                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2115
2116         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2117 }
2118
2119 static int
2120 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2121 {
2122         int n_hw_addr;
2123
2124         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2125                 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2126
2127         n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2128
2129         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2130 }
2131
/*
 * Decode the NVM radio configuration word into its type / step / dash /
 * pnum components (plus TX/RX antenna masks on family 8000) and store
 * them in 'data'.
 */
static void
iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
                  uint32_t radio_cfg)
{
        if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
                data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
                data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
                data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
                data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
                return;
        }

        /* set the radio configuration for family 8000 */
        data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
        data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
        data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
        /* On family 8000 pnum is sourced from the FLAVOR bits. */
        data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
        data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
        data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
}
2152
/*
 * Fill data->hw_addr with the device MAC address.
 *
 * Pre-8000 families read it directly from the HW section of the NVM
 * (stored as little-endian 16-bit words, i.e. byte order 214365);
 * family 8000 uses the MAC-override section or WFMP registers (see
 * iwm_set_hw_address_family_8000()).
 *
 * Returns 0 on success or EINVAL if no valid address was found.
 */
static int
iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
                   const uint16_t *nvm_hw, const uint16_t *mac_override)
{
#ifdef notyet /* for FAMILY 9000 */
        if (cfg->mac_addr_from_csr) {
                iwm_set_hw_address_from_csr(sc, data);
        } else
#endif
        if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
                const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);

                /* The byte order is little endian 16 bit, meaning 214365 */
                data->hw_addr[0] = hw_addr[1];
                data->hw_addr[1] = hw_addr[0];
                data->hw_addr[2] = hw_addr[3];
                data->hw_addr[3] = hw_addr[2];
                data->hw_addr[4] = hw_addr[5];
                data->hw_addr[5] = hw_addr[4];
        } else {
                iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
        }

        if (!iwm_is_valid_ether_addr(data->hw_addr)) {
                device_printf(sc->sc_dev, "no valid mac address was found\n");
                return EINVAL;
        }

        return 0;
}
2183
/*
 * Assemble a struct iwm_nvm_data from the raw NVM section pointers.
 *
 * Allocates the structure (with a trailing channel-flags array sized
 * for the device family) using M_NOWAIT; returns NULL on allocation
 * failure or if no valid MAC address could be determined.  The caller
 * owns the result and releases it with iwm_free_nvm_data().
 *
 * Note: 'nvm_calib' is currently unused.
 */
static struct iwm_nvm_data *
iwm_parse_nvm_data(struct iwm_softc *sc,
                   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
                   const uint16_t *nvm_calib, const uint16_t *mac_override,
                   const uint16_t *phy_sku, const uint16_t *regulatory)
{
        struct iwm_nvm_data *data;
        uint32_t sku, radio_cfg;

        if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
                data = malloc(sizeof(*data) +
                    IWM_NUM_CHANNELS * sizeof(uint16_t),
                    M_DEVBUF, M_NOWAIT | M_ZERO);
        } else {
                data = malloc(sizeof(*data) +
                    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
                    M_DEVBUF, M_NOWAIT | M_ZERO);
        }
        if (!data)
                return NULL;

        data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);

        radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
        iwm_set_radio_cfg(sc, data, radio_cfg);

        sku = iwm_get_sku(sc, nvm_sw, phy_sku);
        data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
        data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
        /* 11n support is forced off here regardless of the SKU bits. */
        data->sku_cap_11n_enable = 0;

        data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);

        /* If no valid mac address was found - bail out */
        if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
                free(data, M_DEVBUF);
                return NULL;
        }

        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
                /* Channel flags come from the SW section on 7000 family. */
                memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
                    IWM_NUM_CHANNELS * sizeof(uint16_t));
        } else {
                /* Newer families keep them in the regulatory section. */
                memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
                    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
        }

        return data;
}
2233
2234 static void
2235 iwm_free_nvm_data(struct iwm_nvm_data *data)
2236 {
2237         if (data != NULL)
2238                 free(data, M_DEVBUF);
2239 }
2240
/*
 * Validate that all mandatory NVM sections for the device family were
 * read, then hand the section pointers to iwm_parse_nvm_data().
 *
 * 7000 family requires the SW and HW sections; 8000 family requires SW,
 * REGULATORY and PHY_SKU, plus at least one of HW or MAC_OVERRIDE.
 * Returns the parsed NVM data or NULL if a required section is missing.
 */
static struct iwm_nvm_data *
iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
{
        const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;

        /* Checking for required sections */
        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
                if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
                    !sections[sc->cfg->nvm_hw_section_num].data) {
                        device_printf(sc->sc_dev,
                            "Can't parse empty OTP/NVM sections\n");
                        return NULL;
                }
        } else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
                /* SW and REGULATORY sections are mandatory */
                if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
                    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
                        device_printf(sc->sc_dev,
                            "Can't parse empty OTP/NVM sections\n");
                        return NULL;
                }
                /* MAC_OVERRIDE or at least HW section must exist */
                if (!sections[sc->cfg->nvm_hw_section_num].data &&
                    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
                        device_printf(sc->sc_dev,
                            "Can't parse mac_address, empty sections\n");
                        return NULL;
                }

                /* PHY_SKU section is mandatory in B0 */
                if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
                        device_printf(sc->sc_dev,
                            "Can't parse phy_sku in B0, empty sections\n");
                        return NULL;
                }
        } else {
                panic("unknown device family %d\n", sc->cfg->device_family);
        }

        hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
        sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
        calib = (const uint16_t *)
            sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
        regulatory = (const uint16_t *)
            sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
        mac_override = (const uint16_t *)
            sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
        phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;

        return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
            phy_sku, regulatory);
}
2293
2294 static int
2295 iwm_nvm_init(struct iwm_softc *sc)
2296 {
2297         struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2298         int i, ret, section;
2299         uint32_t size_read = 0;
2300         uint8_t *nvm_buffer, *temp;
2301         uint16_t len;
2302
2303         memset(nvm_sections, 0, sizeof(nvm_sections));
2304
2305         if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2306                 return EINVAL;
2307
2308         /* load NVM values from nic */
2309         /* Read From FW NVM */
2310         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2311
2312         nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2313         if (!nvm_buffer)
2314                 return ENOMEM;
2315         for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2316                 /* we override the constness for initial read */
2317                 ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2318                                            &len, size_read);
2319                 if (ret)
2320                         continue;
2321                 size_read += len;
2322                 temp = malloc(len, M_DEVBUF, M_NOWAIT);
2323                 if (!temp) {
2324                         ret = ENOMEM;
2325                         break;
2326                 }
2327                 memcpy(temp, nvm_buffer, len);
2328
2329                 nvm_sections[section].data = temp;
2330                 nvm_sections[section].length = len;
2331         }
2332         if (!size_read)
2333                 device_printf(sc->sc_dev, "OTP is blank\n");
2334         free(nvm_buffer, M_DEVBUF);
2335
2336         sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2337         if (!sc->nvm_data)
2338                 return EINVAL;
2339         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2340                     "nvm version = %x\n", sc->nvm_data->nvm_version);
2341
2342         for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2343                 if (nvm_sections[i].data != NULL)
2344                         free(nvm_sections[i].data, M_DEVBUF);
2345         }
2346
2347         return 0;
2348 }
2349
/*
 * Copy one uCode section into device memory via the FH service DMA
 * channel.  The section is split into chunks of at most
 * IWM_FH_MEM_TB_MAX_LENGTH bytes; each chunk is staged in the
 * pre-allocated sc->fw_dma bounce buffer and pushed to the device by
 * iwm_pcie_load_firmware_chunk().
 *
 * Returns 0 on success, or the errno from the chunk loader.
 */
static int
iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
        const struct iwm_fw_desc *section)
{
        struct iwm_dma_info *dma = &sc->fw_dma;
        uint8_t *v_addr;
        bus_addr_t p_addr;
        uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
        int ret = 0;

        IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                    "%s: [%d] uCode section being loaded...\n",
                    __func__, section_num);

        v_addr = dma->vaddr;
        p_addr = dma->paddr;

        for (offset = 0; offset < section->len; offset += chunk_sz) {
                uint32_t copy_size, dst_addr;
                int extended_addr = FALSE;

                copy_size = MIN(chunk_sz, section->len - offset);
                dst_addr = section->offset + offset;

                /*
                 * Destinations inside the extended SRAM window require the
                 * extended-address bit in IWM_LMPM_CHICK for the duration
                 * of the transfer; it is set before and cleared after.
                 */
                if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
                    dst_addr <= IWM_FW_MEM_EXTENDED_END)
                        extended_addr = TRUE;

                if (extended_addr)
                        iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
                                          IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

                /* Stage the chunk in the DMA-safe bounce buffer. */
                memcpy(v_addr, (const uint8_t *)section->data + offset,
                    copy_size);
                bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
                ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
                                                   copy_size);

                if (extended_addr)
                        iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
                                            IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

                if (ret) {
                        device_printf(sc->sc_dev,
                            "%s: Could not load the [%d] uCode section\n",
                            __func__, section_num);
                        break;
                }
        }

        return ret;
}
2402
/*
 * ucode
 */

/*
 * DMA one firmware chunk from host memory (phy_addr) to device SRAM
 * (dst_addr) using the FH service channel, then sleep until the
 * transfer completes.
 *
 * Must be called with sc_mtx held: msleep() drops and reacquires it.
 * sc_fw_chunk_done is set elsewhere -- presumably by the interrupt
 * handler on the FH_TX interrupt enabled via iwm_enable_fw_load_int();
 * confirm against the ISR.
 *
 * NOTE(review): the comment below says "up to 5s", but msleep() uses a
 * timeout of hz (~1 second) and the loop breaks on the first timeout,
 * so the effective wait is ~1s unless the wakeup arrives first.
 *
 * Returns 0 on success, EBUSY if the NIC could not be locked, or
 * ETIMEDOUT if the chunk never completed.
 */
static int
iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
                             bus_addr_t phy_addr, uint32_t byte_cnt)
{
        int ret;

        sc->sc_fw_chunk_done = 0;

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* Pause the DMA channel while it is reprogrammed. */
        IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
            IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

        /* Destination address in device SRAM. */
        IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
            dst_addr);

        /* Source: low 32 bits of the host DMA address... */
        IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
            phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

        /* ...high address bits plus the byte count. */
        IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
            (iwm_get_dma_hi_addr(phy_addr)
             << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

        /* One TB, index 0, mark the TFD valid. */
        IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
            1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
            1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
            IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

        /* Kick off the transfer. */
        IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
            IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
            IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
            IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

        iwm_nic_unlock(sc);

        /* wait up to 5s for this segment to load */
        ret = 0;
        while (!sc->sc_fw_chunk_done) {
                ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
                if (ret)
                        break;
        }

        if (ret != 0) {
                device_printf(sc->sc_dev,
                    "fw chunk addr 0x%x len %d failed to load\n",
                    dst_addr, byte_cnt);
                return ETIMEDOUT;
        }

        return 0;
}
2459
/*
 * Load all uCode sections belonging to one CPU of an 8000-family
 * device and report per-section progress to the firmware through
 * IWM_FH_UCODE_LOAD_STATUS.  CPU1 uses the low 16 bits of the status
 * register, CPU2 the high 16 (shift_param).
 *
 * *first_ucode_section carries the section index between the CPU1 and
 * CPU2 invocations: it is reset for CPU1 and advanced past the
 * separator section for CPU2.
 *
 * Returns 0 on success or the errno from iwm_pcie_load_section().
 */
static int
iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
        const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
{
        int shift_param;
        int i, ret = 0, sec_num = 0x1;
        uint32_t val, last_read_idx = 0;

        if (cpu == 1) {
                shift_param = 0;
                *first_ucode_section = 0;
        } else {
                shift_param = 16;
                (*first_ucode_section)++;
        }

        for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
                last_read_idx = i;

                /*
                 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
                 * CPU1 to CPU2.
                 * PAGING_SEPARATOR_SECTION delimiter - separate between
                 * CPU2 non paged to CPU2 paging sec.
                 */
                if (!image->fw_sect[i].data ||
                    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
                    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
                        IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                                    "Break since Data not valid or Empty section, sec = %d\n",
                                    i);
                        break;
                }
                ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
                if (ret)
                        return ret;

                /* Notify the ucode of the loaded section number and status */
                if (iwm_nic_lock(sc)) {
                        val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
                        /* sec_num is a growing mask of loaded sections. */
                        val = val | (sec_num << shift_param);
                        IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
                        sec_num = (sec_num << 1) | 0x1;
                        iwm_nic_unlock(sc);
                }
        }

        *first_ucode_section = last_read_idx;

        iwm_enable_interrupts(sc);

        /* Tell the uCode this CPU half (or both) is fully loaded. */
        if (iwm_nic_lock(sc)) {
                if (cpu == 1)
                        IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
                else
                        IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
                iwm_nic_unlock(sc);
        }

        return 0;
}
2521
2522 static int
2523 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2524         const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2525 {
2526         int shift_param;
2527         int i, ret = 0;
2528         uint32_t last_read_idx = 0;
2529
2530         if (cpu == 1) {
2531                 shift_param = 0;
2532                 *first_ucode_section = 0;
2533         } else {
2534                 shift_param = 16;
2535                 (*first_ucode_section)++;
2536         }
2537
2538         for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2539                 last_read_idx = i;
2540
2541                 /*
2542                  * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2543                  * CPU1 to CPU2.
2544                  * PAGING_SEPARATOR_SECTION delimiter - separate between
2545                  * CPU2 non paged to CPU2 paging sec.
2546                  */
2547                 if (!image->fw_sect[i].data ||
2548                     image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2549                     image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2550                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2551                                     "Break since Data not valid or Empty section, sec = %d\n",
2552                                      i);
2553                         break;
2554                 }
2555
2556                 ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2557                 if (ret)
2558                         return ret;
2559         }
2560
2561         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2562                 iwm_set_bits_prph(sc,
2563                                   IWM_CSR_UCODE_LOAD_STATUS_ADDR,
2564                                   (IWM_LMPM_CPU_UCODE_LOADING_COMPLETED |
2565                                    IWM_LMPM_CPU_HDRS_LOADING_COMPLETED |
2566                                    IWM_LMPM_CPU_UCODE_LOADING_STARTED) <<
2567                                         shift_param);
2568
2569         *first_ucode_section = last_read_idx;
2570
2571         return 0;
2572
2573 }
2574
2575 static int
2576 iwm_pcie_load_given_ucode(struct iwm_softc *sc,
2577         const struct iwm_fw_sects *image)
2578 {
2579         int ret = 0;
2580         int first_ucode_section;
2581
2582         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2583                      image->is_dual_cpus ? "Dual" : "Single");
2584
2585         /* load to FW the binary non secured sections of CPU1 */
2586         ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2587         if (ret)
2588                 return ret;
2589
2590         if (image->is_dual_cpus) {
2591                 /* set CPU2 header address */
2592                 if (iwm_nic_lock(sc)) {
2593                         iwm_write_prph(sc,
2594                                        IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2595                                        IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2596                         iwm_nic_unlock(sc);
2597                 }
2598
2599                 /* load to FW the binary sections of CPU2 */
2600                 ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2601                                                  &first_ucode_section);
2602                 if (ret)
2603                         return ret;
2604         }
2605
2606         iwm_enable_interrupts(sc);
2607
2608         /* release CPU reset */
2609         IWM_WRITE(sc, IWM_CSR_RESET, 0);
2610
2611         return 0;
2612 }
2613
2614 int
2615 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2616         const struct iwm_fw_sects *image)
2617 {
2618         int ret = 0;
2619         int first_ucode_section;
2620
2621         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2622                     image->is_dual_cpus ? "Dual" : "Single");
2623
2624         /* configure the ucode to be ready to get the secured image */
2625         /* release CPU reset */
2626         if (iwm_nic_lock(sc)) {
2627                 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2628                     IWM_RELEASE_CPU_RESET_BIT);
2629                 iwm_nic_unlock(sc);
2630         }
2631
2632         /* load to FW the binary Secured sections of CPU1 */
2633         ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2634             &first_ucode_section);
2635         if (ret)
2636                 return ret;
2637
2638         /* load to FW the binary sections of CPU2 */
2639         return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2640             &first_ucode_section);
2641 }
2642
/* XXX Get rid of this definition */
/*
 * Mask all interrupts except FH_TX, which is required to signal
 * completion of each firmware chunk DMA during uCode load
 * (see iwm_start_fw() and iwm_pcie_load_firmware_chunk()).
 */
static inline void
iwm_enable_fw_load_int(struct iwm_softc *sc)
{
        IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
        sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
        IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
2651
/* XXX Add proper rfkill support code */
/*
 * Prepare the NIC and download the given firmware image.
 *
 * While the image is loaded all interrupts except FH_TX are masked:
 * FH_TX is needed to signal completion of each DMA'd firmware chunk.
 * Rfkill handshake bits are cleared before and after the mask change.
 *
 * Returns 0 on success, EIO if the HW never becomes ready, or an
 * errno from iwm_nic_init() / the uCode loaders.
 */
static int
iwm_start_fw(struct iwm_softc *sc,
        const struct iwm_fw_sects *fw)
{
        int ret;

        /* This may fail if AMT took ownership of the device */
        if (iwm_prepare_card_hw(sc)) {
                device_printf(sc->sc_dev,
                    "%s: Exit HW not ready\n", __func__);
                ret = EIO;
                goto out;
        }

        /* Ack any pending interrupts before reconfiguring the mask. */
        IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

        iwm_disable_interrupts(sc);

        /* make sure rfkill handshake bits are cleared */
        IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
        IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
            IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

        /* clear (again), then enable host interrupts */
        IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

        ret = iwm_nic_init(sc);
        if (ret) {
                device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
                goto out;
        }

        /*
         * Now, we load the firmware and don't want to be interrupted, even
         * by the RF-Kill interrupt (hence mask all the interrupt besides the
         * FH_TX interrupt which is needed to load the firmware). If the
         * RF-Kill switch is toggled, we will find out after having loaded
         * the firmware and return the proper value to the caller.
         */
        iwm_enable_fw_load_int(sc);

        /* really make sure rfkill handshake bits are cleared */
        /* maybe we should write a few times more?  just to make sure */
        IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
        IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

        /* Load the given image to the HW */
        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
                ret = iwm_pcie_load_given_ucode_8000(sc, fw);
        else
                ret = iwm_pcie_load_given_ucode(sc, fw);

        /* XXX re-check RF-Kill state */

out:
        return ret;
}
2710
2711 static int
2712 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2713 {
2714         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2715                 .valid = htole32(valid_tx_ant),
2716         };
2717
2718         return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2719             IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2720 }
2721
/* iwlwifi: mvm/fw.c */
/*
 * Send the PHY configuration and per-ucode-type calibration triggers
 * to the firmware (synchronous command).
 *
 * NOTE(review): phy_cfg_cmd is not zero-initialized; only the three
 * members below are assigned.  If the struct contains padding or
 * additional members, uninitialized stack bytes are sent to the
 * firmware -- confirm against the struct definition.
 */
static int
iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
{
        struct iwm_phy_cfg_cmd phy_cfg_cmd;
        enum iwm_ucode_type ucode_type = sc->cur_ucode;

        /* Set parameters */
        phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
        /* Calibration triggers depend on which ucode image is running. */
        phy_cfg_cmd.calib_control.event_trigger =
            sc->sc_default_calib[ucode_type].event_trigger;
        phy_cfg_cmd.calib_control.flow_trigger =
            sc->sc_default_calib[ucode_type].flow_trigger;

        IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
            "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
        return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
            sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
2741
/*
 * Notification-wait callback for the uCode ALIVE response.  Three
 * response layouts exist (ver1, ver2, ver3) and are distinguished
 * purely by payload length.  Extracts the error/log event table
 * pointers, the scheduler base address and the alive status into
 * *data (a struct iwm_mvm_alive_data supplied by the waiter).
 *
 * NOTE(review): if the payload matches none of the three sizes the
 * function falls through without touching alive_data; the caller
 * (iwm_mvm_load_ucode_wait_alive) zeroes alive_data beforehand, so
 * alive_data->valid then remains FALSE.
 *
 * Always returns TRUE, ending the notification wait.
 */
static int
iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
{
        struct iwm_mvm_alive_data *alive_data = data;
        struct iwm_mvm_alive_resp_ver1 *palive1;
        struct iwm_mvm_alive_resp_ver2 *palive2;
        struct iwm_mvm_alive_resp *palive;

        if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
                /* VER1: no UMAC, hence no UMAC error table. */
                palive1 = (void *)pkt->data;

                sc->support_umac_log = FALSE;
                sc->error_event_table =
                        le32toh(palive1->error_event_table_ptr);
                sc->log_event_table =
                        le32toh(palive1->log_event_table_ptr);
                alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);

                alive_data->valid = le16toh(palive1->status) ==
                                    IWM_ALIVE_STATUS_OK;
                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                            "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
                             le16toh(palive1->status), palive1->ver_type,
                             palive1->ver_subtype, palive1->flags);
        } else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
                /* VER2: adds the UMAC error-info address and version. */
                palive2 = (void *)pkt->data;
                sc->error_event_table =
                        le32toh(palive2->error_event_table_ptr);
                sc->log_event_table =
                        le32toh(palive2->log_event_table_ptr);
                alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
                sc->umac_error_event_table =
                        le32toh(palive2->error_info_addr);

                alive_data->valid = le16toh(palive2->status) ==
                                    IWM_ALIVE_STATUS_OK;
                /* A non-zero table address means UMAC logging works. */
                if (sc->umac_error_event_table)
                        sc->support_umac_log = TRUE;

                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                            "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
                            le16toh(palive2->status), palive2->ver_type,
                            palive2->ver_subtype, palive2->flags);

                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                            "UMAC version: Major - 0x%x, Minor - 0x%x\n",
                            palive2->umac_major, palive2->umac_minor);
        } else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
                /* VER3: current layout; UMAC versions are 32-bit. */
                palive = (void *)pkt->data;

                sc->error_event_table =
                        le32toh(palive->error_event_table_ptr);
                sc->log_event_table =
                        le32toh(palive->log_event_table_ptr);
                alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
                sc->umac_error_event_table =
                        le32toh(palive->error_info_addr);

                alive_data->valid = le16toh(palive->status) ==
                                    IWM_ALIVE_STATUS_OK;
                if (sc->umac_error_event_table)
                        sc->support_umac_log = TRUE;

                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                            "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
                            le16toh(palive->status), palive->ver_type,
                            palive->ver_subtype, palive->flags);

                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                            "UMAC version: Major - 0x%x, Minor - 0x%x\n",
                            le32toh(palive->umac_major),
                            le32toh(palive->umac_minor));
        }

        return TRUE;
}
2818
2819 static int
2820 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2821         struct iwm_rx_packet *pkt, void *data)
2822 {
2823         struct iwm_phy_db *phy_db = data;
2824
2825         if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2826                 if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2827                         device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2828                             __func__, pkt->hdr.code);
2829                 }
2830                 return TRUE;
2831         }
2832
2833         if (iwm_phy_db_set_section(phy_db, pkt)) {
2834                 device_printf(sc->sc_dev,
2835                     "%s: iwm_phy_db_set_section failed\n", __func__);
2836         }
2837
2838         return FALSE;
2839 }
2840
/*
 * Load the requested ucode image (INIT or REGULAR), start it and block
 * until the ALIVE notification arrives, then configure the scheduler
 * base address and -- if the image uses paging -- the FW paging flow.
 *
 * On any failure sc->cur_ucode is restored to the previously running
 * image type.  Called with the sc mutex held; the mutex is dropped
 * around the notification wait.
 *
 * Returns 0 on success or an errno.
 */
static int
iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
        enum iwm_ucode_type ucode_type)
{
        struct iwm_notification_wait alive_wait;
        struct iwm_mvm_alive_data alive_data;
        const struct iwm_fw_sects *fw;
        enum iwm_ucode_type old_type = sc->cur_ucode;
        int error;
        static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };

        if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
                device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
                        error);
                return error;
        }
        fw = &sc->sc_fw.fw_sects[ucode_type];
        sc->cur_ucode = ucode_type;
        sc->ucode_loaded = FALSE;

        /* Zeroed so iwm_alive_fn()'s fall-through leaves valid == FALSE. */
        memset(&alive_data, 0, sizeof(alive_data));
        /* Register the waiter BEFORE starting the firmware, so the
         * ALIVE notification cannot be missed. */
        iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
                                   alive_cmd, nitems(alive_cmd),
                                   iwm_alive_fn, &alive_data);

        error = iwm_start_fw(sc, fw);
        if (error) {
                device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
                sc->cur_ucode = old_type;
                iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
                return error;
        }

        /*
         * Some things may run in the background now, but we
         * just wait for the ALIVE notification here.
         */
        IWM_UNLOCK(sc);
        error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
                                      IWM_MVM_UCODE_ALIVE_TIMEOUT);
        IWM_LOCK(sc);
        if (error) {
                /* Dump secure-boot status to help diagnose 8000 failures. */
                if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
                        uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
                        if (iwm_nic_lock(sc)) {
                                a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
                                b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
                                iwm_nic_unlock(sc);
                        }
                        device_printf(sc->sc_dev,
                            "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
                            a, b);
                }
                sc->cur_ucode = old_type;
                return error;
        }

        if (!alive_data.valid) {
                device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
                    __func__);
                sc->cur_ucode = old_type;
                return EIO;
        }

        iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);

        /*
         * configure and operate fw paging mechanism.
         * driver configures the paging flow only once, CPU2 paging image
         * included in the IWM_UCODE_INIT image.
         */
        if (fw->paging_mem_size) {
                error = iwm_save_fw_paging(sc, fw);
                if (error) {
                        device_printf(sc->sc_dev,
                            "%s: failed to save the FW paging image\n",
                            __func__);
                        return error;
                }

                error = iwm_send_paging_cmd(sc, fw);
                if (error) {
                        device_printf(sc->sc_dev,
                            "%s: failed to send the paging cmd\n", __func__);
                        iwm_free_fw_paging(sc);
                        return error;
                }
        }

        if (!error)
                sc->ucode_loaded = TRUE;
        return error;
}
2934
2935 /*
2936  * mvm misc bits
2937  */
2938
2939 /*
2940  * follows iwlwifi/fw.c
2941  */
/*
 * Boot the INIT ucode image and run its initialization flow.
 *
 * With justnvm set, only the NVM is read (and the MAC address copied
 * out); otherwise BT coex, Smart FIFO, TX antenna and PHY config are
 * sent and the function blocks -- with the sc mutex dropped -- until
 * the firmware reports init/calibration completion.
 *
 * Returns 0 on success or an errno.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
        struct iwm_notification_wait calib_wait;
        static const uint16_t init_complete[] = {
                IWM_INIT_COMPLETE_NOTIF,
                IWM_CALIB_RES_NOTIF_PHY_DB
        };
        int ret;

        /* do not operate with rfkill switch turned on */
        if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
                device_printf(sc->sc_dev,
                    "radio is disabled by hardware switch\n");
                return EPERM;
        }

        /* Register for the completion notifications before booting. */
        iwm_init_notification_wait(sc->sc_notif_wait,
                                   &calib_wait,
                                   init_complete,
                                   nitems(init_complete),
                                   iwm_wait_phy_db_entry,
                                   sc->sc_phy_db);

        /* Will also start the device */
        ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
        if (ret) {
                device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
                    ret);
                goto error;
        }

        if (justnvm) {
                /* Read nvm */
                ret = iwm_nvm_init(sc);
                if (ret) {
                        device_printf(sc->sc_dev, "failed to read nvm\n");
                        goto error;
                }
                IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
                /*
                 * Deliberate goto error on success: the calibration wait
                 * is never performed in justnvm mode, so the registered
                 * notification must still be removed before returning
                 * (ret is 0 here).
                 */
                goto error;
        }

        ret = iwm_send_bt_init_conf(sc);
        if (ret) {
                device_printf(sc->sc_dev,
                    "failed to send bt coex configuration: %d\n", ret);
                goto error;
        }

        /* Init Smart FIFO. */
        ret = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
        if (ret)
                goto error;

        /* Send TX valid antennas before triggering calibrations */
        ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
        if (ret) {
                device_printf(sc->sc_dev,
                    "failed to send antennas before calibration: %d\n", ret);
                goto error;
        }

        /*
         * Send phy configurations command to init uCode
         * to start the 16.0 uCode init image internal calibrations.
         */
        ret = iwm_send_phy_cfg_cmd(sc);
        if (ret) {
                device_printf(sc->sc_dev,
                    "%s: Failed to run INIT calibrations: %d\n",
                    __func__, ret);
                goto error;
        }

        /*
         * Nothing to do but wait for the init complete notification
         * from the firmware.  iwm_wait_notification() removes the
         * waiter itself on completion/timeout, hence the jump past
         * the error-path iwm_remove_notification().
         */
        IWM_UNLOCK(sc);
        ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
            IWM_MVM_UCODE_CALIB_TIMEOUT);
        IWM_LOCK(sc);


        goto out;

error:
        iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
out:
        return ret;
}
3034
3035 /*
3036  * receive side
3037  */
3038
/* (re)stock rx ring, called at init-time and at runtime */
/*
 * Allocate a jumbo mbuf cluster, DMA-map it via the ring's spare map,
 * and install it at RX ring slot idx.  The spare map is swapped with
 * the slot's map so the ring always keeps one pre-allocated map for
 * the next replenish, avoiding allocation in the fast path.
 *
 * Returns 0 on success, ENOBUFS/errno on allocation or mapping failure
 * (in which case the slot is left unchanged).
 */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
        struct iwm_rx_ring *ring = &sc->rxq;
        struct iwm_rx_data *data = &ring->data[idx];
        struct mbuf *m;
        bus_dmamap_t dmamap;
        bus_dma_segment_t seg;
        int nsegs, error;

        m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
        if (m == NULL)
                return ENOBUFS;

        m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
        /* Map into the spare map first; the old buffer stays mapped
         * until we know the new one mapped successfully. */
        error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
            &seg, &nsegs, BUS_DMA_NOWAIT);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "%s: can't map mbuf, error %d\n", __func__, error);
                m_freem(m);
                return error;
        }

        if (data->m != NULL)
                bus_dmamap_unload(ring->data_dmat, data->map);

        /* Swap ring->spare_map with data->map */
        dmamap = data->map;
        data->map = ring->spare_map;
        ring->spare_map = dmamap;

        bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
        data->m = m;

        /* Update RX descriptor. */
        /* The hardware takes a 256-byte-aligned address, stored >> 8. */
        KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
        ring->desc[idx] = htole32(seg.ds_addr >> 8);
        bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
            BUS_DMASYNC_PREWRITE);

        return 0;
}
3083
3084 /* iwlwifi: mvm/rx.c */
3085 /*
3086  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3087  * values are reported by the fw as positive values - need to negate
3088  * to obtain their dBM.  Account for missing antennas by replacing 0
3089  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3090  */
3091 static int
3092 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3093 {
3094         int energy_a, energy_b, energy_c, max_energy;
3095         uint32_t val;
3096
3097         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3098         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3099             IWM_RX_INFO_ENERGY_ANT_A_POS;
3100         energy_a = energy_a ? -energy_a : -256;
3101         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3102             IWM_RX_INFO_ENERGY_ANT_B_POS;
3103         energy_b = energy_b ? -energy_b : -256;
3104         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3105             IWM_RX_INFO_ENERGY_ANT_C_POS;
3106         energy_c = energy_c ? -energy_c : -256;
3107         max_energy = MAX(energy_a, energy_b);
3108         max_energy = MAX(max_energy, energy_c);
3109
3110         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3111             "energy In A %d B %d C %d , and max %d\n",
3112             energy_a, energy_b, energy_c, max_energy);
3113
3114         return max_energy;
3115 }
3116
3117 static void
3118 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3119 {
3120         struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3121
3122         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3123
3124         memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3125 }
3126
3127 /*
3128  * Retrieve the average noise (in dBm) among receivers.
3129  */
3130 static int
3131 iwm_get_noise(struct iwm_softc *sc,
3132     const struct iwm_mvm_statistics_rx_non_phy *stats)
3133 {
3134         int i, total, nbant, noise;
3135
3136         total = nbant = noise = 0;
3137         for (i = 0; i < 3; i++) {
3138                 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3139                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3140                     __func__,
3141                     i,
3142                     noise);
3143
3144                 if (noise) {
3145                         total += noise;
3146                         nbant++;
3147                 }
3148         }
3149
3150         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3151             __func__, nbant, total);
3152 #if 0
3153         /* There should be at least one antenna but check anyway. */
3154         return (nbant == 0) ? -127 : (total / nbant) - 107;
3155 #else
3156         /* For now, just hard-code it to -96 to be safe */
3157         return (-96);
3158 #endif
3159 }
3160
3161 /*
3162  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3163  *
3164  * Handles the actual data of the Rx packet from the fw
3165  */
3166 static boolean_t
3167 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3168         boolean_t stolen)
3169 {
3170         struct ieee80211com *ic = &sc->sc_ic;
3171         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3172         struct ieee80211_frame *wh;
3173         struct ieee80211_node *ni;
3174         struct ieee80211_rx_stats rxs;
3175         struct iwm_rx_phy_info *phy_info;
3176         struct iwm_rx_mpdu_res_start *rx_res;
3177         struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3178         uint32_t len;
3179         uint32_t rx_pkt_status;
3180         int rssi;
3181
3182         phy_info = &sc->sc_last_phy_info;
3183         rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3184         wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3185         len = le16toh(rx_res->byte_count);
3186         rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3187
3188         if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3189                 device_printf(sc->sc_dev,
3190                     "dsp size out of range [0,20]: %d\n",
3191                     phy_info->cfg_phy_cnt);
3192                 goto fail;
3193         }
3194
3195         if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3196             !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3197                 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3198                     "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3199                 goto fail;
3200         }
3201
3202         rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3203
3204         /* Map it to relative value */
3205         rssi = rssi - sc->sc_noise;
3206
3207         /* replenish ring for the buffer we're going to feed to the sharks */
3208         if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3209                 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3210                     __func__);
3211                 goto fail;
3212         }
3213
3214         m->m_data = pkt->data + sizeof(*rx_res);
3215         m->m_pkthdr.len = m->m_len = len;
3216
3217         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3218             "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3219
3220         ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3221
3222         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3223             "%s: phy_info: channel=%d, flags=0x%08x\n",
3224             __func__,
3225             le16toh(phy_info->channel),
3226             le16toh(phy_info->phy_flags));
3227
3228         /*
3229          * Populate an RX state struct with the provided information.
3230          */
3231         bzero(&rxs, sizeof(rxs));
3232         rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3233         rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3234         rxs.c_ieee = le16toh(phy_info->channel);
3235         if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3236                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3237         } else {
3238                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3239         }
3240
3241         /* rssi is in 1/2db units */
3242         rxs.rssi = rssi * 2;
3243         rxs.nf = sc->sc_noise;
3244
3245         if (ieee80211_radiotap_active_vap(vap)) {
3246                 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3247
3248                 tap->wr_flags = 0;
3249                 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3250                         tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3251                 tap->wr_chan_freq = htole16(rxs.c_freq);
3252                 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3253                 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3254                 tap->wr_dbm_antsignal = (int8_t)rssi;
3255                 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3256                 tap->wr_tsft = phy_info->system_timestamp;
3257                 switch (phy_info->rate) {
3258                 /* CCK rates. */
3259                 case  10: tap->wr_rate =   2; break;
3260                 case  20: tap->wr_rate =   4; break;
3261                 case  55: tap->wr_rate =  11; break;
3262                 case 110: tap->wr_rate =  22; break;
3263                 /* OFDM rates. */
3264                 case 0xd: tap->wr_rate =  12; break;
3265                 case 0xf: tap->wr_rate =  18; break;
3266                 case 0x5: tap->wr_rate =  24; break;
3267                 case 0x7: tap->wr_rate =  36; break;
3268                 case 0x9: tap->wr_rate =  48; break;
3269                 case 0xb: tap->wr_rate =  72; break;
3270                 case 0x1: tap->wr_rate =  96; break;
3271                 case 0x3: tap->wr_rate = 108; break;
3272                 /* Unknown rate: should not happen. */
3273                 default:  tap->wr_rate =   0;
3274                 }
3275         }
3276
3277         IWM_UNLOCK(sc);
3278         if (ni != NULL) {
3279                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3280                 ieee80211_input_mimo(ni, m, &rxs);
3281                 ieee80211_free_node(ni);
3282         } else {
3283                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3284                 ieee80211_input_mimo_all(ic, m, &rxs);
3285         }
3286         IWM_LOCK(sc);
3287
3288         return TRUE;
3289
3290 fail:   counter_u64_add(ic->ic_ierrors, 1);
3291         return FALSE;
3292 }
3293
3294 static int
3295 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3296         struct iwm_node *in)
3297 {
3298         struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3299         struct ieee80211_node *ni = &in->in_ni;
3300         struct ieee80211vap *vap = ni->ni_vap;
3301         int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3302         int failack = tx_resp->failure_frame;
3303
3304         KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3305
3306         /* Update rate control statistics. */
3307         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3308             __func__,
3309             (int) le16toh(tx_resp->status.status),
3310             (int) le16toh(tx_resp->status.sequence),
3311             tx_resp->frame_count,
3312             tx_resp->bt_kill_count,
3313             tx_resp->failure_rts,
3314             tx_resp->failure_frame,
3315             le32toh(tx_resp->initial_rate),
3316             (int) le16toh(tx_resp->wireless_media_time));
3317
3318         if (status != IWM_TX_STATUS_SUCCESS &&
3319             status != IWM_TX_STATUS_DIRECT_DONE) {
3320                 ieee80211_ratectl_tx_complete(vap, ni,
3321                     IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3322                 return (1);
3323         } else {
3324                 ieee80211_ratectl_tx_complete(vap, ni,
3325                     IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3326                 return (0);
3327         }
3328 }
3329
/*
 * Handle a TX-command completion notification from the firmware:
 * reclaim the ring slot, run rate control, complete the mbuf to
 * net80211, and restart output if the ring drained below the
 * low watermark.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;		/* slot within the completing ring */
	int qid = cmd_hdr->qid;		/* which TX queue this is for */
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;
	struct mbuf *m = txd->m;
	int status;

	KASSERT(txd->done == 0, ("txd not done"));
	KASSERT(txd->in != NULL, ("txd without node"));
	KASSERT(txd->m != NULL, ("txd without mbuf"));

	/* A frame completed; reset the TX watchdog. */
	sc->sc_tx_timer = 0;

	/* 0 on successful transmission, nonzero on failure. */
	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "free txd %p, in %p\n", txd, txd->in);
	/* Clear the slot before handing the frame up the stack. */
	txd->done = 1;
	txd->m = NULL;
	txd->in = NULL;

	/* Pass ownership of 'm' to net80211; it is not touched again here. */
	ieee80211_tx_complete(&in->in_ni, m, status);

	/* Resume output once this queue drops below the low watermark. */
	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0) {
			iwm_start(sc);
		}
	}
}
3369
3370 /*
3371  * transmit side
3372  */
3373
/*
 * Process a "command done" firmware notification.  This is where we wakeup
 * processes waiting for a synchronous command completion.
 * from if_iwn
 */
static void
iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
	struct iwm_tx_data *data;

	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
		return;	/* Not a command ack. */
	}

	/* XXX wide commands? */
	IWM_DPRINTF(sc, IWM_DEBUG_CMD,
	    "cmd notification type 0x%x qid %d idx %d\n",
	    pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);

	data = &ring->data[pkt->hdr.idx];

	/* If the command was mapped in an mbuf, free it. */
	if (data->m != NULL) {
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	/* Wake any thread sleeping on this command's descriptor slot. */
	wakeup(&ring->desc[pkt->hdr.idx]);

	/*
	 * Sanity check: with 'queued' outstanding commands, the slot that
	 * just completed should be exactly 'queued' entries behind 'cur'.
	 */
	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
		device_printf(sc->sc_dev,
		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
		/* XXX call iwm_force_nmi() */
	}

	KASSERT(ring->queued > 0, ("ring->queued is empty?"));
	ring->queued--;
	/* Last outstanding command: release the cmd-in-flight reference. */
	if (ring->queued == 0)
		iwm_pcie_clear_cmd_in_flight(sc);
}
3418
#if 0
/*
 * necessary only for block ack mode
 */
/*
 * Disabled: would mirror the frame's byte count into the firmware's
 * scheduler byte-count table (and its duplicate window region) so the
 * TX scheduler can account for queued data.  Only needed once block-ack
 * (aggregation) support is enabled; iwm_tx() has the matching call,
 * also under #if 0.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	/* Table entry packs the station id into the top nibble. */
	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
}
#endif
3451
3452 /*
3453  * Take an 802.11 (non-n) rate, find the relevant rate
3454  * table entry.  return the index into in_ridx[].
3455  *
3456  * The caller then uses that index back into in_ridx
3457  * to figure out the rate index programmed /into/
3458  * the firmware for this given node.
3459  */
3460 static int
3461 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3462     uint8_t rate)
3463 {
3464         int i;
3465         uint8_t r;
3466
3467         for (i = 0; i < nitems(in->in_ridx); i++) {
3468                 r = iwm_rates[in->in_ridx[i]].rate;
3469                 if (rate == r)
3470                         return (i);
3471         }
3472
3473         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3474             "%s: couldn't find an entry for rate=%d\n",
3475             __func__,
3476             rate);
3477
3478         /* XXX Return the first */
3479         /* XXX TODO: have it return the /lowest/ */
3480         return (0);
3481 }
3482
3483 static int
3484 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3485 {
3486         int i;
3487
3488         for (i = 0; i < nitems(iwm_rates); i++) {
3489                 if (iwm_rates[i].rate == rate)
3490                         return (i);
3491         }
3492         /* XXX error? */
3493         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3494             "%s: couldn't find an entry for rate=%d\n",
3495             __func__,
3496             rate);
3497         return (0);
3498 }
3499
/*
 * Fill in the rate related information for a transmit command.
 *
 * Selects a rate index based on the frame type (in decreasing priority:
 * management, multicast, fixed unicast rate, EAPOL, rate-controlled
 * data, default), sets retry limits and rate flags on 'tx', and returns
 * the chosen iwm_rates[] entry for the caller (e.g. radiotap).
 */
static const struct iwm_rate *
iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
	struct mbuf *m, struct iwm_tx_cmd *tx)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_frame *wh;
	const struct ieee80211_txparam *tp = ni->ni_txparms;
	const struct iwm_rate *rinfo;
	int type;
	int ridx, rate_flags;

	wh = mtod(m, struct ieee80211_frame *);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		/* Management frames go at the configured management rate. */
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
	} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
		/* User pinned a fixed unicast rate; honor it. */
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
	} else if (m->m_flags & M_EAPOL) {
		/* Keep EAPOL robust: use the (low) management rate. */
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: EAPOL\n", __func__);
	} else if (type == IEEE80211_FC0_TYPE_DATA) {
		int i;

		/* for data frames, use RS table */
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
		/* XXX pass pktlen */
		/* Refresh net80211 rate control, then read ni_txrate. */
		(void) ieee80211_ratectl_rate(ni, NULL, 0);
		i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
		ridx = in->in_ridx[i];

		/* This is the index into the programmed table */
		tx->initial_rate_index = i;
		/* Tell firmware to use the per-station rate table. */
		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);

		IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
		    "%s: start with i=%d, txrate %d\n",
		    __func__, i, iwm_rates[ridx].rate);
	} else {
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DEFAULT (%d)\n",
		    __func__, tp->mgmtrate);
	}

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
	    "%s: frame type=%d txrate %d\n",
		__func__, type, iwm_rates[ridx].rate);

	rinfo = &iwm_rates[ridx];

	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
	    __func__, ridx,
	    rinfo->rate,
	    !! (IWM_RIDX_IS_CCK(ridx))
	    );

	/* XXX TODO: hard-coded TX antenna? */
	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
	if (IWM_RIDX_IS_CCK(ridx))
		rate_flags |= IWM_RATE_MCS_CCK_MSK;
	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);

	return rinfo;
}
3579
#define TB0_SIZE 16
/*
 * Queue one frame for transmission on TX ring 'ac'.
 *
 * The firmware command (iwm_cmd_header + iwm_tx_cmd + the 802.11 header)
 * lives in the ring's per-slot command buffer and is described by the
 * first two TFD buffer entries; the frame payload is DMA-mapped straight
 * from the mbuf into the remaining entries.
 *
 * Returns 0 on success or an errno.  The mbuf is consumed on every path:
 * stored in the ring slot on success, freed on failure.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_node *in = IWM_NODE(ni);
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
	int nsegs;
	uint8_t tid, type;
	int i, totlen, error, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	tid = 0;	/* everything currently goes out as TID 0 */
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	/* Choose the TX rate and fill the rate fields of the command. */
	rinfo = iwm_tx_fill_cmd(sc, in, m, tx);

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX && do software encryption. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rinfo->rate;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		ieee80211_radiotap_tx(vap, m);
	}


	totlen = m->m_pkthdr.len;

	flags = 0;
	/* Unicast frames expect an ACK from the receiver. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* RTS/CTS protection for unicast data above the RTS threshold. */
	if (type == IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	/* Non-data and multicast frames go out via the auxiliary station. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		/* (Re)association frames get the longer PM timeout. */
		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
		} else {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
		}
	} else {
		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	/* The header now travels inside the command; map only the payload. */
	m_adj(m, hdrlen);
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		if (error != EFBIG) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			m_freem(m);
			return (ENOBUFS);
		}
		m = m1;

		/* Retry the mapping with the collapsed mbuf chain. */
		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending txd %p, in %p\n", data, data->in);
	KASSERT(data->in != NULL, ("node is NULL"));

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
	    ring->qid, ring->cur, totlen, nsegs,
	    le32toh(tx->tx_flags),
	    le32toh(tx->rate_n_flags),
	    tx->initial_rate_index
	    );

	/* Fill TX descriptor. */
	/* TFD entries 0 and 1 describe the command buffer; the rest are data. */
	desc->num_tbs = 2 + nsegs;

	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	for (i = 0; i < nsegs; i++) {
		seg = &segs[i];
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	/* Flush payload, command and descriptor writes before the kick. */
	bus_dmamap_sync(ring->data_dmat, data->map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
3797
3798 static int
3799 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3800     const struct ieee80211_bpf_params *params)
3801 {
3802         struct ieee80211com *ic = ni->ni_ic;
3803         struct iwm_softc *sc = ic->ic_softc;
3804         int error = 0;
3805
3806         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3807             "->%s begin\n", __func__);
3808
3809         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3810                 m_freem(m);
3811                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3812                     "<-%s not RUNNING\n", __func__);
3813                 return (ENETDOWN);
3814         }
3815
3816         IWM_LOCK(sc);
3817         /* XXX fix this */
3818         if (params == NULL) {
3819                 error = iwm_tx(sc, m, ni, 0);
3820         } else {
3821                 error = iwm_tx(sc, m, ni, 0);
3822         }
3823         sc->sc_tx_timer = 5;
3824         IWM_UNLOCK(sc);
3825
3826         return (error);
3827 }
3828
3829 /*
3830  * mvm/tx.c
3831  */
3832
3833 /*
3834  * Note that there are transports that buffer frames before they reach
3835  * the firmware. This means that after flush_tx_path is called, the
3836  * queue might not be empty. The race-free way to handle this is to:
3837  * 1) set the station as draining
3838  * 2) flush the Tx path
3839  * 3) wait for the transport queues to be empty
3840  */
3841 int
3842 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3843 {
3844         int ret;
3845         struct iwm_tx_path_flush_cmd flush_cmd = {
3846                 .queues_ctl = htole32(tfd_msk),
3847                 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3848         };
3849
3850         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3851             sizeof(flush_cmd), &flush_cmd);
3852         if (ret)
3853                 device_printf(sc->sc_dev,
3854                     "Flushing tx queue failed: %d\n", ret);
3855         return ret;
3856 }
3857
3858 /*
3859  * BEGIN mvm/quota.c
3860  */
3861
3862 static int
3863 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
3864 {
3865         struct iwm_time_quota_cmd cmd;
3866         int i, idx, ret, num_active_macs, quota, quota_rem;
3867         int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3868         int n_ifs[IWM_MAX_BINDINGS] = {0, };
3869         uint16_t id;
3870
3871         memset(&cmd, 0, sizeof(cmd));
3872
3873         /* currently, PHY ID == binding ID */
3874         if (ivp) {
3875                 id = ivp->phy_ctxt->id;
3876                 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3877                 colors[id] = ivp->phy_ctxt->color;
3878
3879                 if (1)
3880                         n_ifs[id] = 1;
3881         }
3882
3883         /*
3884          * The FW's scheduling session consists of
3885          * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3886          * equally between all the bindings that require quota
3887          */
3888         num_active_macs = 0;
3889         for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3890                 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3891                 num_active_macs += n_ifs[i];
3892         }
3893
3894         quota = 0;
3895         quota_rem = 0;
3896         if (num_active_macs) {
3897                 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3898                 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3899         }
3900
3901         for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3902                 if (colors[i] < 0)
3903                         continue;
3904
3905                 cmd.quotas[idx].id_and_color =
3906                         htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3907
3908                 if (n_ifs[i] <= 0) {
3909                         cmd.quotas[idx].quota = htole32(0);
3910                         cmd.quotas[idx].max_duration = htole32(0);
3911                 } else {
3912                         cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3913                         cmd.quotas[idx].max_duration = htole32(0);
3914                 }
3915                 idx++;
3916         }
3917
3918         /* Give the remainder of the session to the first binding */
3919         cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3920
3921         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3922             sizeof(cmd), &cmd);
3923         if (ret)
3924                 device_printf(sc->sc_dev,
3925                     "%s: Failed to send quota: %d\n", __func__, ret);
3926         return ret;
3927 }
3928
3929 /*
3930  * END mvm/quota.c
3931  */
3932
3933 /*
3934  * ieee80211 routines
3935  */
3936
3937 /*
3938  * Change to AUTH state in 80211 state machine.  Roughly matches what
3939  * Linux does in bss_info_changed().
3940  */
3941 static int
3942 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3943 {
3944         struct ieee80211_node *ni;
3945         struct iwm_node *in;
3946         struct iwm_vap *iv = IWM_VAP(vap);
3947         uint32_t duration;
3948         int error;
3949
3950         /*
3951          * XXX i have a feeling that the vap node is being
3952          * freed from underneath us. Grr.
3953          */
3954         ni = ieee80211_ref_node(vap->iv_bss);
3955         in = IWM_NODE(ni);
3956         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3957             "%s: called; vap=%p, bss ni=%p\n",
3958             __func__,
3959             vap,
3960             ni);
3961
3962         in->in_assoc = 0;
3963
3964         /*
3965          * Firmware bug - it'll crash if the beacon interval is less
3966          * than 16. We can't avoid connecting at all, so refuse the
3967          * station state change, this will cause net80211 to abandon
3968          * attempts to connect to this AP, and eventually wpa_s will
3969          * blacklist the AP...
3970          */
3971         if (ni->ni_intval < 16) {
3972                 device_printf(sc->sc_dev,
3973                     "AP %s beacon interval is %d, refusing due to firmware bug!\n",
3974                     ether_sprintf(ni->ni_bssid), ni->ni_intval);
3975                 error = EINVAL;
3976                 goto out;
3977         }
3978
3979         error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
3980         if (error != 0)
3981                 return error;
3982
3983         error = iwm_allow_mcast(vap, sc);
3984         if (error) {
3985                 device_printf(sc->sc_dev,
3986                     "%s: failed to set multicast\n", __func__);
3987                 goto out;
3988         }
3989
3990         /*
3991          * This is where it deviates from what Linux does.
3992          *
3993          * Linux iwlwifi doesn't reset the nic each time, nor does it
3994          * call ctxt_add() here.  Instead, it adds it during vap creation,
3995          * and always does a mac_ctx_changed().
3996          *
3997          * The openbsd port doesn't attempt to do that - it reset things
3998          * at odd states and does the add here.
3999          *
4000          * So, until the state handling is fixed (ie, we never reset
4001          * the NIC except for a firmware failure, which should drag
4002          * the NIC back to IDLE, re-setup and re-add all the mac/phy
4003          * contexts that are required), let's do a dirty hack here.
4004          */
4005         if (iv->is_uploaded) {
4006                 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4007                         device_printf(sc->sc_dev,
4008                             "%s: failed to update MAC\n", __func__);
4009                         goto out;
4010                 }
4011                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4012                     in->in_ni.ni_chan, 1, 1)) != 0) {
4013                         device_printf(sc->sc_dev,
4014                             "%s: failed update phy ctxt\n", __func__);
4015                         goto out;
4016                 }
4017                 iv->phy_ctxt = &sc->sc_phyctxt[0];
4018
4019                 if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
4020                         device_printf(sc->sc_dev,
4021                             "%s: binding update cmd\n", __func__);
4022                         goto out;
4023                 }
4024                 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4025                         device_printf(sc->sc_dev,
4026                             "%s: failed to update sta\n", __func__);
4027                         goto out;
4028                 }
4029         } else {
4030                 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4031                         device_printf(sc->sc_dev,
4032                             "%s: failed to add MAC\n", __func__);
4033                         goto out;
4034                 }
4035                 if ((error = iwm_mvm_power_update_mac(sc)) != 0) {
4036                         device_printf(sc->sc_dev,
4037                             "%s: failed to update power management\n",
4038                             __func__);
4039                         goto out;
4040                 }
4041                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4042                     in->in_ni.ni_chan, 1, 1)) != 0) {
4043                         device_printf(sc->sc_dev,
4044                             "%s: failed add phy ctxt!\n", __func__);
4045                         error = ETIMEDOUT;
4046                         goto out;
4047                 }
4048                 iv->phy_ctxt = &sc->sc_phyctxt[0];
4049
4050                 if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
4051                         device_printf(sc->sc_dev,
4052                             "%s: binding add cmd\n", __func__);
4053                         goto out;
4054                 }
4055                 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4056                         device_printf(sc->sc_dev,
4057                             "%s: failed to add sta\n", __func__);
4058                         goto out;
4059                 }
4060         }
4061
4062         /*
4063          * Prevent the FW from wandering off channel during association
4064          * by "protecting" the session with a time event.
4065          */
4066         /* XXX duration is in units of TU, not MS */
4067         duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4068         iwm_mvm_protect_session(sc, iv, duration, 500 /* XXX magic number */);
4069         DELAY(100);
4070
4071         error = 0;
4072 out:
4073         ieee80211_free_node(ni);
4074         return (error);
4075 }
4076
4077 static int
4078 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
4079 {
4080         struct iwm_node *in = IWM_NODE(vap->iv_bss);
4081         int error;
4082
4083         if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4084                 device_printf(sc->sc_dev,
4085                     "%s: failed to update STA\n", __func__);
4086                 return error;
4087         }
4088
4089         in->in_assoc = 1;
4090         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4091                 device_printf(sc->sc_dev,
4092                     "%s: failed to update MAC\n", __func__);
4093                 return error;
4094         }
4095
4096         return 0;
4097 }
4098
4099 static int
4100 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
4101 {
4102         uint32_t tfd_msk;
4103
4104         /*
4105          * Ok, so *technically* the proper set of calls for going
4106          * from RUN back to SCAN is:
4107          *
4108          * iwm_mvm_power_mac_disable(sc, in);
4109          * iwm_mvm_mac_ctxt_changed(sc, vap);
4110          * iwm_mvm_rm_sta(sc, in);
4111          * iwm_mvm_update_quotas(sc, NULL);
4112          * iwm_mvm_mac_ctxt_changed(sc, in);
4113          * iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4114          * iwm_mvm_mac_ctxt_remove(sc, in);
4115          *
4116          * However, that freezes the device not matter which permutations
4117          * and modifications are attempted.  Obviously, this driver is missing
4118          * something since it works in the Linux driver, but figuring out what
4119          * is missing is a little more complicated.  Now, since we're going
4120          * back to nothing anyway, we'll just do a complete device reset.
4121          * Up your's, device!
4122          */
4123         /*
4124          * Just using 0xf for the queues mask is fine as long as we only
4125          * get here from RUN state.
4126          */
4127         tfd_msk = 0xf;
4128         mbufq_drain(&sc->sc_snd);
4129         iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
4130         /*
4131          * We seem to get away with just synchronously sending the
4132          * IWM_TXPATH_FLUSH command.
4133          */
4134 //      iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
4135         iwm_stop_device(sc);
4136         iwm_init_hw(sc);
4137         if (in)
4138                 in->in_assoc = 0;
4139         return 0;
4140
4141 #if 0
4142         int error;
4143
4144         iwm_mvm_power_mac_disable(sc, in);
4145
4146         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4147                 device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
4148                 return error;
4149         }
4150
4151         if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
4152                 device_printf(sc->sc_dev, "sta remove fail %d\n", error);
4153                 return error;
4154         }
4155         error = iwm_mvm_rm_sta(sc, in);
4156         in->in_assoc = 0;
4157         iwm_mvm_update_quotas(sc, NULL);
4158         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4159                 device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
4160                 return error;
4161         }
4162         iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4163
4164         iwm_mvm_mac_ctxt_remove(sc, in);
4165
4166         return error;
4167 #endif
4168 }
4169
4170 static struct ieee80211_node *
4171 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4172 {
4173         return malloc(sizeof (struct iwm_node), M_80211_NODE,
4174             M_NOWAIT | M_ZERO);
4175 }
4176
4177 uint8_t
4178 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4179 {
4180         int i;
4181         uint8_t rval;
4182
4183         for (i = 0; i < rs->rs_nrates; i++) {
4184                 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4185                 if (rval == iwm_rates[ridx].rate)
4186                         return rs->rs_rates[i];
4187         }
4188
4189         return 0;
4190 }
4191
/*
 * Build the link-quality (rate selection) command in in->in_lq for
 * this station; the caller later sends it to firmware via IWM_LQ_CMD.
 *
 * Maps each negotiated net80211 rate to a hardware rate index
 * (in->in_ridx[]), then fills lq->rs_table[] from the highest rate
 * downwards, padding the tail of the table with the lowest rate.
 */
static void
iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
//	int txant = 0;

	/* Refuse nodes whose rate set cannot fit the firmware table. */
	if (nrates > nitems(lq->rs_table)) {
		device_printf(sc->sc_dev,
		    "%s: node supports %d rates, driver handles "
		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
		return;
	}
	if (nrates == 0) {
		device_printf(sc->sc_dev,
		    "%s: node supports 0 rates, odd!\n", __func__);
		return;
	}

	/*
	 * XXX .. and most of iwm_node is not initialised explicitly;
	 * it's all just 0x0 passed to the firmware.
	 */

	/* first figure out which rates we should support */
	/* XXX TODO: this isn't 11n aware /at all/ */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
	    "%s: nrates=%d\n", __func__, nrates);

	/*
	 * Loop over nrates and populate in_ridx from the highest
	 * rate to the lowest rate.  Remember, in_ridx[] has
	 * IEEE80211_RATE_MAXSIZE entries!
	 */
	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX) {
			/*
			 * NOTE(review): when no HW index matches, in_ridx[i]
			 * keeps the -1 poison written by the memset above,
			 * and the table-building loop below would then index
			 * iwm_rates[] with it — confirm this cannot happen
			 * with real negotiated rate sets.
			 */
			device_printf(sc->sc_dev,
			    "%s: WARNING: device rate for %d not found!\n",
			    __func__, rate);
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
			    __func__,
			    i,
			    rate,
			    ridx);
			in->in_ridx[i] = ridx;
		}
	}

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/* For HT, always enable RTS/CTS to avoid excessive retries. */
	if (ni->ni_flags & IEEE80211_NODE_HT)
		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	/*
	 * XXX TODO: this should be looping over the min of nrates
	 * and LQ_MAX_RETRY_NUM.  Sigh.
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

#if 0
		if (txant == 0)
			txant = iwm_mvm_get_valid_tx_ant(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;
#else
		nextant = iwm_mvm_get_valid_tx_ant(sc);
#endif
		/*
		 * Map the rate id into a rate index into
		 * our hardware table containing the
		 * configuration to use for this rate.
		 */
		ridx = in->in_ridx[i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "station rate i=%d, rate=%d, hw=%x\n",
		    i, iwm_rates[ridx].rate, tab);
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	for (i = nrates; i < nitems(lq->rs_table); i++) {
		KASSERT(tab != 0, ("invalid tab"));
		lq->rs_table[i] = htole32(tab);
	}
}
4310
4311 static int
4312 iwm_media_change(struct ifnet *ifp)
4313 {
4314         struct ieee80211vap *vap = ifp->if_softc;
4315         struct ieee80211com *ic = vap->iv_ic;
4316         struct iwm_softc *sc = ic->ic_softc;
4317         int error;
4318
4319         error = ieee80211_media_change(ifp);
4320         if (error != ENETRESET)
4321                 return error;
4322
4323         IWM_LOCK(sc);
4324         if (ic->ic_nrunning > 0) {
4325                 iwm_stop(sc);
4326                 iwm_init(sc);
4327         }
4328         IWM_UNLOCK(sc);
4329         return error;
4330 }
4331
4332
/*
 * net80211 state-machine hook.  Performs the driver/firmware work for
 * each state transition, then chains to the saved net80211 handler
 * (ivp->iv_newstate).
 *
 * Locking: entered with the IEEE80211 lock held; it is dropped while
 * the IWM lock is held and re-taken before returning or before calling
 * back into net80211.  The unlock/lock pairs below preserve that
 * ordering (never hold IWM lock while taking the IEEE80211 lock).
 */
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_node *in;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
	    "switching state %s -> %s\n",
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate]);
	IEEE80211_UNLOCK(ic);
	IWM_LOCK(sc);

	/* Stop the scan LED blink when leaving SCAN. */
	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
		iwm_led_blink_stop(sc);

	/* disable beacon filtering if we're hopping out of RUN */
	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
		iwm_mvm_disable_beacon_filter(sc);

		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
			in->in_assoc = 0;

		if (nstate == IEEE80211_S_INIT) {
			/*
			 * RUN -> INIT: let net80211 transition first, then
			 * reset the device via iwm_release().
			 */
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			error = ivp->iv_newstate(vap, nstate, arg);
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
			iwm_release(sc, NULL);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return error;
		}

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 *
		 * XXX TODO: fix this for FreeBSD!
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Force transition to INIT; MGT=%d\n", arg);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			/* Always pass arg as -1 since we can't Tx right now. */
			/*
			 * XXX arg is just ignored anyway when transitioning
			 *     to IEEE80211_S_INIT.
			 */
			vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Going INIT->SCAN\n");
			nstate = IEEE80211_S_SCAN;
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
	case IEEE80211_S_SCAN:
		if (vap->iv_state == IEEE80211_S_AUTH ||
		    vap->iv_state == IEEE80211_S_ASSOC) {
			int myerr;
			/*
			 * Failed AUTH/ASSOC: run the net80211 transition,
			 * then tear down the firmware station, MAC context
			 * and binding.  Errors here are logged but do not
			 * override the net80211 handler's return value.
			 */
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			myerr = ivp->iv_newstate(vap, nstate, arg);
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
			error = iwm_mvm_rm_sta(sc, vap, FALSE);
			if (error) {
				device_printf(sc->sc_dev,
				    "%s: Failed to remove station: %d\n",
				    __func__, error);
			}
			error = iwm_mvm_mac_ctxt_changed(sc, vap);
			if (error) {
				device_printf(sc->sc_dev,
				    "%s: Failed to change mac context: %d\n",
				    __func__, error);
			}
			error = iwm_mvm_binding_remove_vif(sc, ivp);
			if (error) {
				device_printf(sc->sc_dev,
				    "%s: Failed to remove channel ctx: %d\n",
				    __func__, error);
			}
			ivp->phy_ctxt = NULL;
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return myerr;
		}
		break;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state: %d\n",
			    __func__, error);
			break;
		}
		break;

	case IEEE80211_S_ASSOC:
		if ((error = iwm_assoc(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to associate: %d\n", __func__,
			    error);
			break;
		}
		break;

	case IEEE80211_S_RUN:
	{
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		/* Update the association state, now we have it all */
		/* (eg associd comes in at this point */
		error = iwm_assoc(vap, sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update association state: %d\n",
			    __func__,
			    error);
			break;
		}

		/* Enable FW features and push the rate-selection table. */
		in = IWM_NODE(vap->iv_bss);
		iwm_mvm_enable_beacon_filter(sc, in);
		iwm_mvm_power_update_mac(sc);
		iwm_mvm_update_quotas(sc, ivp);
		iwm_setrates(sc, in);

		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: IWM_LQ_CMD failed\n", __func__);
		}

		iwm_mvm_led_enable(sc);
		break;
	}

	default:
		break;
	}
	IWM_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	/* Finally chain to the original net80211 state handler. */
	return (ivp->iv_newstate(vap, nstate, arg));
}
4502
4503 void
4504 iwm_endscan_cb(void *arg, int pending)
4505 {
4506         struct iwm_softc *sc = arg;
4507         struct ieee80211com *ic = &sc->sc_ic;
4508
4509         IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4510             "%s: scan ended\n",
4511             __func__);
4512
4513         ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4514 }
4515
4516 /*
4517  * Aging and idle timeouts for the different possible scenarios
4518  * in default configuration
4519  */
static const uint32_t
iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{
		/* Single unicast traffic: { aging, idle } timers. */
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
	},
	{
		/* Aggregated unicast traffic. */
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
	},
	{
		/* Multicast traffic. */
		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
	},
	{
		/* Block-ack sessions. */
		htole32(IWM_SF_BA_AGING_TIMER_DEF),
		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
	},
	{
		/* Tx re(transmission) scenario. */
		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
	},
};
4543
4544 /*
4545  * Aging and idle timeouts for the different possible scenarios
4546  * in single BSS MAC configuration.
4547  */
static const uint32_t
iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{
		/* Single unicast traffic: { aging, idle } timers. */
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
	},
	{
		/* Aggregated unicast traffic. */
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
	},
	{
		/* Multicast traffic. */
		htole32(IWM_SF_MCAST_AGING_TIMER),
		htole32(IWM_SF_MCAST_IDLE_TIMER)
	},
	{
		/* Block-ack sessions. */
		htole32(IWM_SF_BA_AGING_TIMER),
		htole32(IWM_SF_BA_IDLE_TIMER)
	},
	{
		/* Tx re(transmission) scenario. */
		htole32(IWM_SF_TX_RE_AGING_TIMER),
		htole32(IWM_SF_TX_RE_IDLE_TIMER)
	},
};
4571
/*
 * Populate a Smart Fifo configuration command.
 *
 * Chooses the FULL_ON watermark from the peer's capabilities (HT vs
 * legacy; MIMO watermarks are "notyet"), sets long-delay timeouts to
 * the common aging timer, and picks full-on timeouts from the
 * associated (iwm_sf_full_timeout) or default (iwm_sf_full_timeout_def)
 * table depending on whether a peer node was supplied.
 */
static void
iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
    struct ieee80211_node *ni)
{
	int i, j, watermark;

	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);

	/*
	 * If we are in association flow - check antenna configuration
	 * capabilities of the AP station, and choose the watermark accordingly.
	 */
	if (ni) {
		if (ni->ni_flags & IEEE80211_NODE_HT) {
#ifdef notyet
			if (ni->ni_rxmcs[2] != 0)
				watermark = IWM_SF_W_MARK_MIMO3;
			else if (ni->ni_rxmcs[1] != 0)
				watermark = IWM_SF_W_MARK_MIMO2;
			else
#endif
				watermark = IWM_SF_W_MARK_SISO;
		} else {
			watermark = IWM_SF_W_MARK_LEGACY;
		}
	/* default watermark value for unassociated mode. */
	} else {
		watermark = IWM_SF_W_MARK_MIMO2;
	}
	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);

	/* All long-delay timeouts use one shared aging timer value. */
	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
			sf_cmd->long_delay_timeouts[i][j] =
					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
		}
	}

	/* Full-on timeouts: associated vs. default table. */
	if (ni) {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
		       sizeof(iwm_sf_full_timeout));
	} else {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
		       sizeof(iwm_sf_full_timeout_def));
	}
}
4618
4619 static int
4620 iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4621 {
4622         struct ieee80211com *ic = &sc->sc_ic;
4623         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4624         struct iwm_sf_cfg_cmd sf_cmd = {
4625                 .state = htole32(IWM_SF_FULL_ON),
4626         };
4627         int ret = 0;
4628
4629         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4630                 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4631
4632         switch (new_state) {
4633         case IWM_SF_UNINIT:
4634         case IWM_SF_INIT_OFF:
4635                 iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4636                 break;
4637         case IWM_SF_FULL_ON:
4638                 iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4639                 break;
4640         default:
4641                 IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4642                     "Invalid state: %d. not sending Smart Fifo cmd\n",
4643                           new_state);
4644                 return EINVAL;
4645         }
4646
4647         ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4648                                    sizeof(sf_cmd), &sf_cmd);
4649         return ret;
4650 }
4651
4652 static int
4653 iwm_send_bt_init_conf(struct iwm_softc *sc)
4654 {
4655         struct iwm_bt_coex_cmd bt_cmd;
4656
4657         bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4658         bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4659
4660         return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4661             &bt_cmd);
4662 }
4663
/*
 * Send an MCC (mobile country code) update to the firmware, setting
 * the regulatory domain for the two-letter country code 'alpha2'.
 *
 * The command is sent synchronously and the response packet is kept
 * (IWM_CMD_WANT_SKB) so the reply can be inspected under IWM_DEBUG;
 * iwm_free_resp() releases it before returning.
 *
 * Returns 0 on success or the error from iwm_send_cmd().
 */
static int
iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
{
	struct iwm_mcc_update_cmd mcc_cmd;
	struct iwm_host_cmd hcmd = {
		.id = IWM_MCC_UPDATE_CMD,
		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
		.data = { &mcc_cmd },
	};
	int ret;
#ifdef IWM_DEBUG
	struct iwm_rx_packet *pkt;
	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
	struct iwm_mcc_update_resp *mcc_resp;
	int n_channels;
	uint16_t mcc;
#endif
	/* Newer firmware replies with the v2 response layout. */
	int resp_v2 = fw_has_capa(&sc->ucode_capa,
	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	/* Pack the two ASCII letters into one 16-bit value, LE on the wire. */
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if (fw_has_api(&sc->ucode_capa, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
	    fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;

	/* Older firmware expects the shorter v1 command size. */
	if (resp_v2)
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
	else
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);

	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
	    "send MCC update to FW with '%c%c' src = %d\n",
	    alpha2[0], alpha2[1], mcc_cmd.source_id);

	ret = iwm_send_cmd(sc, &hcmd);
	if (ret)
		return ret;

#ifdef IWM_DEBUG
	pkt = hcmd.resp_pkt;

	/* Extract MCC response */
	if (resp_v2) {
		/*
		 * NOTE(review): n_channels goes through le32toh() but mcc
		 * does not get a le16toh(); a byte swap may be missing here
		 * (debug-only output) -- confirm against the firmware ABI.
		 */
		mcc_resp = (void *)pkt->data;
		mcc = mcc_resp->mcc;
		n_channels =  le32toh(mcc_resp->n_channels);
	} else {
		mcc_resp_v1 = (void *)pkt->data;
		mcc = mcc_resp_v1->mcc;
		n_channels =  le32toh(mcc_resp_v1->n_channels);
	}

	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
	if (mcc == 0)
		mcc = 0x3030;  /* "00" - world */

	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
	    "regulatory domain '%c%c' (%d channels available)\n",
	    mcc >> 8, mcc & 0xff, n_channels);
#endif
	iwm_free_resp(sc, &hcmd);

	return 0;
}
4731
4732 static void
4733 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4734 {
4735         struct iwm_host_cmd cmd = {
4736                 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4737                 .len = { sizeof(uint32_t), },
4738                 .data = { &backoff, },
4739         };
4740
4741         if (iwm_send_cmd(sc, &cmd) != 0) {
4742                 device_printf(sc->sc_dev,
4743                     "failed to change thermal tx backoff\n");
4744         }
4745 }
4746
/*
 * Bring the NIC all the way up: start the hardware, run the INIT
 * firmware image for calibration, then restart with the regular
 * runtime firmware and configure coex, antennas, PHY contexts,
 * power, regulatory domain, scanning and the Tx queues.
 *
 * Returns 0 on success.  On any failure after the hardware restart,
 * the device is stopped again before the error is returned.
 */
static int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int error, i, ac;

	if ((error = iwm_start_hw(sc)) != 0) {
		printf("iwm_start_hw: failed %d\n", error);
		return error;
	}

	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
		printf("iwm_run_init_mvm_ucode: failed %d\n", error);
		return error;
	}

	/*
	 * should stop and start HW since that INIT
	 * image just loaded
	 */
	iwm_stop_device(sc);
	sc->sc_ps_disabled = FALSE;
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* restart, this time with the regular firmware */
	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
	if (error) {
		device_printf(sc->sc_dev, "could not load firmware\n");
		goto error;
	}

	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
		device_printf(sc->sc_dev, "bt init conf failed\n");
		goto error;
	}

	error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
	if (error != 0) {
		device_printf(sc->sc_dev, "antenna config failed\n");
		goto error;
	}

	/* Send phy db control command and then phy db calibration */
	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
		goto error;

	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
		goto error;
	}

	/* Add auxiliary station for scanning */
	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
		device_printf(sc->sc_dev, "add_aux_sta failed\n");
		goto error;
	}

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		if ((error = iwm_mvm_phy_ctxt_add(sc,
		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
			goto error;
	}

	/* Initialize tx backoffs to the minimum. */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_mvm_tt_tx_backoff(sc, 0);

	error = iwm_mvm_power_update_device(sc);
	if (error)
		goto error;

	/* "ZZ" presumably asks for the current/default domain -- confirm. */
	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
		if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
			goto error;
	}

	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
			goto error;
	}

	/* Enable Tx queues. */
	for (ac = 0; ac < WME_NUM_AC; ac++) {
		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
		    iwm_mvm_ac_to_tx_fifo[ac]);
		if (error)
			goto error;
	}

	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
		goto error;
	}

	return 0;

 error:
	iwm_stop_device(sc);
	return error;
}
4855
4856 /* Allow multicast from our BSSID. */
4857 static int
4858 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4859 {
4860         struct ieee80211_node *ni = vap->iv_bss;
4861         struct iwm_mcast_filter_cmd *cmd;
4862         size_t size;
4863         int error;
4864
4865         size = roundup(sizeof(*cmd), 4);
4866         cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4867         if (cmd == NULL)
4868                 return ENOMEM;
4869         cmd->filter_own = 1;
4870         cmd->port_id = 0;
4871         cmd->count = 0;
4872         cmd->pass_all = 1;
4873         IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4874
4875         error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4876             IWM_CMD_SYNC, size, cmd);
4877         free(cmd, M_DEVBUF);
4878
4879         return (error);
4880 }
4881
4882 /*
4883  * ifnet interfaces
4884  */
4885
4886 static void
4887 iwm_init(struct iwm_softc *sc)
4888 {
4889         int error;
4890
4891         if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4892                 return;
4893         }
4894         sc->sc_generation++;
4895         sc->sc_flags &= ~IWM_FLAG_STOPPED;
4896
4897         if ((error = iwm_init_hw(sc)) != 0) {
4898                 printf("iwm_init_hw failed %d\n", error);
4899                 iwm_stop(sc);
4900                 return;
4901         }
4902
4903         /*
4904          * Ok, firmware loaded and we are jogging
4905          */
4906         sc->sc_flags |= IWM_FLAG_HW_INITED;
4907         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4908 }
4909
4910 static int
4911 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4912 {
4913         struct iwm_softc *sc;
4914         int error;
4915
4916         sc = ic->ic_softc;
4917
4918         IWM_LOCK(sc);
4919         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4920                 IWM_UNLOCK(sc);
4921                 return (ENXIO);
4922         }
4923         error = mbufq_enqueue(&sc->sc_snd, m);
4924         if (error) {
4925                 IWM_UNLOCK(sc);
4926                 return (error);
4927         }
4928         iwm_start(sc);
4929         IWM_UNLOCK(sc);
4930         return (0);
4931 }
4932
4933 /*
4934  * Dequeue packets from sendq and call send.
4935  */
4936 static void
4937 iwm_start(struct iwm_softc *sc)
4938 {
4939         struct ieee80211_node *ni;
4940         struct mbuf *m;
4941         int ac = 0;
4942
4943         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4944         while (sc->qfullmsk == 0 &&
4945                 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4946                 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4947                 if (iwm_tx(sc, m, ni, ac) != 0) {
4948                         if_inc_counter(ni->ni_vap->iv_ifp,
4949                             IFCOUNTER_OERRORS, 1);
4950                         ieee80211_free_node(ni);
4951                         continue;
4952                 }
4953                 sc->sc_tx_timer = 15;
4954         }
4955         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4956 }
4957
4958 static void
4959 iwm_stop(struct iwm_softc *sc)
4960 {
4961
4962         sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4963         sc->sc_flags |= IWM_FLAG_STOPPED;
4964         sc->sc_generation++;
4965         iwm_led_blink_stop(sc);
4966         sc->sc_tx_timer = 0;
4967         iwm_stop_device(sc);
4968         sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
4969 }
4970
4971 static void
4972 iwm_watchdog(void *arg)
4973 {
4974         struct iwm_softc *sc = arg;
4975         struct ieee80211com *ic = &sc->sc_ic;
4976
4977         if (sc->sc_tx_timer > 0) {
4978                 if (--sc->sc_tx_timer == 0) {
4979                         device_printf(sc->sc_dev, "device timeout\n");
4980 #ifdef IWM_DEBUG
4981                         iwm_nic_error(sc);
4982 #endif
4983                         ieee80211_restart_all(ic);
4984                         counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4985                         return;
4986                 }
4987         }
4988         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4989 }
4990
4991 static void
4992 iwm_parent(struct ieee80211com *ic)
4993 {
4994         struct iwm_softc *sc = ic->ic_softc;
4995         int startall = 0;
4996
4997         IWM_LOCK(sc);
4998         if (ic->ic_nrunning > 0) {
4999                 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5000                         iwm_init(sc);
5001                         startall = 1;
5002                 }
5003         } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5004                 iwm_stop(sc);
5005         IWM_UNLOCK(sc);
5006         if (startall)
5007                 ieee80211_start_all(ic);
5008 }
5009
5010 /*
5011  * The interrupt side of things
5012  */
5013
5014 /*
5015  * error dumping routines are from iwlwifi/mvm/utils.c
5016  */
5017
5018 /*
5019  * Note: This structure is read from the device with IO accesses,
5020  * and the reading already does the endian conversion. As it is
5021  * read with uint32_t-sized accesses, any members with a different size
5022  * need to be ordered correctly though!
5023  */
struct iwm_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;		/* type of error */
	uint32_t trm_hw_status0;	/* TRM HW status */
	uint32_t trm_hw_status1;	/* TRM HW status */
	uint32_t blink2;		/* branch link */
	uint32_t ilink1;		/* interrupt link */
	uint32_t ilink2;		/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;		/* beacon timer */
	uint32_t tsf_low;		/* network timestamp function timer */
	uint32_t tsf_hi;		/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t fw_rev_type;	/* firmware revision type */
	uint32_t major;		/* uCode version major */
	uint32_t minor;		/* uCode version minor */
	uint32_t hw_ver;		/* HW Silicon version */
	uint32_t brd_ver;		/* HW board version */
	uint32_t log_pc;		/* log program counter */
	uint32_t frame_ptr;		/* frame pointer */
	uint32_t stack_ptr;		/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
	uint32_t wait_event;		/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicates which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* date and time of the firmware
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5071
5072 /*
5073  * UMAC error struct - relevant starting from family 8000 chip.
5074  * Note: This structure is read from the device with IO accesses,
5075  * and the reading already does the endian conversion. As it is
5076  * read with u32-sized accesses, any members with a different size
5077  * need to be ordered correctly though!
5078  */
struct iwm_umac_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t umac_major;	/* UMAC uCode version major */
	uint32_t umac_minor;	/* UMAC uCode version minor */
	uint32_t frame_pointer;	/* core register 27 */
	uint32_t stack_pointer;	/* core register 28 */
	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
	uint32_t nic_isr_pref;	/* ISR status register */
} __packed;
5096
5097 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
5098 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5099
5100 #ifdef IWM_DEBUG
/*
 * Mapping of firmware error IDs to human-readable names.  The final
 * "ADVANCED_SYSASSERT" entry (num == 0) is the catch-all returned by
 * the lookup when no other ID matches.
 */
struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
5122
5123 static const char *
5124 iwm_desc_lookup(uint32_t num)
5125 {
5126         int i;
5127
5128         for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5129                 if (advanced_lookup[i].num == num)
5130                         return advanced_lookup[i].name;
5131
5132         /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5133         return advanced_lookup[i].name;
5134 }
5135
5136 static void
5137 iwm_nic_umac_error(struct iwm_softc *sc)
5138 {
5139         struct iwm_umac_error_event_table table;
5140         uint32_t base;
5141
5142         base = sc->umac_error_event_table;
5143
5144         if (base < 0x800000) {
5145                 device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5146                     base);
5147                 return;
5148         }
5149
5150         if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5151                 device_printf(sc->sc_dev, "reading errlog failed\n");
5152                 return;
5153         }
5154
5155         if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5156                 device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5157                 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5158                     sc->sc_flags, table.valid);
5159         }
5160
5161         device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5162                 iwm_desc_lookup(table.error_id));
5163         device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5164         device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5165         device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5166             table.ilink1);
5167         device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5168             table.ilink2);
5169         device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5170         device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5171         device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5172         device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5173         device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5174         device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5175             table.frame_pointer);
5176         device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5177             table.stack_pointer);
5178         device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5179         device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5180             table.nic_isr_pref);
5181 }
5182
5183 /*
5184  * Support for dumping the error log seemed like a good idea ...
5185  * but it's mostly hex junk and the only sensible thing is the
5186  * hw/ucode revision (which we know anyway).  Since it's here,
5187  * I'll just leave it in, just in case e.g. the Intel guys want to
5188  * help us decipher some "ADVANCED_SYSASSERT" later.
5189  */
/*
 * Dump the LMAC error event table to the console.  The table is read
 * from device memory in 32-bit words (the read already performs the
 * endian conversion), then printed field by field.  On family 8000+
 * devices the UMAC error log is dumped as well.
 */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table table;
	uint32_t base;

	device_printf(sc->sc_dev, "dumping device error log\n");
	base = sc->error_event_table;
	/* Sanity-check the table pointer reported by the firmware. */
	if (base < 0x800000) {
		device_printf(sc->sc_dev,
		    "Invalid error log pointer 0x%08x\n", base);
		return;
	}

	/* The log is read in 32-bit words. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	/* valid == 0 means the log is empty; nothing to dump. */
	if (!table.valid) {
		device_printf(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
	    iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
	    table.trm_hw_status0);
	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
	    table.trm_hw_status1);
	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
	    table.fw_rev_type);
	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);

	/* Family 8000+ keeps a second error log for the UMAC. */
	if (sc->umac_error_event_table)
		iwm_nic_umac_error(sc);
}
5262 #endif
5263
5264 static void
5265 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5266 {
5267         struct ieee80211com *ic = &sc->sc_ic;
5268         struct iwm_cmd_response *cresp;
5269         struct mbuf *m1;
5270         uint32_t offset = 0;
5271         uint32_t maxoff = IWM_RBUF_SIZE;
5272         uint32_t nextoff;
5273         boolean_t stolen = FALSE;
5274
5275 #define HAVEROOM(a)     \
5276     ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
5277
5278         while (HAVEROOM(offset)) {
5279                 struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5280                     offset);
5281                 int qid, idx, code, len;
5282
5283                 qid = pkt->hdr.qid;
5284                 idx = pkt->hdr.idx;
5285
5286                 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5287
5288                 /*
5289                  * randomly get these from the firmware, no idea why.
5290                  * they at least seem harmless, so just ignore them for now
5291                  */
5292                 if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5293                     pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5294                         break;
5295                 }
5296
5297                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5298                     "rx packet qid=%d idx=%d type=%x\n",
5299                     qid & ~0x80, pkt->hdr.idx, code);
5300
5301                 len = le32toh(pkt->len_n_flags) & IWM_FH_RSCSR_FRAME_SIZE_MSK;
5302                 len += sizeof(uint32_t); /* account for status word */
5303                 nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
5304
5305                 iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5306
5307                 switch (code) {
5308                 case IWM_REPLY_RX_PHY_CMD:
5309                         iwm_mvm_rx_rx_phy_cmd(sc, pkt);
5310                         break;
5311
5312                 case IWM_REPLY_RX_MPDU_CMD: {
5313                         /*
5314                          * If this is the last frame in the RX buffer, we
5315                          * can directly feed the mbuf to the sharks here.
5316                          */
5317                         struct iwm_rx_packet *nextpkt = mtodoff(m,
5318                             struct iwm_rx_packet *, nextoff);
5319                         if (!HAVEROOM(nextoff) ||
5320                             (nextpkt->hdr.code == 0 &&
5321                              (nextpkt->hdr.qid & ~0x80) == 0 &&
5322                              nextpkt->hdr.idx == 0) ||
5323                             (nextpkt->len_n_flags ==
5324                              htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5325                                 if (iwm_mvm_rx_rx_mpdu(sc, m, offset, stolen)) {
5326                                         stolen = FALSE;
5327                                         /* Make sure we abort the loop */
5328                                         nextoff = maxoff;
5329                                 }
5330                                 break;
5331                         }
5332
5333                         /*
5334                          * Use m_copym instead of m_split, because that
5335                          * makes it easier to keep a valid rx buffer in
5336                          * the ring, when iwm_mvm_rx_rx_mpdu() fails.
5337                          *
5338                          * We need to start m_copym() at offset 0, to get the
5339                          * M_PKTHDR flag preserved.
5340                          */
5341                         m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
5342                         if (m1) {
5343                                 if (iwm_mvm_rx_rx_mpdu(sc, m1, offset, stolen))
5344                                         stolen = TRUE;
5345                                 else
5346                                         m_freem(m1);
5347                         }
5348                         break;
5349                 }
5350
5351                 case IWM_TX_CMD:
5352                         iwm_mvm_rx_tx_cmd(sc, pkt);
5353                         break;
5354
5355                 case IWM_MISSED_BEACONS_NOTIFICATION: {
5356                         struct iwm_missed_beacons_notif *resp;
5357                         int missed;
5358
5359                         /* XXX look at mac_id to determine interface ID */
5360                         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5361
5362                         resp = (void *)pkt->data;
5363                         missed = le32toh(resp->consec_missed_beacons);
5364
5365                         IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5366                             "%s: MISSED_BEACON: mac_id=%d, "
5367                             "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5368                             "num_rx=%d\n",
5369                             __func__,
5370                             le32toh(resp->mac_id),
5371                             le32toh(resp->consec_missed_beacons_since_last_rx),
5372                             le32toh(resp->consec_missed_beacons),
5373                             le32toh(resp->num_expected_beacons),
5374                             le32toh(resp->num_recvd_beacons));
5375
5376                         /* Be paranoid */
5377                         if (vap == NULL)
5378                                 break;
5379
5380                         /* XXX no net80211 locking? */
5381                         if (vap->iv_state == IEEE80211_S_RUN &&
5382                             (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5383                                 if (missed > vap->iv_bmissthreshold) {
5384                                         /* XXX bad locking; turn into task */
5385                                         IWM_UNLOCK(sc);
5386                                         ieee80211_beacon_miss(ic);
5387                                         IWM_LOCK(sc);
5388                                 }
5389                         }
5390
5391                         break;
5392                 }
5393
5394                 case IWM_MFUART_LOAD_NOTIFICATION:
5395                         break;
5396
5397                 case IWM_MVM_ALIVE:
5398                         break;
5399
5400                 case IWM_CALIB_RES_NOTIF_PHY_DB:
5401                         break;
5402
5403                 case IWM_STATISTICS_NOTIFICATION: {
5404                         struct iwm_notif_statistics *stats;
5405                         stats = (void *)pkt->data;
5406                         memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5407                         sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
5408                         break;
5409                 }
5410
5411                 case IWM_NVM_ACCESS_CMD:
5412                 case IWM_MCC_UPDATE_CMD:
5413                         if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5414                                 memcpy(sc->sc_cmd_resp,
5415                                     pkt, sizeof(sc->sc_cmd_resp));
5416                         }
5417                         break;
5418
5419                 case IWM_MCC_CHUB_UPDATE_CMD: {
5420                         struct iwm_mcc_chub_notif *notif;
5421                         notif = (void *)pkt->data;
5422
5423                         sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5424                         sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5425                         sc->sc_fw_mcc[2] = '\0';
5426                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
5427                             "fw source %d sent CC '%s'\n",
5428                             notif->source_id, sc->sc_fw_mcc);
5429                         break;
5430                 }
5431
5432                 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5433                 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5434                                  IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5435                         struct iwm_dts_measurement_notif_v1 *notif;
5436
5437                         if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5438                                 device_printf(sc->sc_dev,
5439                                     "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5440                                 break;
5441                         }
5442                         notif = (void *)pkt->data;
5443                         IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5444                             "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5445                             notif->temp);
5446                         break;
5447                 }
5448
5449                 case IWM_PHY_CONFIGURATION_CMD:
5450                 case IWM_TX_ANT_CONFIGURATION_CMD:
5451                 case IWM_ADD_STA:
5452                 case IWM_MAC_CONTEXT_CMD:
5453                 case IWM_REPLY_SF_CFG_CMD:
5454                 case IWM_POWER_TABLE_CMD:
5455                 case IWM_PHY_CONTEXT_CMD:
5456                 case IWM_BINDING_CONTEXT_CMD:
5457                 case IWM_TIME_EVENT_CMD:
5458                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5459                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5460                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5461                 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5462                 case IWM_SCAN_OFFLOAD_ABORT_CMD:
5463                 case IWM_REPLY_BEACON_FILTERING_CMD:
5464                 case IWM_MAC_PM_POWER_TABLE:
5465                 case IWM_TIME_QUOTA_CMD:
5466                 case IWM_REMOVE_STA:
5467                 case IWM_TXPATH_FLUSH:
5468                 case IWM_LQ_CMD:
5469                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
5470                                  IWM_FW_PAGING_BLOCK_CMD):
5471                 case IWM_BT_CONFIG:
5472                 case IWM_REPLY_THERMAL_MNG_BACKOFF:
5473                         cresp = (void *)pkt->data;
5474                         if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5475                                 memcpy(sc->sc_cmd_resp,
5476                                     pkt, sizeof(*pkt)+sizeof(*cresp));
5477                         }
5478                         break;
5479
5480                 /* ignore */
5481                 case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
5482                         break;
5483
5484                 case IWM_INIT_COMPLETE_NOTIF:
5485                         break;
5486
5487                 case IWM_SCAN_OFFLOAD_COMPLETE: {
5488                         struct iwm_periodic_scan_complete *notif;
5489                         notif = (void *)pkt->data;
5490                         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5491                                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5492                                 ieee80211_runtask(ic, &sc->sc_es_task);
5493                         }
5494                         break;
5495                 }
5496
5497                 case IWM_SCAN_ITERATION_COMPLETE: {
5498                         struct iwm_lmac_scan_complete_notif *notif;
5499                         notif = (void *)pkt->data;
5500                         ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5501                         break;
5502                 }
5503  
5504                 case IWM_SCAN_COMPLETE_UMAC: {
5505                         struct iwm_umac_scan_complete *notif;
5506                         notif = (void *)pkt->data;
5507
5508                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
5509                             "UMAC scan complete, status=0x%x\n",
5510                             notif->status);
5511                         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5512                                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5513                                 ieee80211_runtask(ic, &sc->sc_es_task);
5514                         }
5515                         break;
5516                 }
5517
5518                 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5519                         struct iwm_umac_scan_iter_complete_notif *notif;
5520                         notif = (void *)pkt->data;
5521
5522                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5523                             "complete, status=0x%x, %d channels scanned\n",
5524                             notif->status, notif->scanned_channels);
5525                         ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5526                         break;
5527                 }
5528
5529                 case IWM_REPLY_ERROR: {
5530                         struct iwm_error_resp *resp;
5531                         resp = (void *)pkt->data;
5532
5533                         device_printf(sc->sc_dev,
5534                             "firmware error 0x%x, cmd 0x%x\n",
5535                             le32toh(resp->error_type),
5536                             resp->cmd_id);
5537                         break;
5538                 }
5539
5540                 case IWM_TIME_EVENT_NOTIFICATION: {
5541                         struct iwm_time_event_notif *notif;
5542                         notif = (void *)pkt->data;
5543
5544                         IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5545                             "TE notif status = 0x%x action = 0x%x\n",
5546                             notif->status, notif->action);
5547                         break;
5548                 }
5549
5550                 case IWM_MCAST_FILTER_CMD:
5551                         break;
5552
5553                 case IWM_SCD_QUEUE_CFG: {
5554                         struct iwm_scd_txq_cfg_rsp *rsp;
5555                         rsp = (void *)pkt->data;
5556
5557                         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5558                             "queue cfg token=0x%x sta_id=%d "
5559                             "tid=%d scd_queue=%d\n",
5560                             rsp->token, rsp->sta_id, rsp->tid,
5561                             rsp->scd_queue);
5562                         break;
5563                 }
5564
5565                 default:
5566                         device_printf(sc->sc_dev,
5567                             "frame %d/%d %x UNHANDLED (this should "
5568                             "not happen)\n", qid & ~0x80, idx,
5569                             pkt->len_n_flags);
5570                         break;
5571                 }
5572
5573                 /*
5574                  * Why test bit 0x80?  The Linux driver:
5575                  *
5576                  * There is one exception:  uCode sets bit 15 when it
5577                  * originates the response/notification, i.e. when the
5578                  * response/notification is not a direct response to a
5579                  * command sent by the driver.  For example, uCode issues
5580                  * IWM_REPLY_RX when it sends a received frame to the driver;
5581                  * it is not a direct response to any driver command.
5582                  *
5583                  * Ok, so since when is 7 == 15?  Well, the Linux driver
5584                  * uses a slightly different format for pkt->hdr, and "qid"
5585                  * is actually the upper byte of a two-byte field.
5586                  */
5587                 if (!(qid & (1 << 7)))
5588                         iwm_cmd_done(sc, pkt);
5589
5590                 offset = nextoff;
5591         }
5592         if (stolen)
5593                 m_freem(m);
5594 #undef HAVEROOM
5595 }
5596
5597 /*
5598  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5599  * Basic structure from if_iwn
5600  */
5601 static void
5602 iwm_notif_intr(struct iwm_softc *sc)
5603 {
5604         uint16_t hw;
5605
5606         bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5607             BUS_DMASYNC_POSTREAD);
5608
5609         hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5610
5611         /*
5612          * Process responses
5613          */
5614         while (sc->rxq.cur != hw) {
5615                 struct iwm_rx_ring *ring = &sc->rxq;
5616                 struct iwm_rx_data *data = &ring->data[ring->cur];
5617
5618                 bus_dmamap_sync(ring->data_dmat, data->map,
5619                     BUS_DMASYNC_POSTREAD);
5620
5621                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5622                     "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5623                 iwm_handle_rxb(sc, data->m);
5624
5625                 ring->cur = (ring->cur + 1) % IWM_RX_RING_COUNT;
5626         }
5627
5628         /*
5629          * Tell the firmware that it can reuse the ring entries that
5630          * we have just processed.
5631          * Seems like the hardware gets upset unless we align
5632          * the write by 8??
5633          */
5634         hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5635         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, rounddown2(hw, 8));
5636 }
5637
/*
 * Top-level interrupt handler.
 *
 * Gathers the interrupt cause either from the ICT table (when
 * IWM_FLAG_USE_ICT is set) or directly from the INT/FH_INT_STATUS CSRs,
 * then dispatches in priority order: firmware SW error (VAP restart),
 * hardware error (device stop), firmware-chunk-loaded wakeup, rfkill,
 * and RX/periodic (which funnels into iwm_notif_intr()).
 */
static void
iwm_intr(void *arg)
{
        struct iwm_softc *sc = arg;
        int handled = 0;
        /* NOTE(review): rv is assigned below but never read. */
        int r1, r2, rv = 0;
        int isperiodic = 0;

        IWM_LOCK(sc);
        /* Mask all interrupts while we service this one. */
        IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

        if (sc->sc_flags & IWM_FLAG_USE_ICT) {
                uint32_t *ict = sc->ict_dma.vaddr;
                int tmp;

                /*
                 * NOTE(review): htole32() on a value read *from* the
                 * device looks like it should be le32toh(); the two are
                 * identical on little-endian hosts — confirm on BE.
                 */
                tmp = htole32(ict[sc->ict_cur]);
                if (!tmp)
                        goto out_ena;

                /*
                 * ok, there was something.  keep plowing until we have all.
                 */
                r1 = r2 = 0;
                while (tmp) {
                        r1 |= tmp;
                        /* Zero the slot so the device can reuse it. */
                        ict[sc->ict_cur] = 0;
                        sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
                        tmp = htole32(ict[sc->ict_cur]);
                }

                /* this is where the fun begins.  don't ask */
                if (r1 == 0xffffffff)
                        r1 = 0;

                /* i am not expected to understand this */
                if (r1 & 0xc0000)
                        r1 |= 0x8000;
                /* Expand the packed ICT format to CSR_INT bit positions. */
                r1 = (0xff & r1) | ((0xff00 & r1) << 16);
        } else {
                r1 = IWM_READ(sc, IWM_CSR_INT);
                /* "hardware gone" (where, fishing?) */
                if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
                        goto out;
                r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
        }
        if (r1 == 0 && r2 == 0) {
                /* Spurious interrupt: nothing pending, just re-enable. */
                goto out_ena;
        }

        /* Acknowledge the causes we are about to handle. */
        IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

        /* Safely ignore these bits for debug checks below */
        r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);

        if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
                int i;
                struct ieee80211com *ic = &sc->sc_ic;
                struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

#ifdef IWM_DEBUG
                iwm_nic_error(sc);
#endif
                /* Dump driver status (TX and RX rings) while we're here. */
                device_printf(sc->sc_dev, "driver status:\n");
                for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
                        struct iwm_tx_ring *ring = &sc->txq[i];
                        device_printf(sc->sc_dev,
                            "  tx ring %2d: qid=%-2d cur=%-3d "
                            "queued=%-3d\n",
                            i, ring->qid, ring->cur, ring->queued);
                }
                device_printf(sc->sc_dev,
                    "  rx ring: cur=%d\n", sc->rxq.cur);
                device_printf(sc->sc_dev,
                    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);

                /* Don't stop the device; just do a VAP restart */
                IWM_UNLOCK(sc);

                if (vap == NULL) {
                        printf("%s: null vap\n", __func__);
                        return;
                }

                device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
                    "restarting\n", __func__, vap->iv_state);

                /* Full net80211 restart; interrupts re-enabled on re-init. */
                ieee80211_restart_all(ic);
                return;
        }

        if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
                handled |= IWM_CSR_INT_BIT_HW_ERR;
                device_printf(sc->sc_dev, "hardware error, stopping device\n");
                iwm_stop(sc);
                rv = 1;
                goto out;
        }

        /* firmware chunk loaded */
        if (r1 & IWM_CSR_INT_BIT_FH_TX) {
                IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
                handled |= IWM_CSR_INT_BIT_FH_TX;
                /* Wake the firmware-load path sleeping on sc_fw. */
                sc->sc_fw_chunk_done = 1;
                wakeup(&sc->sc_fw);
        }

        if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
                handled |= IWM_CSR_INT_BIT_RF_KILL;
                if (iwm_check_rfkill(sc)) {
                        device_printf(sc->sc_dev,
                            "%s: rfkill switch, disabling interface\n",
                            __func__);
                        iwm_stop(sc);
                }
        }

        /*
         * The Linux driver uses periodic interrupts to avoid races.
         * We cargo-cult like it's going out of fashion.
         */
        if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
                handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
                IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
                if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
                        IWM_WRITE_1(sc,
                            IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
                isperiodic = 1;
        }

        if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
                handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
                IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

                iwm_notif_intr(sc);

                /* enable periodic interrupt, see above */
                if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
                        IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
                            IWM_CSR_INT_PERIODIC_ENA);
        }

        if (__predict_false(r1 & ~handled))
                IWM_DPRINTF(sc, IWM_DEBUG_INTR,
                    "%s: unhandled interrupts: %x\n", __func__, r1);
        rv = 1;

 out_ena:
        iwm_restore_interrupts(sc);
 out:
        IWM_UNLOCK(sc);
        return;
}
5791
5792 /*
5793  * Autoconf glue-sniffing
5794  */
5795 #define PCI_VENDOR_INTEL                0x8086
5796 #define PCI_PRODUCT_INTEL_WL_3160_1     0x08b3
5797 #define PCI_PRODUCT_INTEL_WL_3160_2     0x08b4
5798 #define PCI_PRODUCT_INTEL_WL_3165_1     0x3165
5799 #define PCI_PRODUCT_INTEL_WL_3165_2     0x3166
5800 #define PCI_PRODUCT_INTEL_WL_7260_1     0x08b1
5801 #define PCI_PRODUCT_INTEL_WL_7260_2     0x08b2
5802 #define PCI_PRODUCT_INTEL_WL_7265_1     0x095a
5803 #define PCI_PRODUCT_INTEL_WL_7265_2     0x095b
5804 #define PCI_PRODUCT_INTEL_WL_8260_1     0x24f3
5805 #define PCI_PRODUCT_INTEL_WL_8260_2     0x24f4
5806
/*
 * PCI device ID -> per-chip configuration table.  Consulted by
 * iwm_probe() and iwm_dev_check().  Note: 7265D shares the 7265 PCI
 * IDs and is special-cased later in iwm_attach() via CSR_HW_REV.
 */
static const struct iwm_devices {
        uint16_t                device;         /* PCI device ID */
        const struct iwm_cfg    *cfg;           /* chip configuration */
} iwm_devices[] = {
        { PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
        { PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
        { PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
        { PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
        { PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
        { PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
        { PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
        { PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
        { PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
        { PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
};
5822
5823 static int
5824 iwm_probe(device_t dev)
5825 {
5826         int i;
5827
5828         for (i = 0; i < nitems(iwm_devices); i++) {
5829                 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5830                     pci_get_device(dev) == iwm_devices[i].device) {
5831                         device_set_desc(dev, iwm_devices[i].cfg->name);
5832                         return (BUS_PROBE_DEFAULT);
5833                 }
5834         }
5835
5836         return (ENXIO);
5837 }
5838
5839 static int
5840 iwm_dev_check(device_t dev)
5841 {
5842         struct iwm_softc *sc;
5843         uint16_t devid;
5844         int i;
5845
5846         sc = device_get_softc(dev);
5847
5848         devid = pci_get_device(dev);
5849         for (i = 0; i < nitems(iwm_devices); i++) {
5850                 if (iwm_devices[i].device == devid) {
5851                         sc->cfg = iwm_devices[i].cfg;
5852                         return (0);
5853                 }
5854         }
5855         device_printf(dev, "unknown adapter type\n");
5856         return ENXIO;
5857 }
5858
5859 /* PCI registers */
5860 #define PCI_CFG_RETRY_TIMEOUT   0x041
5861
5862 static int
5863 iwm_pci_attach(device_t dev)
5864 {
5865         struct iwm_softc *sc;
5866         int count, error, rid;
5867         uint16_t reg;
5868
5869         sc = device_get_softc(dev);
5870
5871         /* We disable the RETRY_TIMEOUT register (0x41) to keep
5872          * PCI Tx retries from interfering with C3 CPU state */
5873         pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5874
5875         /* Enable bus-mastering and hardware bug workaround. */
5876         pci_enable_busmaster(dev);
5877         reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5878         /* if !MSI */
5879         if (reg & PCIM_STATUS_INTxSTATE) {
5880                 reg &= ~PCIM_STATUS_INTxSTATE;
5881         }
5882         pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5883
5884         rid = PCIR_BAR(0);
5885         sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5886             RF_ACTIVE);
5887         if (sc->sc_mem == NULL) {
5888                 device_printf(sc->sc_dev, "can't map mem space\n");
5889                 return (ENXIO);
5890         }
5891         sc->sc_st = rman_get_bustag(sc->sc_mem);
5892         sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5893
5894         /* Install interrupt handler. */
5895         count = 1;
5896         rid = 0;
5897         if (pci_alloc_msi(dev, &count) == 0)
5898                 rid = 1;
5899         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5900             (rid != 0 ? 0 : RF_SHAREABLE));
5901         if (sc->sc_irq == NULL) {
5902                 device_printf(dev, "can't map interrupt\n");
5903                         return (ENXIO);
5904         }
5905         error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5906             NULL, iwm_intr, sc, &sc->sc_ih);
5907         if (sc->sc_ih == NULL) {
5908                 device_printf(dev, "can't establish interrupt");
5909                         return (ENXIO);
5910         }
5911         sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5912
5913         return (0);
5914 }
5915
5916 static void
5917 iwm_pci_detach(device_t dev)
5918 {
5919         struct iwm_softc *sc = device_get_softc(dev);
5920
5921         if (sc->sc_irq != NULL) {
5922                 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5923                 bus_release_resource(dev, SYS_RES_IRQ,
5924                     rman_get_rid(sc->sc_irq), sc->sc_irq);
5925                 pci_release_msi(dev);
5926         }
5927         if (sc->sc_mem != NULL)
5928                 bus_release_resource(dev, SYS_RES_MEMORY,
5929                     rman_get_rid(sc->sc_mem), sc->sc_mem);
5930 }
5931
5932
5933
/*
 * Device attach: set up locks, callouts and tasks, allocate all DMA
 * resources (firmware buffers, keep-warm page, ICT table, TX scheduler,
 * TX/RX rings), identify the exact chip revision, and register a
 * config_intrhook so firmware loading (iwm_preinit) runs once
 * interrupts are available.  On any failure, iwm_detach_local() tears
 * down whatever was set up.
 */
static int
iwm_attach(device_t dev)
{
        struct iwm_softc *sc = device_get_softc(dev);
        struct ieee80211com *ic = &sc->sc_ic;
        int error;
        int txq_i, i;

        sc->sc_dev = dev;
        sc->sc_attached = 1;
        IWM_LOCK_INIT(sc);
        mbufq_init(&sc->sc_snd, ifqmaxlen);
        callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
        callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
        TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);

        sc->sc_notif_wait = iwm_notification_wait_init(sc);
        if (sc->sc_notif_wait == NULL) {
                device_printf(dev, "failed to init notification wait struct\n");
                goto fail;
        }

        /* Init phy db */
        sc->sc_phy_db = iwm_phy_db_init(sc);
        if (!sc->sc_phy_db) {
                device_printf(dev, "Cannot init phy_db\n");
                goto fail;
        }

        /* PCI attach */
        error = iwm_pci_attach(dev);
        if (error != 0)
                goto fail;

        /* No synchronous command response is pending yet. */
        sc->sc_wantresp = -1;

        /* Check device type */
        error = iwm_dev_check(dev);
        if (error != 0)
                goto fail;

        sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
        /*
         * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
         * changed, and now the revision step also includes bit 0-1 (no more
         * "dash" value). To keep hw_rev backwards compatible - we'll store it
         * in the old format.
         */
        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
                sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
                                (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

        if (iwm_prepare_card_hw(sc) != 0) {
                device_printf(dev, "could not initialize hardware\n");
                goto fail;
        }

        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
                int ret;
                uint32_t hw_step;

                /*
                 * In order to recognize C step the driver should read the
                 * chip version id located at the AUX bus MISC address.
                 */
                IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
                            IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
                DELAY(2);

                ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
                                   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
                                   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
                                   25000);
                if (!ret) {
                        device_printf(sc->sc_dev,
                            "Failed to wake up the nic\n");
                        goto fail;
                }

                if (iwm_nic_lock(sc)) {
                        hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
                        hw_step |= IWM_ENABLE_WFPM;
                        iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
                        hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
                        hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
                        /* Rewrite the stored revision if this is a C step. */
                        if (hw_step == 0x3)
                                sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
                                                (IWM_SILICON_C_STEP << 2);
                        iwm_nic_unlock(sc);
                } else {
                        device_printf(sc->sc_dev, "Failed to lock the nic\n");
                        goto fail;
                }
        }

        /* special-case 7265D, it has the same PCI IDs. */
        if (sc->cfg == &iwm7265_cfg &&
            (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
                sc->cfg = &iwm7265d_cfg;
        }

        /* Allocate DMA memory for firmware transfers. */
        if ((error = iwm_alloc_fwmem(sc)) != 0) {
                device_printf(dev, "could not allocate memory for firmware\n");
                goto fail;
        }

        /* Allocate "Keep Warm" page. */
        if ((error = iwm_alloc_kw(sc)) != 0) {
                device_printf(dev, "could not allocate keep warm page\n");
                goto fail;
        }

        /* We use ICT interrupts */
        if ((error = iwm_alloc_ict(sc)) != 0) {
                device_printf(dev, "could not allocate ICT table\n");
                goto fail;
        }

        /* Allocate TX scheduler "rings". */
        if ((error = iwm_alloc_sched(sc)) != 0) {
                device_printf(dev, "could not allocate TX scheduler rings\n");
                goto fail;
        }

        /* Allocate TX rings */
        for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
                if ((error = iwm_alloc_tx_ring(sc,
                    &sc->txq[txq_i], txq_i)) != 0) {
                        device_printf(dev,
                            "could not allocate TX ring %d\n",
                            txq_i);
                        goto fail;
                }
        }

        /* Allocate RX ring. */
        if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
                device_printf(dev, "could not allocate RX ring\n");
                goto fail;
        }

        /* Clear pending interrupts. */
        IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

        ic->ic_softc = sc;
        ic->ic_name = device_get_nameunit(sc->sc_dev);
        ic->ic_phytype = IEEE80211_T_OFDM;      /* not only, but not used */
        ic->ic_opmode = IEEE80211_M_STA;        /* default to BSS mode */

        /* Set device capabilities. */
        ic->ic_caps =
            IEEE80211_C_STA |
            IEEE80211_C_WPA |           /* WPA/RSN */
            IEEE80211_C_WME |
            IEEE80211_C_PMGT |
            IEEE80211_C_SHSLOT |        /* short slot time supported */
            IEEE80211_C_SHPREAMBLE      /* short preamble supported */
//          IEEE80211_C_BGSCAN          /* capable of bg scanning */
            ;
        /* PHY contexts start out unreferenced and unbound to a channel. */
        for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
                sc->sc_phyctxt[i].id = i;
                sc->sc_phyctxt[i].color = 0;
                sc->sc_phyctxt[i].ref = 0;
                sc->sc_phyctxt[i].channel = NULL;
        }

        /* Default noise floor */
        sc->sc_noise = -96;

        /* Max RSSI */
        sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;

        /* Defer firmware load until interrupts are enabled. */
        sc->sc_preinit_hook.ich_func = iwm_preinit;
        sc->sc_preinit_hook.ich_arg = sc;
        if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
                device_printf(dev, "config_intrhook_establish failed\n");
                goto fail;
        }

#ifdef IWM_DEBUG
        SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
            CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

        IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
            "<-%s\n", __func__);

        return 0;

        /* Free allocated memory if something failed during attachment. */
fail:
        iwm_detach_local(sc, 0);

        return ENXIO;
}
6131
6132 static int
6133 iwm_is_valid_ether_addr(uint8_t *addr)
6134 {
6135         char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6136
6137         if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6138                 return (FALSE);
6139
6140         return (TRUE);
6141 }
6142
6143 static int
6144 iwm_wme_update(struct ieee80211com *ic)
6145 {
6146 #define IWM_EXP2(x)     ((1 << (x)) - 1)        /* CWmin = 2^ECWmin - 1 */
6147         struct iwm_softc *sc = ic->ic_softc;
6148         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6149         struct iwm_vap *ivp = IWM_VAP(vap);
6150         struct iwm_node *in;
6151         struct wmeParams tmp[WME_NUM_AC];
6152         int aci, error;
6153
6154         if (vap == NULL)
6155                 return (0);
6156
6157         IEEE80211_LOCK(ic);
6158         for (aci = 0; aci < WME_NUM_AC; aci++)
6159                 tmp[aci] = ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
6160         IEEE80211_UNLOCK(ic);
6161
6162         IWM_LOCK(sc);
6163         for (aci = 0; aci < WME_NUM_AC; aci++) {
6164                 const struct wmeParams *ac = &tmp[aci];
6165                 ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6166                 ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6167                 ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6168                 ivp->queue_params[aci].edca_txop =
6169                     IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6170         }
6171         ivp->have_wme = TRUE;
6172         if (ivp->is_uploaded && vap->iv_bss != NULL) {
6173                 in = IWM_NODE(vap->iv_bss);
6174                 if (in->in_assoc) {
6175                         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
6176                                 device_printf(sc->sc_dev,
6177                                     "%s: failed to update MAC\n", __func__);
6178                         }
6179                 }
6180         }
6181         IWM_UNLOCK(sc);
6182
6183         return (0);
6184 #undef IWM_EXP2
6185 }
6186
/*
 * Deferred attach, run from a config_intrhook once interrupts are
 * available: start the hardware, run the init firmware to obtain NVM
 * data (hw address, band capabilities), then attach the net80211 layer
 * and install the driver's ic_* method table.
 *
 * On any failure the hook is disestablished and the partially-attached
 * driver state is torn down via iwm_detach_local().
 */
static void
iwm_preinit(void *arg)
{
	struct iwm_softc *sc = arg;
	device_t dev = sc->sc_dev;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s\n", __func__);

	IWM_LOCK(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		IWM_UNLOCK(sc);
		goto fail;
	}

	/*
	 * Run the init ucode just long enough to read the NVM; the
	 * device is stopped again afterwards regardless of the result.
	 */
	error = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error) {
		IWM_UNLOCK(sc);
		goto fail;
	}
	device_printf(dev,
	    "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));

	/* not all hardware can do 5GHz band */
	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
	IWM_UNLOCK(sc);

	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/*
	 * At this point we've committed - if we fail to do setup,
	 * we now also have to tear down the net80211 state.
	 */
	ieee80211_ifattach(ic);
	ic->ic_vap_create = iwm_vap_create;
	ic->ic_vap_delete = iwm_vap_delete;
	ic->ic_raw_xmit = iwm_raw_xmit;
	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_scan_start = iwm_scan_start;
	ic->ic_scan_end = iwm_scan_end;
	ic->ic_update_mcast = iwm_update_mcast;
	ic->ic_getradiocaps = iwm_init_channel_map;
	ic->ic_set_channel = iwm_set_channel;
	ic->ic_scan_curchan = iwm_scan_curchan;
	ic->ic_scan_mindwell = iwm_scan_mindwell;
	ic->ic_wme.wme_update = iwm_wme_update;
	ic->ic_parent = iwm_parent;
	ic->ic_transmit = iwm_transmit;
	iwm_radiotap_attach(sc);
	if (bootverbose)
		ieee80211_announce(ic);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);
	config_intrhook_disestablish(&sc->sc_preinit_hook);

	return;
fail:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
	iwm_detach_local(sc, 0);
}
6257
6258 /*
6259  * Attach the interface to 802.11 radiotap.
6260  */
6261 static void
6262 iwm_radiotap_attach(struct iwm_softc *sc)
6263 {
6264         struct ieee80211com *ic = &sc->sc_ic;
6265
6266         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6267             "->%s begin\n", __func__);
6268         ieee80211_radiotap_attach(ic,
6269             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6270                 IWM_TX_RADIOTAP_PRESENT,
6271             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6272                 IWM_RX_RADIOTAP_PRESENT);
6273         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6274             "->%s end\n", __func__);
6275 }
6276
6277 static struct ieee80211vap *
6278 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6279     enum ieee80211_opmode opmode, int flags,
6280     const uint8_t bssid[IEEE80211_ADDR_LEN],
6281     const uint8_t mac[IEEE80211_ADDR_LEN])
6282 {
6283         struct iwm_vap *ivp;
6284         struct ieee80211vap *vap;
6285
6286         if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6287                 return NULL;
6288         ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6289         vap = &ivp->iv_vap;
6290         ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6291         vap->iv_bmissthreshold = 10;            /* override default */
6292         /* Override with driver methods. */
6293         ivp->iv_newstate = vap->iv_newstate;
6294         vap->iv_newstate = iwm_newstate;
6295
6296         ivp->id = IWM_DEFAULT_MACID;
6297         ivp->color = IWM_DEFAULT_COLOR;
6298
6299         ivp->have_wme = FALSE;
6300
6301         ieee80211_ratectl_init(vap);
6302         /* Complete setup. */
6303         ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6304             mac);
6305         ic->ic_opmode = opmode;
6306
6307         return vap;
6308 }
6309
6310 static void
6311 iwm_vap_delete(struct ieee80211vap *vap)
6312 {
6313         struct iwm_vap *ivp = IWM_VAP(vap);
6314
6315         ieee80211_ratectl_deinit(vap);
6316         ieee80211_vap_detach(vap);
6317         free(ivp, M_80211_VAP);
6318 }
6319
6320 static void
6321 iwm_scan_start(struct ieee80211com *ic)
6322 {
6323         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6324         struct iwm_softc *sc = ic->ic_softc;
6325         int error;
6326
6327         IWM_LOCK(sc);
6328         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6329                 /* This should not be possible */
6330                 device_printf(sc->sc_dev,
6331                     "%s: Previous scan not completed yet\n", __func__);
6332         }
6333         if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6334                 error = iwm_mvm_umac_scan(sc);
6335         else
6336                 error = iwm_mvm_lmac_scan(sc);
6337         if (error != 0) {
6338                 device_printf(sc->sc_dev, "could not initiate scan\n");
6339                 IWM_UNLOCK(sc);
6340                 ieee80211_cancel_scan(vap);
6341         } else {
6342                 sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6343                 iwm_led_blink_start(sc);
6344                 IWM_UNLOCK(sc);
6345         }
6346 }
6347
6348 static void
6349 iwm_scan_end(struct ieee80211com *ic)
6350 {
6351         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6352         struct iwm_softc *sc = ic->ic_softc;
6353
6354         IWM_LOCK(sc);
6355         iwm_led_blink_stop(sc);
6356         if (vap->iv_state == IEEE80211_S_RUN)
6357                 iwm_mvm_led_enable(sc);
6358         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6359                 /*
6360                  * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
6361                  * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
6362                  * taskqueue.
6363                  */
6364                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
6365                 iwm_mvm_scan_stop_wait(sc);
6366         }
6367         IWM_UNLOCK(sc);
6368
6369         /*
6370          * Make sure we don't race, if sc_es_task is still enqueued here.
6371          * This is to make sure that it won't call ieee80211_scan_done
6372          * when we have already started the next scan.
6373          */
6374         taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
6375 }
6376
/*
 * Required net80211 callback; intentionally a no-op in this driver.
 */
static void
iwm_update_mcast(struct ieee80211com *ic)
{
}
6381
/*
 * Required net80211 callback; intentionally a no-op — channel changes
 * are presumably handled via firmware commands elsewhere (TODO confirm).
 */
static void
iwm_set_channel(struct ieee80211com *ic)
{
}
6386
/*
 * Required net80211 scan callback; intentionally a no-op — per-channel
 * dwelling is presumably handled by the firmware scan (TODO confirm).
 */
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
6391
/*
 * Required net80211 scan callback; intentionally a no-op — minimum-dwell
 * handling is presumably done by the firmware scan (TODO confirm).
 *
 * The redundant bare "return;" was dropped for consistency with the
 * driver's other empty scan callbacks.
 */
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
6397
/*
 * Restart the device: stop it and, if any interface is still running,
 * bring it back up.  Serialized against other restart attempts via the
 * IWM_FLAG_BUSY flag, using msleep/wakeup on &sc->sc_flags under the
 * driver lock.
 */
void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;

	IWM_LOCK(sc);
	/* Wait for any concurrent power/init task to finish. */
	while (sc->sc_flags & IWM_FLAG_BUSY)
		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
	sc->sc_flags |= IWM_FLAG_BUSY;
	iwm_stop(sc);
	/* Only re-init if an interface is up and running. */
	if (sc->sc_ic.ic_nrunning > 0)
		iwm_init(sc);
	sc->sc_flags &= ~IWM_FLAG_BUSY;
	/* Release any waiters blocked in the loop above. */
	wakeup(&sc->sc_flags);
	IWM_UNLOCK(sc);
}
6414
6415 static int
6416 iwm_resume(device_t dev)
6417 {
6418         struct iwm_softc *sc = device_get_softc(dev);
6419         int do_reinit = 0;
6420
6421         /*
6422          * We disable the RETRY_TIMEOUT register (0x41) to keep
6423          * PCI Tx retries from interfering with C3 CPU state.
6424          */
6425         pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6426         iwm_init_task(device_get_softc(dev));
6427
6428         IWM_LOCK(sc);
6429         if (sc->sc_flags & IWM_FLAG_SCANNING) {
6430                 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6431                 do_reinit = 1;
6432         }
6433         IWM_UNLOCK(sc);
6434
6435         if (do_reinit)
6436                 ieee80211_resume_all(&sc->sc_ic);
6437
6438         return 0;
6439 }
6440
6441 static int
6442 iwm_suspend(device_t dev)
6443 {
6444         int do_stop = 0;
6445         struct iwm_softc *sc = device_get_softc(dev);
6446
6447         do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6448
6449         ieee80211_suspend_all(&sc->sc_ic);
6450
6451         if (do_stop) {
6452                 IWM_LOCK(sc);
6453                 iwm_stop(sc);
6454                 sc->sc_flags |= IWM_FLAG_SCANNING;
6455                 IWM_UNLOCK(sc);
6456         }
6457
6458         return (0);
6459 }
6460
/*
 * Common teardown for both device detach and failed attach.
 *
 * do_net80211 selects whether the net80211 layer was attached and must
 * be torn down too (attach failures before ieee80211_ifattach() pass 0).
 * Idempotent via sc_attached.  The ordering below matters: tasks and
 * callouts are drained before the hardware is stopped, and the hardware
 * is stopped before its rings and DMA memory are freed.
 */
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	device_t dev = sc->sc_dev;
	int i;

	/* Nothing to do if attach never completed (or we already ran). */
	if (!sc->sc_attached)
		return 0;
	sc->sc_attached = 0;

	if (do_net80211)
		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);

	callout_drain(&sc->sc_led_blink_to);
	callout_drain(&sc->sc_watchdog_to);
	iwm_stop_device(sc);
	if (do_net80211) {
		ieee80211_ifdetach(&sc->sc_ic);
	}

	iwm_phy_db_free(sc->sc_phy_db);
	sc->sc_phy_db = NULL;

	iwm_free_nvm_data(sc->nvm_data);

	/* Free descriptor rings */
	iwm_free_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwm_free_tx_ring(sc, &sc->txq[i]);

	/* Free firmware */
	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/* Free scheduler */
	iwm_dma_contig_free(&sc->sched_dma);
	iwm_dma_contig_free(&sc->ict_dma);
	iwm_dma_contig_free(&sc->kw_dma);
	iwm_dma_contig_free(&sc->fw_dma);

	iwm_free_fw_paging(sc);

	/* Finished with the hardware - detach things */
	iwm_pci_detach(dev);

	if (sc->sc_notif_wait != NULL) {
		iwm_notification_wait_free(sc->sc_notif_wait);
		sc->sc_notif_wait = NULL;
	}

	mbufq_drain(&sc->sc_snd);
	IWM_LOCK_DESTROY(sc);

	return (0);
}
6517
6518 static int
6519 iwm_detach(device_t dev)
6520 {
6521         struct iwm_softc *sc = device_get_softc(dev);
6522
6523         return (iwm_detach_local(sc, 1));
6524 }
6525
/* Newbus method table wiring this driver into the PCI bus framework. */
static device_method_t iwm_pci_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         iwm_probe),
        DEVMETHOD(device_attach,        iwm_attach),
        DEVMETHOD(device_detach,        iwm_detach),
        DEVMETHOD(device_suspend,       iwm_suspend),
        DEVMETHOD(device_resume,        iwm_resume),

        DEVMETHOD_END
};
6536
/* Driver description: name, method table, and per-device softc size. */
static driver_t iwm_pci_driver = {
        "iwm",
        iwm_pci_methods,
        sizeof (struct iwm_softc)
};

static devclass_t iwm_devclass;

/* Register on the PCI bus and declare module dependencies. */
DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);