/*
 * sys/dev/iwm/if_iwm.c (FreeBSD)
 * Change context: "iwm - Avoid Tx watchdog timeout, when dropping a connection."
 */
1 /*      $OpenBSD: if_iwm.c,v 1.167 2017/04/04 00:40:52 claudio Exp $    */
2
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 #include <sys/cdefs.h>
106 __FBSDID("$FreeBSD$");
107
108 #include "opt_wlan.h"
109 #include "opt_iwm.h"
110
111 #include <sys/param.h>
112 #include <sys/bus.h>
113 #include <sys/conf.h>
114 #include <sys/endian.h>
115 #include <sys/firmware.h>
116 #include <sys/kernel.h>
117 #include <sys/malloc.h>
118 #include <sys/mbuf.h>
119 #include <sys/mutex.h>
120 #include <sys/module.h>
121 #include <sys/proc.h>
122 #include <sys/rman.h>
123 #include <sys/socket.h>
124 #include <sys/sockio.h>
125 #include <sys/sysctl.h>
126 #include <sys/linker.h>
127
128 #include <machine/bus.h>
129 #include <machine/endian.h>
130 #include <machine/resource.h>
131
132 #include <dev/pci/pcivar.h>
133 #include <dev/pci/pcireg.h>
134
135 #include <net/bpf.h>
136
137 #include <net/if.h>
138 #include <net/if_var.h>
139 #include <net/if_arp.h>
140 #include <net/if_dl.h>
141 #include <net/if_media.h>
142 #include <net/if_types.h>
143
144 #include <netinet/in.h>
145 #include <netinet/in_systm.h>
146 #include <netinet/if_ether.h>
147 #include <netinet/ip.h>
148
149 #include <net80211/ieee80211_var.h>
150 #include <net80211/ieee80211_regdomain.h>
151 #include <net80211/ieee80211_ratectl.h>
152 #include <net80211/ieee80211_radiotap.h>
153
154 #include <dev/iwm/if_iwmreg.h>
155 #include <dev/iwm/if_iwmvar.h>
156 #include <dev/iwm/if_iwm_config.h>
157 #include <dev/iwm/if_iwm_debug.h>
158 #include <dev/iwm/if_iwm_notif_wait.h>
159 #include <dev/iwm/if_iwm_util.h>
160 #include <dev/iwm/if_iwm_binding.h>
161 #include <dev/iwm/if_iwm_phy_db.h>
162 #include <dev/iwm/if_iwm_mac_ctxt.h>
163 #include <dev/iwm/if_iwm_phy_ctxt.h>
164 #include <dev/iwm/if_iwm_time_event.h>
165 #include <dev/iwm/if_iwm_power.h>
166 #include <dev/iwm/if_iwm_scan.h>
167 #include <dev/iwm/if_iwm_sf.h>
168 #include <dev/iwm/if_iwm_sta.h>
169
170 #include <dev/iwm/if_iwm_pcie_trans.h>
171 #include <dev/iwm/if_iwm_led.h>
172 #include <dev/iwm/if_iwm_fw.h>
173
174 /* From DragonflyBSD */
175 #define mtodoff(m, t, off)      ((t)((m)->m_data + (off)))
176
/*
 * Channel numbers (in NVM order) that pre-8000-series hardware can use;
 * indexed alongside the per-channel flag words read from the NVM.
 */
const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
/* The NVM-derived flag arrays are dimensioned by IWM_NUM_CHANNELS. */
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");
187
/*
 * Channel numbers (in NVM order) for 8000-series hardware; this family
 * exposes a larger 5 GHz set than older NICs.
 */
const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
/* The NVM-derived flag arrays are dimensioned by IWM_NUM_CHANNELS_8000. */
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");
198
199 #define IWM_NUM_2GHZ_CHANNELS   14
200 #define IWM_N_HW_ADDR_MASK      0xF
201
/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 *
 * CCK rates occupy indices [0..3] and OFDM rates [4..11]; the
 * IWM_RIDX_* macros below encode that split.
 */
const struct iwm_rate {
	uint8_t rate;	/* rate in 500 kbps units (2 == 1 Mbps, per the PLCP names) */
	uint8_t plcp;	/* PLCP value the firmware expects for this rate */
} iwm_rates[] = {
	{   2,  IWM_RATE_1M_PLCP  },
	{   4,  IWM_RATE_2M_PLCP  },
	{  11,  IWM_RATE_5M_PLCP  },
	{  22,  IWM_RATE_11M_PLCP },
	{  12,  IWM_RATE_6M_PLCP  },
	{  18,  IWM_RATE_9M_PLCP  },
	{  24,  IWM_RATE_12M_PLCP },
	{  36,  IWM_RATE_18M_PLCP },
	{  48,  IWM_RATE_24M_PLCP },
	{  72,  IWM_RATE_36M_PLCP },
	{  96,  IWM_RATE_48M_PLCP },
	{ 108,  IWM_RATE_54M_PLCP },
};
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
228
/* One NVM section as read back from the device: length-prefixed blob. */
struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

/* Timeouts (in ticks) for waiting on ucode "alive" / calibration events. */
#define IWM_MVM_UCODE_ALIVE_TIMEOUT     hz
#define IWM_MVM_UCODE_CALIB_TIMEOUT     (2*hz)

/* Result of the ucode "alive" notification wait. */
struct iwm_mvm_alive_data {
	int valid;		/* nonzero once a valid alive response arrived */
	uint32_t scd_base_addr;	/* scheduler base address reported by ucode */
};
241
242 static int      iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
243 static int      iwm_firmware_store_section(struct iwm_softc *,
244                                            enum iwm_ucode_type,
245                                            const uint8_t *, size_t);
246 static int      iwm_set_default_calib(struct iwm_softc *, const void *);
247 static void     iwm_fw_info_free(struct iwm_fw_info *);
248 static int      iwm_read_firmware(struct iwm_softc *);
249 static int      iwm_alloc_fwmem(struct iwm_softc *);
250 static int      iwm_alloc_sched(struct iwm_softc *);
251 static int      iwm_alloc_kw(struct iwm_softc *);
252 static int      iwm_alloc_ict(struct iwm_softc *);
253 static int      iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
254 static void     iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
255 static void     iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
256 static int      iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
257                                   int);
258 static void     iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
259 static void     iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
260 static void     iwm_enable_interrupts(struct iwm_softc *);
261 static void     iwm_restore_interrupts(struct iwm_softc *);
262 static void     iwm_disable_interrupts(struct iwm_softc *);
263 static void     iwm_ict_reset(struct iwm_softc *);
264 static int      iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
265 static void     iwm_stop_device(struct iwm_softc *);
266 static void     iwm_mvm_nic_config(struct iwm_softc *);
267 static int      iwm_nic_rx_init(struct iwm_softc *);
268 static int      iwm_nic_tx_init(struct iwm_softc *);
269 static int      iwm_nic_init(struct iwm_softc *);
270 static int      iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
271 static int      iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
272                                    uint16_t, uint8_t *, uint16_t *);
273 static int      iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
274                                      uint16_t *, uint32_t);
275 static uint32_t iwm_eeprom_channel_flags(uint16_t);
276 static void     iwm_add_channel_band(struct iwm_softc *,
277                     struct ieee80211_channel[], int, int *, int, size_t,
278                     const uint8_t[]);
279 static void     iwm_init_channel_map(struct ieee80211com *, int, int *,
280                     struct ieee80211_channel[]);
281 static struct iwm_nvm_data *
282         iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
283                            const uint16_t *, const uint16_t *,
284                            const uint16_t *, const uint16_t *,
285                            const uint16_t *);
286 static void     iwm_free_nvm_data(struct iwm_nvm_data *);
287 static void     iwm_set_hw_address_family_8000(struct iwm_softc *,
288                                                struct iwm_nvm_data *,
289                                                const uint16_t *,
290                                                const uint16_t *);
291 static int      iwm_get_sku(const struct iwm_softc *, const uint16_t *,
292                             const uint16_t *);
293 static int      iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
294 static int      iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
295                                   const uint16_t *);
296 static int      iwm_get_n_hw_addrs(const struct iwm_softc *,
297                                    const uint16_t *);
298 static void     iwm_set_radio_cfg(const struct iwm_softc *,
299                                   struct iwm_nvm_data *, uint32_t);
300 static struct iwm_nvm_data *
301         iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
302 static int      iwm_nvm_init(struct iwm_softc *);
303 static int      iwm_pcie_load_section(struct iwm_softc *, uint8_t,
304                                       const struct iwm_fw_desc *);
305 static int      iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
306                                              bus_addr_t, uint32_t);
307 static int      iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
308                                                 const struct iwm_fw_img *,
309                                                 int, int *);
310 static int      iwm_pcie_load_cpu_sections(struct iwm_softc *,
311                                            const struct iwm_fw_img *,
312                                            int, int *);
313 static int      iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
314                                                const struct iwm_fw_img *);
315 static int      iwm_pcie_load_given_ucode(struct iwm_softc *,
316                                           const struct iwm_fw_img *);
317 static int      iwm_start_fw(struct iwm_softc *, const struct iwm_fw_img *);
318 static int      iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
319 static int      iwm_send_phy_cfg_cmd(struct iwm_softc *);
320 static int      iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
321                                               enum iwm_ucode_type);
322 static int      iwm_run_init_mvm_ucode(struct iwm_softc *, int);
323 static int      iwm_mvm_config_ltr(struct iwm_softc *sc);
324 static int      iwm_rx_addbuf(struct iwm_softc *, int, int);
325 static int      iwm_mvm_get_signal_strength(struct iwm_softc *,
326                                             struct iwm_rx_phy_info *);
327 static void     iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
328                                       struct iwm_rx_packet *);
329 static int      iwm_get_noise(struct iwm_softc *,
330                     const struct iwm_mvm_statistics_rx_non_phy *);
331 static void     iwm_mvm_handle_rx_statistics(struct iwm_softc *,
332                     struct iwm_rx_packet *);
333 static boolean_t iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *,
334                                     uint32_t, boolean_t);
335 static int      iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
336                                          struct iwm_rx_packet *,
337                                          struct iwm_node *);
338 static void     iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
339 static void     iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
340 #if 0
341 static void     iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
342                                  uint16_t);
343 #endif
344 static const struct iwm_rate *
345         iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
346                         struct mbuf *, struct iwm_tx_cmd *);
347 static int      iwm_tx(struct iwm_softc *, struct mbuf *,
348                        struct ieee80211_node *, int);
349 static int      iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
350                              const struct ieee80211_bpf_params *);
351 static int      iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_vap *);
352 static int      iwm_auth(struct ieee80211vap *, struct iwm_softc *);
353 static struct ieee80211_node *
354                 iwm_node_alloc(struct ieee80211vap *,
355                                const uint8_t[IEEE80211_ADDR_LEN]);
356 static uint8_t  iwm_rate_from_ucode_rate(uint32_t);
357 static int      iwm_rate2ridx(struct iwm_softc *, uint8_t);
358 static void     iwm_setrates(struct iwm_softc *, struct iwm_node *, int);
359 static int      iwm_media_change(struct ifnet *);
360 static int      iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
361 static void     iwm_endscan_cb(void *, int);
362 static int      iwm_send_bt_init_conf(struct iwm_softc *);
363 static boolean_t iwm_mvm_is_lar_supported(struct iwm_softc *);
364 static boolean_t iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *);
365 static int      iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
366 static void     iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
367 static int      iwm_init_hw(struct iwm_softc *);
368 static void     iwm_init(struct iwm_softc *);
369 static void     iwm_start(struct iwm_softc *);
370 static void     iwm_stop(struct iwm_softc *);
371 static void     iwm_watchdog(void *);
372 static void     iwm_parent(struct ieee80211com *);
373 #ifdef IWM_DEBUG
374 static const char *
375                 iwm_desc_lookup(uint32_t);
376 static void     iwm_nic_error(struct iwm_softc *);
377 static void     iwm_nic_umac_error(struct iwm_softc *);
378 #endif
379 static void     iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
380 static void     iwm_notif_intr(struct iwm_softc *);
381 static void     iwm_intr(void *);
382 static int      iwm_attach(device_t);
383 static int      iwm_is_valid_ether_addr(uint8_t *);
384 static void     iwm_preinit(void *);
385 static int      iwm_detach_local(struct iwm_softc *sc, int);
386 static void     iwm_init_task(void *);
387 static void     iwm_radiotap_attach(struct iwm_softc *);
388 static struct ieee80211vap *
389                 iwm_vap_create(struct ieee80211com *,
390                                const char [IFNAMSIZ], int,
391                                enum ieee80211_opmode, int,
392                                const uint8_t [IEEE80211_ADDR_LEN],
393                                const uint8_t [IEEE80211_ADDR_LEN]);
394 static void     iwm_vap_delete(struct ieee80211vap *);
395 static void     iwm_xmit_queue_drain(struct iwm_softc *);
396 static void     iwm_scan_start(struct ieee80211com *);
397 static void     iwm_scan_end(struct ieee80211com *);
398 static void     iwm_update_mcast(struct ieee80211com *);
399 static void     iwm_set_channel(struct ieee80211com *);
400 static void     iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
401 static void     iwm_scan_mindwell(struct ieee80211_scan_state *);
402 static int      iwm_detach(device_t);
403
404 static int      iwm_lar_disable = 0;
405 TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);
406
407 /*
408  * Firmware parser.
409  */
410
411 static int
412 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
413 {
414         const struct iwm_fw_cscheme_list *l = (const void *)data;
415
416         if (dlen < sizeof(*l) ||
417             dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
418                 return EINVAL;
419
420         /* we don't actually store anything for now, always use s/w crypto */
421
422         return 0;
423 }
424
425 static int
426 iwm_firmware_store_section(struct iwm_softc *sc,
427     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
428 {
429         struct iwm_fw_img *fws;
430         struct iwm_fw_desc *fwone;
431
432         if (type >= IWM_UCODE_TYPE_MAX)
433                 return EINVAL;
434         if (dlen < sizeof(uint32_t))
435                 return EINVAL;
436
437         fws = &sc->sc_fw.img[type];
438         if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
439                 return EINVAL;
440
441         fwone = &fws->sec[fws->fw_count];
442
443         /* first 32bit are device load offset */
444         memcpy(&fwone->offset, data, sizeof(uint32_t));
445
446         /* rest is data */
447         fwone->data = data + sizeof(uint32_t);
448         fwone->len = dlen - sizeof(uint32_t);
449
450         fws->fw_count++;
451
452         return 0;
453 }
454
/* Default scan-channel count used until the firmware TLVs override it. */
#define IWM_DEFAULT_SCAN_CHANNELS 40

/* iwlwifi: iwl-drv.c */
/* Wire layout of a default-calibration TLV payload. */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;	/* little-endian on the wire (le32toh'd by the consumer) */
	struct iwm_tlv_calib_ctrl calib;
} __packed;
462
463 static int
464 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
465 {
466         const struct iwm_tlv_calib_data *def_calib = data;
467         uint32_t ucode_type = le32toh(def_calib->ucode_type);
468
469         if (ucode_type >= IWM_UCODE_TYPE_MAX) {
470                 device_printf(sc->sc_dev,
471                     "Wrong ucode_type %u for default "
472                     "calibration.\n", ucode_type);
473                 return EINVAL;
474         }
475
476         sc->sc_default_calib[ucode_type].flow_trigger =
477             def_calib->calib.flow_trigger;
478         sc->sc_default_calib[ucode_type].event_trigger =
479             def_calib->calib.event_trigger;
480
481         return 0;
482 }
483
484 static int
485 iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
486                         struct iwm_ucode_capabilities *capa)
487 {
488         const struct iwm_ucode_api *ucode_api = (const void *)data;
489         uint32_t api_index = le32toh(ucode_api->api_index);
490         uint32_t api_flags = le32toh(ucode_api->api_flags);
491         int i;
492
493         if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
494                 device_printf(sc->sc_dev,
495                     "api flags index %d larger than supported by driver\n",
496                     api_index);
497                 /* don't return an error so we can load FW that has more bits */
498                 return 0;
499         }
500
501         for (i = 0; i < 32; i++) {
502                 if (api_flags & (1U << i))
503                         setbit(capa->enabled_api, i + 32 * api_index);
504         }
505
506         return 0;
507 }
508
509 static int
510 iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
511                            struct iwm_ucode_capabilities *capa)
512 {
513         const struct iwm_ucode_capa *ucode_capa = (const void *)data;
514         uint32_t api_index = le32toh(ucode_capa->api_index);
515         uint32_t api_flags = le32toh(ucode_capa->api_capa);
516         int i;
517
518         if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
519                 device_printf(sc->sc_dev,
520                     "capa flags index %d larger than supported by driver\n",
521                     api_index);
522                 /* don't return an error so we can load FW that has more bits */
523                 return 0;
524         }
525
526         for (i = 0; i < 32; i++) {
527                 if (api_flags & (1U << i))
528                         setbit(capa->enabled_capa, i + 32 * api_index);
529         }
530
531         return 0;
532 }
533
/*
 * Drop the reference on the loaded firmware file and clear the parsed
 * image/section descriptors.  fw_fp is NULLed so a subsequent free or
 * detach path does not release the reference twice.
 */
static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	/* Section pointers referenced fw_fp's data; invalidate them all. */
	memset(fw->img, 0, sizeof(fw->img));
}
541
542 static int
543 iwm_read_firmware(struct iwm_softc *sc)
544 {
545         struct iwm_fw_info *fw = &sc->sc_fw;
546         const struct iwm_tlv_ucode_header *uhdr;
547         const struct iwm_ucode_tlv *tlv;
548         struct iwm_ucode_capabilities *capa = &sc->sc_fw.ucode_capa;
549         enum iwm_ucode_tlv_type tlv_type;
550         const struct firmware *fwp;
551         const uint8_t *data;
552         uint32_t tlv_len;
553         uint32_t usniffer_img;
554         const uint8_t *tlv_data;
555         uint32_t paging_mem_size;
556         int num_of_cpus;
557         int error = 0;
558         size_t len;
559
560         /*
561          * Load firmware into driver memory.
562          * fw_fp will be set.
563          */
564         fwp = firmware_get(sc->cfg->fw_name);
565         if (fwp == NULL) {
566                 device_printf(sc->sc_dev,
567                     "could not read firmware %s (error %d)\n",
568                     sc->cfg->fw_name, error);
569                 goto out;
570         }
571         fw->fw_fp = fwp;
572
573         /* (Re-)Initialize default values. */
574         capa->flags = 0;
575         capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
576         capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
577         memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
578         memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
579         memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
580
581         /*
582          * Parse firmware contents
583          */
584
585         uhdr = (const void *)fw->fw_fp->data;
586         if (*(const uint32_t *)fw->fw_fp->data != 0
587             || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
588                 device_printf(sc->sc_dev, "invalid firmware %s\n",
589                     sc->cfg->fw_name);
590                 error = EINVAL;
591                 goto out;
592         }
593
594         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
595             IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
596             IWM_UCODE_MINOR(le32toh(uhdr->ver)),
597             IWM_UCODE_API(le32toh(uhdr->ver)));
598         data = uhdr->data;
599         len = fw->fw_fp->datasize - sizeof(*uhdr);
600
601         while (len >= sizeof(*tlv)) {
602                 len -= sizeof(*tlv);
603                 tlv = (const void *)data;
604
605                 tlv_len = le32toh(tlv->length);
606                 tlv_type = le32toh(tlv->type);
607                 tlv_data = tlv->data;
608
609                 if (len < tlv_len) {
610                         device_printf(sc->sc_dev,
611                             "firmware too short: %zu bytes\n",
612                             len);
613                         error = EINVAL;
614                         goto parse_out;
615                 }
616                 len -= roundup2(tlv_len, 4);
617                 data += sizeof(*tlv) + roundup2(tlv_len, 4);
618
619                 switch ((int)tlv_type) {
620                 case IWM_UCODE_TLV_PROBE_MAX_LEN:
621                         if (tlv_len != sizeof(uint32_t)) {
622                                 device_printf(sc->sc_dev,
623                                     "%s: PROBE_MAX_LEN (%u) != sizeof(uint32_t)\n",
624                                     __func__, tlv_len);
625                                 error = EINVAL;
626                                 goto parse_out;
627                         }
628                         capa->max_probe_length =
629                             le32_to_cpup((const uint32_t *)tlv_data);
630                         /* limit it to something sensible */
631                         if (capa->max_probe_length >
632                             IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
633                                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
634                                     "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
635                                     "ridiculous\n", __func__);
636                                 error = EINVAL;
637                                 goto parse_out;
638                         }
639                         break;
640                 case IWM_UCODE_TLV_PAN:
641                         if (tlv_len) {
642                                 device_printf(sc->sc_dev,
643                                     "%s: IWM_UCODE_TLV_PAN: tlv_len (%u) > 0\n",
644                                     __func__, tlv_len);
645                                 error = EINVAL;
646                                 goto parse_out;
647                         }
648                         capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
649                         break;
650                 case IWM_UCODE_TLV_FLAGS:
651                         if (tlv_len < sizeof(uint32_t)) {
652                                 device_printf(sc->sc_dev,
653                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) < sizeof(uint32_t)\n",
654                                     __func__, tlv_len);
655                                 error = EINVAL;
656                                 goto parse_out;
657                         }
658                         if (tlv_len % sizeof(uint32_t)) {
659                                 device_printf(sc->sc_dev,
660                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) %% sizeof(uint32_t)\n",
661                                     __func__, tlv_len);
662                                 error = EINVAL;
663                                 goto parse_out;
664                         }
665                         /*
666                          * Apparently there can be many flags, but Linux driver
667                          * parses only the first one, and so do we.
668                          *
669                          * XXX: why does this override IWM_UCODE_TLV_PAN?
670                          * Intentional or a bug?  Observations from
671                          * current firmware file:
672                          *  1) TLV_PAN is parsed first
673                          *  2) TLV_FLAGS contains TLV_FLAGS_PAN
674                          * ==> this resets TLV_PAN to itself... hnnnk
675                          */
676                         capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
677                         break;
678                 case IWM_UCODE_TLV_CSCHEME:
679                         if ((error = iwm_store_cscheme(sc,
680                             tlv_data, tlv_len)) != 0) {
681                                 device_printf(sc->sc_dev,
682                                     "%s: iwm_store_cscheme(): returned %d\n",
683                                     __func__, error);
684                                 goto parse_out;
685                         }
686                         break;
687                 case IWM_UCODE_TLV_NUM_OF_CPU:
688                         if (tlv_len != sizeof(uint32_t)) {
689                                 device_printf(sc->sc_dev,
690                                     "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%u) != sizeof(uint32_t)\n",
691                                     __func__, tlv_len);
692                                 error = EINVAL;
693                                 goto parse_out;
694                         }
695                         num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
696                         if (num_of_cpus == 2) {
697                                 fw->img[IWM_UCODE_REGULAR].is_dual_cpus =
698                                         TRUE;
699                                 fw->img[IWM_UCODE_INIT].is_dual_cpus =
700                                         TRUE;
701                                 fw->img[IWM_UCODE_WOWLAN].is_dual_cpus =
702                                         TRUE;
703                         } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
704                                 device_printf(sc->sc_dev,
705                                     "%s: Driver supports only 1 or 2 CPUs\n",
706                                     __func__);
707                                 error = EINVAL;
708                                 goto parse_out;
709                         }
710                         break;
711                 case IWM_UCODE_TLV_SEC_RT:
712                         if ((error = iwm_firmware_store_section(sc,
713                             IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
714                                 device_printf(sc->sc_dev,
715                                     "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
716                                     __func__, error);
717                                 goto parse_out;
718                         }
719                         break;
720                 case IWM_UCODE_TLV_SEC_INIT:
721                         if ((error = iwm_firmware_store_section(sc,
722                             IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
723                                 device_printf(sc->sc_dev,
724                                     "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
725                                     __func__, error);
726                                 goto parse_out;
727                         }
728                         break;
729                 case IWM_UCODE_TLV_SEC_WOWLAN:
730                         if ((error = iwm_firmware_store_section(sc,
731                             IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
732                                 device_printf(sc->sc_dev,
733                                     "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
734                                     __func__, error);
735                                 goto parse_out;
736                         }
737                         break;
738                 case IWM_UCODE_TLV_DEF_CALIB:
739                         if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
740                                 device_printf(sc->sc_dev,
741                                     "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%u) < sizeof(iwm_tlv_calib_data) (%zu)\n",
742                                     __func__, tlv_len,
743                                     sizeof(struct iwm_tlv_calib_data));
744                                 error = EINVAL;
745                                 goto parse_out;
746                         }
747                         if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
748                                 device_printf(sc->sc_dev,
749                                     "%s: iwm_set_default_calib() failed: %d\n",
750                                     __func__, error);
751                                 goto parse_out;
752                         }
753                         break;
754                 case IWM_UCODE_TLV_PHY_SKU:
755                         if (tlv_len != sizeof(uint32_t)) {
756                                 error = EINVAL;
757                                 device_printf(sc->sc_dev,
758                                     "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%u) < sizeof(uint32_t)\n",
759                                     __func__, tlv_len);
760                                 goto parse_out;
761                         }
762                         sc->sc_fw.phy_config =
763                             le32_to_cpup((const uint32_t *)tlv_data);
764                         sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
765                                                   IWM_FW_PHY_CFG_TX_CHAIN) >>
766                                                   IWM_FW_PHY_CFG_TX_CHAIN_POS;
767                         sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
768                                                   IWM_FW_PHY_CFG_RX_CHAIN) >>
769                                                   IWM_FW_PHY_CFG_RX_CHAIN_POS;
770                         break;
771
772                 case IWM_UCODE_TLV_API_CHANGES_SET: {
773                         if (tlv_len != sizeof(struct iwm_ucode_api)) {
774                                 error = EINVAL;
775                                 goto parse_out;
776                         }
777                         if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
778                                 error = EINVAL;
779                                 goto parse_out;
780                         }
781                         break;
782                 }
783
784                 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
785                         if (tlv_len != sizeof(struct iwm_ucode_capa)) {
786                                 error = EINVAL;
787                                 goto parse_out;
788                         }
789                         if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
790                                 error = EINVAL;
791                                 goto parse_out;
792                         }
793                         break;
794                 }
795
796                 case 48: /* undocumented TLV */
797                 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
798                 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
799                         /* ignore, not used by current driver */
800                         break;
801
802                 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
803                         if ((error = iwm_firmware_store_section(sc,
804                             IWM_UCODE_REGULAR_USNIFFER, tlv_data,
805                             tlv_len)) != 0)
806                                 goto parse_out;
807                         break;
808
809                 case IWM_UCODE_TLV_PAGING:
810                         if (tlv_len != sizeof(uint32_t)) {
811                                 error = EINVAL;
812                                 goto parse_out;
813                         }
814                         paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);
815
816                         IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
817                             "%s: Paging: paging enabled (size = %u bytes)\n",
818                             __func__, paging_mem_size);
819                         if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
820                                 device_printf(sc->sc_dev,
821                                         "%s: Paging: driver supports up to %u bytes for paging image\n",
822                                         __func__, IWM_MAX_PAGING_IMAGE_SIZE);
823                                 error = EINVAL;
824                                 goto out;
825                         }
826                         if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
827                                 device_printf(sc->sc_dev,
828                                     "%s: Paging: image isn't multiple %u\n",
829                                     __func__, IWM_FW_PAGING_SIZE);
830                                 error = EINVAL;
831                                 goto out;
832                         }
833
834                         sc->sc_fw.img[IWM_UCODE_REGULAR].paging_mem_size =
835                             paging_mem_size;
836                         usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
837                         sc->sc_fw.img[usniffer_img].paging_mem_size =
838                             paging_mem_size;
839                         break;
840
841                 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
842                         if (tlv_len != sizeof(uint32_t)) {
843                                 error = EINVAL;
844                                 goto parse_out;
845                         }
846                         capa->n_scan_channels =
847                             le32_to_cpup((const uint32_t *)tlv_data);
848                         break;
849
850                 case IWM_UCODE_TLV_FW_VERSION:
851                         if (tlv_len != sizeof(uint32_t) * 3) {
852                                 error = EINVAL;
853                                 goto parse_out;
854                         }
855                         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
856                             "%d.%d.%d",
857                             le32toh(((const uint32_t *)tlv_data)[0]),
858                             le32toh(((const uint32_t *)tlv_data)[1]),
859                             le32toh(((const uint32_t *)tlv_data)[2]));
860                         break;
861
862                 case IWM_UCODE_TLV_FW_MEM_SEG:
863                         break;
864
865                 default:
866                         device_printf(sc->sc_dev,
867                             "%s: unknown firmware section %d, abort\n",
868                             __func__, tlv_type);
869                         error = EINVAL;
870                         goto parse_out;
871                 }
872         }
873
874         KASSERT(error == 0, ("unhandled error"));
875
876  parse_out:
877         if (error) {
878                 device_printf(sc->sc_dev, "firmware parse error %d, "
879                     "section type %d\n", error, tlv_type);
880         }
881
882  out:
883         if (error) {
884                 if (fw->fw_fp != NULL)
885                         iwm_fw_info_free(fw);
886         }
887
888         return error;
889 }
890
891 /*
892  * DMA resource routines
893  */
894
895 /* fwmem is used to load firmware onto the card */
896 static int
897 iwm_alloc_fwmem(struct iwm_softc *sc)
898 {
899         /* Must be aligned on a 16-byte boundary. */
900         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
901             IWM_FH_MEM_TB_MAX_LENGTH, 16);
902 }
903
904 /* tx scheduler rings.  not used? */
905 static int
906 iwm_alloc_sched(struct iwm_softc *sc)
907 {
908         /* TX scheduler rings must be aligned on a 1KB boundary. */
909         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
910             nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
911 }
912
913 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
914 static int
915 iwm_alloc_kw(struct iwm_softc *sc)
916 {
917         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
918 }
919
920 /* interrupt cause table */
921 static int
922 iwm_alloc_ict(struct iwm_softc *sc)
923 {
924         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
925             IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
926 }
927
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
        bus_size_t size;
        int i, error;

        /*
         * Allocate all DMA resources for one RX ring: the descriptor
         * array, the status area, a per-slot DMA map plus mbuf, and a
         * spare map used when swapping buffers.  On any failure the
         * partially built ring is torn down via iwm_free_rx_ring().
         */
        ring->cur = 0;

        /* Allocate RX descriptors (256-byte aligned). */
        /* One uint32_t per slot — presumably the buffer's DMA address
         * shifted as programmed in iwm_nic_rx_init(); TODO confirm. */
        size = IWM_RX_RING_COUNT * sizeof(uint32_t);
        error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "could not allocate RX ring DMA memory\n");
                goto fail;
        }
        ring->desc = ring->desc_dma.vaddr;

        /* Allocate RX status area (16-byte aligned). */
        error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
            sizeof(*ring->stat), 16);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "could not allocate RX status DMA memory\n");
                goto fail;
        }
        ring->stat = ring->stat_dma.vaddr;

        /* Create RX buffer DMA tag. */
        /* Single segment of exactly IWM_RBUF_SIZE, 32-bit addressable. */
        error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
            IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "%s: could not create RX buf DMA tag, error %d\n",
                    __func__, error);
                goto fail;
        }

        /* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
        error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "%s: could not create RX buf DMA map, error %d\n",
                    __func__, error);
                goto fail;
        }
        /*
         * Allocate and map RX buffers.
         */
        for (i = 0; i < IWM_RX_RING_COUNT; i++) {
                struct iwm_rx_data *data = &ring->data[i];
                error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
                if (error != 0) {
                        device_printf(sc->sc_dev,
                            "%s: could not create RX buf DMA map, error %d\n",
                            __func__, error);
                        goto fail;
                }
                data->m = NULL;

                /* iwm_rx_addbuf() attaches an mbuf to slot i. */
                if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
                        goto fail;
                }
        }
        return 0;

fail:   iwm_free_rx_ring(sc, ring);
        return error;
}
998
999 static void
1000 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1001 {
1002         /* Reset the ring state */
1003         ring->cur = 0;
1004
1005         /*
1006          * The hw rx ring index in shared memory must also be cleared,
1007          * otherwise the discrepancy can cause reprocessing chaos.
1008          */
1009         if (sc->rxq.stat)
1010                 memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1011 }
1012
1013 static void
1014 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1015 {
1016         int i;
1017
1018         iwm_dma_contig_free(&ring->desc_dma);
1019         iwm_dma_contig_free(&ring->stat_dma);
1020
1021         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1022                 struct iwm_rx_data *data = &ring->data[i];
1023
1024                 if (data->m != NULL) {
1025                         bus_dmamap_sync(ring->data_dmat, data->map,
1026                             BUS_DMASYNC_POSTREAD);
1027                         bus_dmamap_unload(ring->data_dmat, data->map);
1028                         m_freem(data->m);
1029                         data->m = NULL;
1030                 }
1031                 if (data->map != NULL) {
1032                         bus_dmamap_destroy(ring->data_dmat, data->map);
1033                         data->map = NULL;
1034                 }
1035         }
1036         if (ring->spare_map != NULL) {
1037                 bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1038                 ring->spare_map = NULL;
1039         }
1040         if (ring->data_dmat != NULL) {
1041                 bus_dma_tag_destroy(ring->data_dmat);
1042                 ring->data_dmat = NULL;
1043         }
1044 }
1045
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
        bus_addr_t paddr;
        bus_size_t size;
        size_t maxsize;
        int nsegments;
        int i, error;

        /*
         * Allocate all DMA resources for TX queue 'qid': the TFD
         * descriptor array for every queue, and additionally (for queues
         * up to and including the command queue) the command buffer
         * array plus per-slot DMA maps.  On failure everything built so
         * far is released via iwm_free_tx_ring().
         */
        ring->qid = qid;
        ring->queued = 0;
        ring->cur = 0;

        /* Allocate TX descriptors (256-byte aligned). */
        size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
        error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "could not allocate TX ring DMA memory\n");
                goto fail;
        }
        ring->desc = ring->desc_dma.vaddr;

        /*
         * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
         * to allocate commands space for other rings.
         */
        if (qid > IWM_MVM_CMD_QUEUE)
                return 0;

        size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
        error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "could not allocate TX cmd DMA memory\n");
                goto fail;
        }
        ring->cmd = ring->cmd_dma.vaddr;

        /* FW commands may require more mapped space than packets. */
        if (qid == IWM_MVM_CMD_QUEUE) {
                maxsize = IWM_RBUF_SIZE;
                nsegments = 1;
        } else {
                maxsize = MCLBYTES;
                /* Two TFD slots are reserved — presumably for the command
                 * header/scratch area; TODO confirm against iwm_tx(). */
                nsegments = IWM_MAX_SCATTER - 2;
        }

        error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
            nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
        if (error != 0) {
                device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
                goto fail;
        }

        /*
         * Precompute each slot's physical command and scratch addresses,
         * walking the contiguous command buffer one iwm_device_cmd at a
         * time, and create a DMA map per slot.
         */
        paddr = ring->cmd_dma.paddr;
        for (i = 0; i < IWM_TX_RING_COUNT; i++) {
                struct iwm_tx_data *data = &ring->data[i];

                data->cmd_paddr = paddr;
                data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
                    + offsetof(struct iwm_tx_cmd, scratch);
                paddr += sizeof(struct iwm_device_cmd);

                error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
                if (error != 0) {
                        device_printf(sc->sc_dev,
                            "could not create TX buf DMA map\n");
                        goto fail;
                }
        }
        /* The walk above must land exactly at the end of the buffer. */
        KASSERT(paddr == ring->cmd_dma.paddr + size,
            ("invalid physical address"));
        return 0;

fail:   iwm_free_tx_ring(sc, ring);
        return error;
}
1125
1126 static void
1127 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1128 {
1129         int i;
1130
1131         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1132                 struct iwm_tx_data *data = &ring->data[i];
1133
1134                 if (data->m != NULL) {
1135                         bus_dmamap_sync(ring->data_dmat, data->map,
1136                             BUS_DMASYNC_POSTWRITE);
1137                         bus_dmamap_unload(ring->data_dmat, data->map);
1138                         m_freem(data->m);
1139                         data->m = NULL;
1140                 }
1141         }
1142         /* Clear TX descriptors. */
1143         memset(ring->desc, 0, ring->desc_dma.size);
1144         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1145             BUS_DMASYNC_PREWRITE);
1146         sc->qfullmsk &= ~(1 << ring->qid);
1147         ring->queued = 0;
1148         ring->cur = 0;
1149
1150         if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
1151                 iwm_pcie_clear_cmd_in_flight(sc);
1152 }
1153
1154 static void
1155 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1156 {
1157         int i;
1158
1159         iwm_dma_contig_free(&ring->desc_dma);
1160         iwm_dma_contig_free(&ring->cmd_dma);
1161
1162         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1163                 struct iwm_tx_data *data = &ring->data[i];
1164
1165                 if (data->m != NULL) {
1166                         bus_dmamap_sync(ring->data_dmat, data->map,
1167                             BUS_DMASYNC_POSTWRITE);
1168                         bus_dmamap_unload(ring->data_dmat, data->map);
1169                         m_freem(data->m);
1170                         data->m = NULL;
1171                 }
1172                 if (data->map != NULL) {
1173                         bus_dmamap_destroy(ring->data_dmat, data->map);
1174                         data->map = NULL;
1175                 }
1176         }
1177         if (ring->data_dmat != NULL) {
1178                 bus_dma_tag_destroy(ring->data_dmat);
1179                 ring->data_dmat = NULL;
1180         }
1181 }
1182
1183 /*
1184  * High-level hardware frobbing routines
1185  */
1186
1187 static void
1188 iwm_enable_interrupts(struct iwm_softc *sc)
1189 {
1190         sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1191         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1192 }
1193
1194 static void
1195 iwm_restore_interrupts(struct iwm_softc *sc)
1196 {
1197         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1198 }
1199
1200 static void
1201 iwm_disable_interrupts(struct iwm_softc *sc)
1202 {
1203         /* disable interrupts */
1204         IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1205
1206         /* acknowledge all interrupts */
1207         IWM_WRITE(sc, IWM_CSR_INT, ~0);
1208         IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1209 }
1210
static void
iwm_ict_reset(struct iwm_softc *sc)
{
        /*
         * Re-initialize the interrupt cause table and switch the driver
         * into ICT mode.  Interrupts are masked for the duration; the
         * order (disable -> clear -> program -> enable) must be kept.
         */
        iwm_disable_interrupts(sc);

        /* Reset ICT table. */
        memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
        sc->ict_cur = 0;

        /* Set physical address of ICT table (4KB aligned). */
        IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
            IWM_CSR_DRAM_INT_TBL_ENABLE
            | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
            | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
            | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

        /* Switch to ICT interrupt mode in driver. */
        sc->sc_flags |= IWM_FLAG_USE_ICT;

        /* Re-enable interrupts. */
        /* Ack anything that became pending while we were masked first. */
        IWM_WRITE(sc, IWM_CSR_INT, ~0);
        iwm_enable_interrupts(sc);
}
1234
1235 /* iwlwifi pcie/trans.c */
1236
1237 /*
1238  * Since this .. hard-resets things, it's time to actually
1239  * mark the first vap (if any) as having no mac context.
1240  * It's annoying, but since the driver is potentially being
1241  * stop/start'ed whilst active (thanks openbsd port!) we
1242  * have to correctly track this.
1243  */
static void
iwm_stop_device(struct iwm_softc *sc)
{
        struct ieee80211com *ic = &sc->sc_ic;
        struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
        int chnl, qid;
        uint32_t mask = 0;

        /* tell the device to stop sending interrupts */
        iwm_disable_interrupts(sc);

        /*
         * FreeBSD-local: mark the first vap as not-uploaded,
         * so the next transition through auth/assoc
         * will correctly populate the MAC context.
         */
        if (vap) {
                struct iwm_vap *iv = IWM_VAP(vap);
                iv->phy_ctxt = NULL;
                iv->is_uploaded = 0;
        }
        sc->sc_firmware_state = 0;

        /* device going down, Stop using ICT table */
        sc->sc_flags &= ~IWM_FLAG_USE_ICT;

        /* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

        if (iwm_nic_lock(sc)) {
                /* Deactivate the TX scheduler. */
                iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

                /* Stop each Tx DMA channel */
                for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
                        IWM_WRITE(sc,
                            IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
                        mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
                }

                /* Wait for DMA channels to be idle */
                /* Poll up to 5000us for all channels' idle bits. */
                if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
                    5000)) {
                        device_printf(sc->sc_dev,
                            "Failing on timeout while stopping DMA channel: [0x%08x]\n",
                            IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
                }
                iwm_nic_unlock(sc);
        }
        iwm_pcie_rx_stop(sc);

        /* Stop RX ring. */
        iwm_reset_rx_ring(sc, &sc->rxq);

        /* Reset all TX rings. */
        for (qid = 0; qid < nitems(sc->txq); qid++)
                iwm_reset_tx_ring(sc, &sc->txq[qid]);

        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
                /* Power-down device's busmaster DMA clocks */
                if (iwm_nic_lock(sc)) {
                        iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
                            IWM_APMG_CLK_VAL_DMA_CLK_RQT);
                        iwm_nic_unlock(sc);
                }
                DELAY(5);
        }

        /* Make sure (redundant) we've released our request to stay awake */
        IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
            IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

        /* Stop the device, and put it in low power state */
        iwm_apm_stop(sc);

        /* Upon stop, the APM issues an interrupt if HW RF kill is set.
         * Clean again the interrupt here
         */
        iwm_disable_interrupts(sc);
        /* stop and reset the on-board processor */
        IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);

        /*
         * Even if we stop the HW, we still want the RF kill
         * interrupt
         */
        iwm_enable_rfkill_int(sc);
        iwm_check_rfkill(sc);
}
1331
1332 /* iwlwifi: mvm/ops.c */
1333 static void
1334 iwm_mvm_nic_config(struct iwm_softc *sc)
1335 {
1336         uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1337         uint32_t reg_val = 0;
1338         uint32_t phy_config = iwm_mvm_get_phy_config(sc);
1339
1340         radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1341             IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1342         radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1343             IWM_FW_PHY_CFG_RADIO_STEP_POS;
1344         radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1345             IWM_FW_PHY_CFG_RADIO_DASH_POS;
1346
1347         /* SKU control */
1348         reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1349             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1350         reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1351             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1352
1353         /* radio configuration */
1354         reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1355         reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1356         reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1357
1358         IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1359
1360         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1361             "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1362             radio_cfg_step, radio_cfg_dash);
1363
1364         /*
1365          * W/A : NIC is stuck in a reset state after Early PCIe power off
1366          * (PCIe power is lost before PERST# is asserted), causing ME FW
1367          * to lose ownership and not being able to obtain it back.
1368          */
1369         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1370                 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1371                     IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1372                     ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1373         }
1374 }
1375
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
        /*
         * Initialize RX ring.  This is from the iwn driver.
         */
        /*
         * Program the FH RX engine: clear the shared status area, stop
         * DMA, reset the hardware pointers, point the channel at our
         * descriptor ring and status area, then enable DMA.  Returns
         * EBUSY if the NIC lock cannot be taken.
         */
        memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

        /* Stop Rx DMA */
        iwm_pcie_rx_stop(sc);

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* reset and flush pointers */
        IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
        IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
        IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
        IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

        /* Set physical address of RX ring (256-byte aligned). */
        IWM_WRITE(sc,
            IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

        /* Set physical address of RX status (16-byte aligned). */
        IWM_WRITE(sc,
            IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

        /* Enable Rx DMA
         * XXX 5000 HW isn't supported by the iwm(4) driver.
         * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
         *      the credit mechanism in 5000 HW RX FIFO
         * Direct rx interrupts to hosts
         * Rx buffer size 4 or 8k or 12k
         * RB timeout 0x10
         * 256 RBDs
         */
        IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
            IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
            IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |  /* HW bug */
            IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
            IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
            (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
            IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

        IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

        /* W/A for interrupt coalescing bug in 7260 and 3160 */
        if (sc->cfg->host_interrupt_operation_mode)
                IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

        /*
         * Thus sayeth el jefe (iwlwifi) via a comment:
         *
         * This value should initially be 0 (before preparing any
         * RBs), should be 8 after preparing the first 8 RBs (for example)
         */
        IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

        iwm_nic_unlock(sc);

        return 0;
}
1439
1440 static int
1441 iwm_nic_tx_init(struct iwm_softc *sc)
1442 {
1443         int qid;
1444
1445         if (!iwm_nic_lock(sc))
1446                 return EBUSY;
1447
1448         /* Deactivate TX scheduler. */
1449         iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1450
1451         /* Set physical address of "keep warm" page (16-byte aligned). */
1452         IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1453
1454         /* Initialize TX rings. */
1455         for (qid = 0; qid < nitems(sc->txq); qid++) {
1456                 struct iwm_tx_ring *txq = &sc->txq[qid];
1457
1458                 /* Set physical address of TX ring (256-byte aligned). */
1459                 IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1460                     txq->desc_dma.paddr >> 8);
1461                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1462                     "%s: loading ring %d descriptors (%p) at %lx\n",
1463                     __func__,
1464                     qid, txq->desc,
1465                     (unsigned long) (txq->desc_dma.paddr >> 8));
1466         }
1467
1468         iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1469
1470         iwm_nic_unlock(sc);
1471
1472         return 0;
1473 }
1474
/*
 * Bring up the NIC core: APM, power settings (7000 family only), MVM
 * configuration, then RX and TX DMA, and finally enable shadow
 * registers.
 *
 * Returns 0 on success or the error from RX/TX initialization.
 */
static int
iwm_nic_init(struct iwm_softc *sc)
{
        int error;

        iwm_apm_init(sc);
        /* Power register tweak is only applied on the 7000 family. */
        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
                iwm_set_pwr(sc);

        iwm_mvm_nic_config(sc);

        if ((error = iwm_nic_rx_init(sc)) != 0)
                return error;

        /*
         * Ditto for TX, from iwn
         */
        if ((error = iwm_nic_tx_init(sc)) != 0)
                return error;

        IWM_DPRINTF(sc, IWM_DEBUG_RESET,
            "%s: shadow registers enabled\n", __func__);
        /* Enable shadow registers so the driver can read without waking HW. */
        IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

        return 0;
}
1501
/*
 * Activate TX queue 'qid' and bind it to TX FIFO 'fifo' for station
 * 'sta_id'.
 *
 * The command queue (IWM_MVM_CMD_QUEUE) has to be configured directly
 * through scheduler PRPH registers and SRAM, because no firmware
 * commands can be sent until the command queue itself works.  All other
 * queues are configured by sending an IWM_SCD_QUEUE_CFG command to the
 * firmware.  The NIC lock is dropped around operations that take it
 * internally (iwm_clear_bits_prph, iwm_write_mem32, command send).
 *
 * Returns 0 on success, EBUSY if the NIC could not be (re)locked, or
 * the error from iwm_mvm_send_cmd_pdu().
 */
int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
        if (!iwm_nic_lock(sc)) {
                device_printf(sc->sc_dev,
                    "%s: cannot enable txq %d\n",
                    __func__,
                    qid);
                return EBUSY;
        }

        /* Reset the hardware write pointer for this queue. */
        IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

        if (qid == IWM_MVM_CMD_QUEUE) {
                /* unactivate before configuration */
                iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
                    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
                    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

                iwm_nic_unlock(sc);

                /* Take the queue out of aggregation mode. */
                iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

                if (!iwm_nic_lock(sc)) {
                        device_printf(sc->sc_dev,
                            "%s: cannot enable txq %d\n", __func__, qid);
                        return EBUSY;
                }
                /* Reset the scheduler's read pointer as well. */
                iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
                iwm_nic_unlock(sc);

                /* Clear the queue's scheduler context in SRAM. */
                iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
                /* Set scheduler window size and frame limit. */
                iwm_write_mem32(sc,
                    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
                    sizeof(uint32_t),
                    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
                    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
                    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
                    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

                if (!iwm_nic_lock(sc)) {
                        device_printf(sc->sc_dev,
                            "%s: cannot enable txq %d\n", __func__, qid);
                        return EBUSY;
                }
                /* Mark the queue active and bind it to its FIFO. */
                iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
                    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
                    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
                    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
                    IWM_SCD_QUEUE_STTS_REG_MSK);
        } else {
                struct iwm_scd_txq_cfg_cmd cmd;
                int error;

                iwm_nic_unlock(sc);

                /* Let the firmware configure a non-command queue for us. */
                memset(&cmd, 0, sizeof(cmd));
                cmd.scd_queue = qid;
                cmd.enable = 1;
                cmd.sta_id = sta_id;
                cmd.tx_fifo = fifo;
                cmd.aggregate = 0;
                cmd.window = IWM_FRAME_LIMIT;

                error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
                    sizeof(cmd), &cmd);
                if (error) {
                        device_printf(sc->sc_dev,
                            "cannot enable txq %d\n", qid);
                        return error;
                }

                if (!iwm_nic_lock(sc))
                        return EBUSY;
        }

        /*
         * NOTE(review): this ORs the queue number itself (not 1 << qid)
         * into IWM_SCD_EN_CTRL, matching the OpenBSD/iwlwifi-derived
         * source this driver is based on -- confirm against the
         * scheduler register documentation before changing.
         */
        iwm_write_prph(sc, IWM_SCD_EN_CTRL,
            iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

        iwm_nic_unlock(sc);

        IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
            __func__, qid, fifo);

        return 0;
}
1589
/*
 * Finish transport bring-up after the firmware's ALIVE notification:
 * reset the ICT table, learn/verify the scheduler SRAM base address,
 * clear the scheduler context/status/translation area, point the
 * scheduler at its DRAM ring, enable the command queue and all TX DMA
 * channels, and re-enable L1-Active (except on the 8000 family).
 *
 * 'scd_base_addr' is the scheduler base address reported in the ALIVE
 * notification (0 to skip the cross-check).
 *
 * Returns 0 on success, EBUSY on lock/mem-write failure, or the error
 * from iwm_enable_txq().
 */
static int
iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
{
        int error, chnl;

        int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
            IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);

        if (!iwm_nic_lock(sc))
                return EBUSY;

        iwm_ict_reset(sc);

        /* The PRPH value is authoritative; only warn on a mismatch. */
        sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
        if (scd_base_addr != 0 &&
            scd_base_addr != sc->scd_base_addr) {
                device_printf(sc->sc_dev,
                    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
                    __func__, sc->scd_base_addr, scd_base_addr);
        }

        iwm_nic_unlock(sc);

        /* reset context data, TX status and translation data */
        error = iwm_write_mem(sc,
            sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
            NULL, clear_dwords);
        if (error)
                return EBUSY;

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* Set physical address of TX scheduler rings (1KB aligned). */
        iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

        /* Disable chain extension for all queues. */
        iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

        iwm_nic_unlock(sc);

        /* enable command channel */
        error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
        if (error)
                return error;

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* Activate the TX scheduler for all queues. */
        iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

        /* Enable DMA channels. */
        for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
                IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
                    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
                    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
        }

        IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
            IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

        iwm_nic_unlock(sc);

        /* Enable L1-Active */
        if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
                iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
                    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
        }

        /* error is 0 here on the success path. */
        return error;
}
1660
1661 /*
1662  * NVM read access and content parsing.  We do not support
1663  * external NVM or writing NVM.
1664  * iwlwifi/mvm/nvm.c
1665  */
1666
1667 /* Default NVM size to read */
1668 #define IWM_NVM_DEFAULT_CHUNK_SIZE      (2*1024)
1669
1670 #define IWM_NVM_WRITE_OPCODE 1
1671 #define IWM_NVM_READ_OPCODE 0
1672
/* load nvm chunk response: status codes in iwm_nvm_access_resp.status */
enum {
        IWM_READ_NVM_CHUNK_SUCCEED = 0,
        /* address multiple-of-2K and empty; not fatal if offset != 0 */
        IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
};
1678
/*
 * Read one chunk of an NVM section via the IWM_NVM_ACCESS_CMD firmware
 * command.
 *
 * 'section'/'offset'/'length' select what to read; on success the
 * chunk is copied into data + offset and *len is set to the number of
 * bytes actually read (0 with return 0 when the end of a 2K-multiple
 * section is reached).
 *
 * Returns 0 on success, the iwm_send_cmd() error, EIO on a firmware
 * status error, or EINVAL on an inconsistent response.
 */
static int
iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
        uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
{
        struct iwm_nvm_access_cmd nvm_access_cmd = {
                .offset = htole16(offset),
                .length = htole16(length),
                .type = htole16(section),
                .op_code = IWM_NVM_READ_OPCODE,
        };
        struct iwm_nvm_access_resp *nvm_resp;
        struct iwm_rx_packet *pkt;
        struct iwm_host_cmd cmd = {
                .id = IWM_NVM_ACCESS_CMD,
                /* Need the response packet; NVM must be readable in rfkill. */
                .flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
                .data = { &nvm_access_cmd, },
        };
        int ret, bytes_read, offset_read;
        uint8_t *resp_data;

        cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);

        ret = iwm_send_cmd(sc, &cmd);
        if (ret) {
                device_printf(sc->sc_dev,
                    "Could not send NVM_ACCESS command (error=%d)\n", ret);
                return ret;
        }

        pkt = cmd.resp_pkt;

        /* Extract NVM response */
        nvm_resp = (void *)pkt->data;
        ret = le16toh(nvm_resp->status);
        bytes_read = le16toh(nvm_resp->length);
        offset_read = le16toh(nvm_resp->offset);
        resp_data = nvm_resp->data;
        if (ret) {
                if ((offset != 0) &&
                    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
                        /*
                         * Meaning of NOT_VALID_ADDRESS: the driver tried to
                         * read a chunk from an address that is a multiple of
                         * 2K and got an error because that address is empty.
                         * Meaning of (offset != 0): the driver already read
                         * valid data from another chunk, so this case marks
                         * the end of the section rather than an error.
                         */
                        IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
                                    "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
                                    offset);
                        *len = 0;
                        ret = 0;
                } else {
                        IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
                                    "NVM access command failed with status %d\n", ret);
                        ret = EIO;
                }
                goto exit;
        }

        /* The response must echo the offset we asked for. */
        if (offset_read != offset) {
                device_printf(sc->sc_dev,
                    "NVM ACCESS response with invalid offset %d\n",
                    offset_read);
                ret = EINVAL;
                goto exit;
        }

        /* Never copy more than the caller's buffer can hold. */
        if (bytes_read > length) {
                device_printf(sc->sc_dev,
                    "NVM ACCESS response with too much data "
                    "(%d bytes requested, %d bytes received)\n",
                    length, bytes_read);
                ret = EINVAL;
                goto exit;
        }

        /* Write data to NVM */
        memcpy(data + offset, resp_data, bytes_read);
        *len = bytes_read;

 exit:
        /* Release the response packet in all cases. */
        iwm_free_resp(sc, &cmd);
        return ret;
}
1765
1766 /*
1767  * Reads an NVM section completely.
1768  * NICs prior to 7000 family don't have a real NVM, but just read
1769  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1770  * by uCode, we need to manually check in this case that we don't
1771  * overflow and try to read more than the EEPROM size.
1772  * For 7000 family NICs, we supply the maximal size we can read, and
1773  * the uCode fills the response with as much data as we can,
1774  * without overflowing, so no check is needed.
1775  */
1776 static int
1777 iwm_nvm_read_section(struct iwm_softc *sc,
1778         uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1779 {
1780         uint16_t seglen, length, offset = 0;
1781         int ret;
1782
1783         /* Set nvm section read length */
1784         length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1785
1786         seglen = length;
1787
1788         /* Read the NVM until exhausted (reading less than requested) */
1789         while (seglen == length) {
1790                 /* Check no memory assumptions fail and cause an overflow */
1791                 if ((size_read + offset + length) >
1792                     sc->cfg->eeprom_size) {
1793                         device_printf(sc->sc_dev,
1794                             "EEPROM size is too small for NVM\n");
1795                         return ENOBUFS;
1796                 }
1797
1798                 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1799                 if (ret) {
1800                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1801                                     "Cannot read NVM from section %d offset %d, length %d\n",
1802                                     section, offset, length);
1803                         return ret;
1804                 }
1805                 offset += seglen;
1806         }
1807
1808         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1809                     "NVM section %d read completed\n", section);
1810         *len = offset;
1811         return 0;
1812 }
1813
1814 /*
1815  * BEGIN IWM_NVM_PARSE
1816  */
1817
1818 /* iwlwifi/iwl-nvm-parse.c */
1819
/* NVM offsets (in words) definitions (pre-8000 family layout) */
enum iwm_nvm_offsets {
        /* NVM HW-Section offset (in words) definitions */
        IWM_HW_ADDR = 0x15,

/* NVM SW-Section offset (in words) definitions */
        IWM_NVM_SW_SECTION = 0x1C0,
        IWM_NVM_VERSION = 0,
        IWM_RADIO_CFG = 1,
        IWM_SKU = 2,
        IWM_N_HW_ADDRS = 3,
        /* channel flag table, relative to the SW section start */
        IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

/* NVM calibration section offset (in words) definitions */
        IWM_NVM_CALIB_SECTION = 0x2B8,
        IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
1837
/* NVM offsets (in words) for the 8000 family layout */
enum iwm_8000_nvm_offsets {
        /* NVM HW-Section offset (in words) definitions */
        IWM_HW_ADDR0_WFPM_8000 = 0x12,
        IWM_HW_ADDR1_WFPM_8000 = 0x16,
        IWM_HW_ADDR0_PCIE_8000 = 0x8A,
        IWM_HW_ADDR1_PCIE_8000 = 0x8E,
        IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,

        /* NVM SW-Section offset (in words) definitions */
        IWM_NVM_SW_SECTION_8000 = 0x1C0,
        IWM_NVM_VERSION_8000 = 0,
        IWM_RADIO_CFG_8000 = 0,
        IWM_SKU_8000 = 2,
        IWM_N_HW_ADDRS_8000 = 3,

        /* NVM REGULATORY -Section offset (in words) definitions */
        IWM_NVM_CHANNELS_8000 = 0,
        /* LAR config word moved between NVM versions (< 0xE39 uses OLD) */
        IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
        IWM_NVM_LAR_OFFSET_8000 = 0x507,
        IWM_NVM_LAR_ENABLED_8000 = 0x7,

        /* NVM calibration section offset (in words) definitions */
        IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
        IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
};
1863
/* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
        IWM_NVM_SKU_CAP_BAND_24GHZ      = (1 << 0),
        IWM_NVM_SKU_CAP_BAND_52GHZ      = (1 << 1),
        IWM_NVM_SKU_CAP_11N_ENABLE      = (1 << 2),
        IWM_NVM_SKU_CAP_11AC_ENABLE     = (1 << 3),
};
1871
1872 /* radio config bits (actual values from NVM definition) */
1873 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1874 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1875 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1876 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1877 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1878 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1879
1880 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)       (x & 0xF)
1881 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x)         ((x >> 4) & 0xF)
1882 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x)         ((x >> 8) & 0xF)
1883 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)         ((x >> 12) & 0xFFF)
1884 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)       ((x >> 24) & 0xF)
1885 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)       ((x >> 28) & 0xF)
1886
1887 /**
1888  * enum iwm_nvm_channel_flags - channel flags in NVM
1889  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1890  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1891  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1892  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1893  * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1894  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1895  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1896  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1897  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1898  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1899  */
/* Per-channel flag bits from the NVM channel table; see comment above. */
enum iwm_nvm_channel_flags {
        IWM_NVM_CHANNEL_VALID = (1 << 0),
        IWM_NVM_CHANNEL_IBSS = (1 << 1),
        IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
        IWM_NVM_CHANNEL_RADAR = (1 << 4),
        IWM_NVM_CHANNEL_DFS = (1 << 7),
        IWM_NVM_CHANNEL_WIDE = (1 << 8),
        IWM_NVM_CHANNEL_40MHZ = (1 << 9),
        IWM_NVM_CHANNEL_80MHZ = (1 << 10),
        IWM_NVM_CHANNEL_160MHZ = (1 << 11),
};
1911
1912 /*
1913  * Translate EEPROM flags to net80211.
1914  */
1915 static uint32_t
1916 iwm_eeprom_channel_flags(uint16_t ch_flags)
1917 {
1918         uint32_t nflags;
1919
1920         nflags = 0;
1921         if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1922                 nflags |= IEEE80211_CHAN_PASSIVE;
1923         if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1924                 nflags |= IEEE80211_CHAN_NOADHOC;
1925         if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1926                 nflags |= IEEE80211_CHAN_DFS;
1927                 /* Just in case. */
1928                 nflags |= IEEE80211_CHAN_NOADHOC;
1929         }
1930
1931         return (nflags);
1932 }
1933
/*
 * Add the NVM channels in [ch_idx, ch_num) to the net80211 channel
 * list 'chans', using the flags from sc->nvm_data->nvm_ch_flags and
 * the mode bitmap 'bands'.  Channels without the VALID flag are
 * skipped; the loop stops early if ieee80211_add_channel() fails
 * (e.g. the list is full).
 */
static void
iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
    int maxchans, int *nchans, int ch_idx, size_t ch_num,
    const uint8_t bands[])
{
        const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
        uint32_t nflags;
        uint16_t ch_flags;
        uint8_t ieee;
        int error;

        for (; ch_idx < ch_num; ch_idx++) {
                ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
                /* The channel number table differs per device family. */
                if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
                        ieee = iwm_nvm_channels[ch_idx];
                else
                        ieee = iwm_nvm_channels_8000[ch_idx];

                if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
                        IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
                            "Ch. %d Flags %x [%sGHz] - No traffic\n",
                            ieee, ch_flags,
                            (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
                            "5.2" : "2.4");
                        continue;
                }

                nflags = iwm_eeprom_channel_flags(ch_flags);
                error = ieee80211_add_channel(chans, maxchans, nchans,
                    ieee, 0, 0, nflags, bands);
                if (error != 0)
                        break;

                IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
                    "Ch. %d Flags %x [%sGHz] - Added\n",
                    ieee, ch_flags,
                    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
                    "5.2" : "2.4");
        }
}
1974
/*
 * net80211 ic_getradiocaps callback: build the channel list from the
 * NVM channel table.  2 GHz channels 1-13 are 11b/g, channel 14 is
 * 11b-only, and 5 GHz channels are added only if the SKU enables the
 * 5 GHz band.
 */
static void
iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
    struct ieee80211_channel chans[])
{
        struct iwm_softc *sc = ic->ic_softc;
        struct iwm_nvm_data *data = sc->nvm_data;
        uint8_t bands[IEEE80211_MODE_BYTES];
        size_t ch_num;

        memset(bands, 0, sizeof(bands));
        /* 1-13: 11b/g channels. */
        setbit(bands, IEEE80211_MODE_11B);
        setbit(bands, IEEE80211_MODE_11G);
        iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
            IWM_NUM_2GHZ_CHANNELS - 1, bands);

        /* 14: 11b channel only. */
        clrbit(bands, IEEE80211_MODE_11G);
        iwm_add_channel_band(sc, chans, maxchans, nchans,
            IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);

        if (data->sku_cap_band_52GHz_enable) {
                /* The channel table length differs per device family. */
                if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
                        ch_num = nitems(iwm_nvm_channels);
                else
                        ch_num = nitems(iwm_nvm_channels_8000);
                memset(bands, 0, sizeof(bands));
                setbit(bands, IEEE80211_MODE_11A);
                iwm_add_channel_band(sc, chans, maxchans, nchans,
                    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
        }
}
2007
/*
 * Determine the MAC address on 8000-family devices.
 *
 * Preference order: the MAC_OVERRIDE NVM section (if present and it
 * holds a valid, non-reserved, non-broadcast, non-multicast address),
 * then the WFMP PRPH registers (if the HW section exists).  On total
 * failure data->hw_addr is zeroed, which iwm_set_hw_address() later
 * rejects as invalid.
 */
static void
iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
        const uint16_t *mac_override, const uint16_t *nvm_hw)
{
        const uint8_t *hw_addr;

        if (mac_override) {
                /* Placeholder address some NVMs ship with; never use it. */
                static const uint8_t reserved_mac[] = {
                        0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
                };

                hw_addr = (const uint8_t *)(mac_override +
                                 IWM_MAC_ADDRESS_OVERRIDE_8000);

                /*
                 * Store the MAC address from MAO section.
                 * No byte swapping is required in MAO section
                 */
                IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);

                /*
                 * Force the use of the OTP MAC address in case of reserved MAC
                 * address in the NVM, or if address is given but invalid.
                 */
                if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
                    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
                    iwm_is_valid_ether_addr(data->hw_addr) &&
                    !IEEE80211_IS_MULTICAST(data->hw_addr))
                        return;

                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                    "%s: mac address from nvm override section invalid\n",
                    __func__);
        }

        if (nvm_hw) {
                /* read the mac address from WFMP registers */
                uint32_t mac_addr0 =
                    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
                uint32_t mac_addr1 =
                    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

                /* The register bytes are stored most-significant first. */
                hw_addr = (const uint8_t *)&mac_addr0;
                data->hw_addr[0] = hw_addr[3];
                data->hw_addr[1] = hw_addr[2];
                data->hw_addr[2] = hw_addr[1];
                data->hw_addr[3] = hw_addr[0];

                hw_addr = (const uint8_t *)&mac_addr1;
                data->hw_addr[4] = hw_addr[1];
                data->hw_addr[5] = hw_addr[0];

                return;
        }

        device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
        memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
2066
2067 static int
2068 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2069             const uint16_t *phy_sku)
2070 {
2071         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2072                 return le16_to_cpup(nvm_sw + IWM_SKU);
2073
2074         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2075 }
2076
2077 static int
2078 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2079 {
2080         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2081                 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2082         else
2083                 return le32_to_cpup((const uint32_t *)(nvm_sw +
2084                                                 IWM_NVM_VERSION_8000));
2085 }
2086
2087 static int
2088 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2089                   const uint16_t *phy_sku)
2090 {
2091         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2092                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2093
2094         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2095 }
2096
2097 static int
2098 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2099 {
2100         int n_hw_addr;
2101
2102         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2103                 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2104
2105         n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2106
2107         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2108 }
2109
2110 static void
2111 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2112                   uint32_t radio_cfg)
2113 {
2114         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2115                 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2116                 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2117                 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2118                 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2119                 return;
2120         }
2121
2122         /* set the radio configuration for family 8000 */
2123         data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2124         data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2125         data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2126         data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2127         data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2128         data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2129 }
2130
/*
 * Fill data->hw_addr from the NVM.  Pre-8000 devices store the MAC in
 * the HW section with a 16-bit little-endian word swizzle; the 8000
 * family uses iwm_set_hw_address_family_8000().
 *
 * Returns 0 on success or EINVAL if no valid MAC address was found.
 */
static int
iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
                   const uint16_t *nvm_hw, const uint16_t *mac_override)
{
#ifdef notyet /* for FAMILY 9000 */
        if (cfg->mac_addr_from_csr) {
                iwm_set_hw_address_from_csr(sc, data);
        } else
#endif
        if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
                const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);

                /* The byte order is little endian 16 bit, meaning 214365 */
                data->hw_addr[0] = hw_addr[1];
                data->hw_addr[1] = hw_addr[0];
                data->hw_addr[2] = hw_addr[3];
                data->hw_addr[3] = hw_addr[2];
                data->hw_addr[4] = hw_addr[5];
                data->hw_addr[5] = hw_addr[4];
        } else {
                iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
        }

        if (!iwm_is_valid_ether_addr(data->hw_addr)) {
                device_printf(sc->sc_dev, "no valid mac address was found\n");
                return EINVAL;
        }

        return 0;
}
2161
/*
 * Parse the raw NVM sections into a freshly allocated iwm_nvm_data,
 * including the trailing per-channel flag array (whose size depends on
 * the device family).  Extracts version, radio config, SKU bits, the
 * number of reserved MAC addresses, LAR state (8000 family only), the
 * MAC address, and the channel flags.
 *
 * Returns the allocated structure (caller frees with
 * iwm_free_nvm_data()) or NULL on allocation failure or if no valid
 * MAC address was found.
 */
static struct iwm_nvm_data *
iwm_parse_nvm_data(struct iwm_softc *sc,
                   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
                   const uint16_t *nvm_calib, const uint16_t *mac_override,
                   const uint16_t *phy_sku, const uint16_t *regulatory)
{
        struct iwm_nvm_data *data;
        uint32_t sku, radio_cfg;
        uint16_t lar_config;

        /* Channel flag array is sized per device family. */
        if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
                data = malloc(sizeof(*data) +
                    IWM_NUM_CHANNELS * sizeof(uint16_t),
                    M_DEVBUF, M_NOWAIT | M_ZERO);
        } else {
                data = malloc(sizeof(*data) +
                    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
                    M_DEVBUF, M_NOWAIT | M_ZERO);
        }
        if (!data)
                return NULL;

        data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);

        radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
        iwm_set_radio_cfg(sc, data, radio_cfg);

        sku = iwm_get_sku(sc, nvm_sw, phy_sku);
        data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
        data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
        /* 11n support is deliberately disabled regardless of the SKU bit. */
        data->sku_cap_11n_enable = 0;

        data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);

        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
                /* The LAR word moved between NVM versions. */
                uint16_t lar_offset = data->nvm_version < 0xE39 ?
                                       IWM_NVM_LAR_OFFSET_8000_OLD :
                                       IWM_NVM_LAR_OFFSET_8000;

                lar_config = le16_to_cpup(regulatory + lar_offset);
                data->lar_enabled = !!(lar_config &
                                       IWM_NVM_LAR_ENABLED_8000);
        }

        /* If no valid mac address was found - bail out */
        if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
                free(data, M_DEVBUF);
                return NULL;
        }

        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
                memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
                    IWM_NUM_CHANNELS * sizeof(uint16_t));
        } else {
                memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
                    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
        }

        return data;
}
2222
2223 static void
2224 iwm_free_nvm_data(struct iwm_nvm_data *data)
2225 {
2226         if (data != NULL)
2227                 free(data, M_DEVBUF);
2228 }
2229
/*
 * Validate that the mandatory NVM sections for this device family are
 * present, then hand the section pointers to iwm_parse_nvm_data().
 *
 * 7000 family: SW and HW sections are mandatory.
 * 8000 family: SW, REGULATORY and PHY_SKU are mandatory, plus at least
 * one of HW or MAC_OVERRIDE.
 *
 * Returns the parsed NVM data or NULL if a required section is missing
 * or parsing fails.  Panics on an unknown device family.
 */
static struct iwm_nvm_data *
iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
{
        const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;

        /* Checking for required sections */
        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
                if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
                    !sections[sc->cfg->nvm_hw_section_num].data) {
                        device_printf(sc->sc_dev,
                            "Can't parse empty OTP/NVM sections\n");
                        return NULL;
                }
        } else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
                /* SW and REGULATORY sections are mandatory */
                if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
                    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
                        device_printf(sc->sc_dev,
                            "Can't parse empty OTP/NVM sections\n");
                        return NULL;
                }
                /* MAC_OVERRIDE or at least HW section must exist */
                if (!sections[sc->cfg->nvm_hw_section_num].data &&
                    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
                        device_printf(sc->sc_dev,
                            "Can't parse mac_address, empty sections\n");
                        return NULL;
                }

                /* PHY_SKU section is mandatory in B0 */
                if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
                        device_printf(sc->sc_dev,
                            "Can't parse phy_sku in B0, empty sections\n");
                        return NULL;
                }
        } else {
                panic("unknown device family %d\n", sc->cfg->device_family);
        }

        /* Optional sections may legitimately be NULL here. */
        hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
        sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
        calib = (const uint16_t *)
            sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
        regulatory = (const uint16_t *)
            sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
        mac_override = (const uint16_t *)
            sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
        phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;

        return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
            phy_sku, regulatory);
}
2282
2283 static int
2284 iwm_nvm_init(struct iwm_softc *sc)
2285 {
2286         struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2287         int i, ret, section;
2288         uint32_t size_read = 0;
2289         uint8_t *nvm_buffer, *temp;
2290         uint16_t len;
2291
2292         memset(nvm_sections, 0, sizeof(nvm_sections));
2293
2294         if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2295                 return EINVAL;
2296
2297         /* load NVM values from nic */
2298         /* Read From FW NVM */
2299         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2300
2301         nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2302         if (!nvm_buffer)
2303                 return ENOMEM;
2304         for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2305                 /* we override the constness for initial read */
2306                 ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2307                                            &len, size_read);
2308                 if (ret)
2309                         continue;
2310                 size_read += len;
2311                 temp = malloc(len, M_DEVBUF, M_NOWAIT);
2312                 if (!temp) {
2313                         ret = ENOMEM;
2314                         break;
2315                 }
2316                 memcpy(temp, nvm_buffer, len);
2317
2318                 nvm_sections[section].data = temp;
2319                 nvm_sections[section].length = len;
2320         }
2321         if (!size_read)
2322                 device_printf(sc->sc_dev, "OTP is blank\n");
2323         free(nvm_buffer, M_DEVBUF);
2324
2325         sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2326         if (!sc->nvm_data)
2327                 return EINVAL;
2328         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2329                     "nvm version = %x\n", sc->nvm_data->nvm_version);
2330
2331         for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2332                 if (nvm_sections[i].data != NULL)
2333                         free(nvm_sections[i].data, M_DEVBUF);
2334         }
2335
2336         return 0;
2337 }
2338
2339 static int
2340 iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2341         const struct iwm_fw_desc *section)
2342 {
2343         struct iwm_dma_info *dma = &sc->fw_dma;
2344         uint8_t *v_addr;
2345         bus_addr_t p_addr;
2346         uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2347         int ret = 0;
2348
2349         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2350                     "%s: [%d] uCode section being loaded...\n",
2351                     __func__, section_num);
2352
2353         v_addr = dma->vaddr;
2354         p_addr = dma->paddr;
2355
2356         for (offset = 0; offset < section->len; offset += chunk_sz) {
2357                 uint32_t copy_size, dst_addr;
2358                 int extended_addr = FALSE;
2359
2360                 copy_size = MIN(chunk_sz, section->len - offset);
2361                 dst_addr = section->offset + offset;
2362
2363                 if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2364                     dst_addr <= IWM_FW_MEM_EXTENDED_END)
2365                         extended_addr = TRUE;
2366
2367                 if (extended_addr)
2368                         iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2369                                           IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2370
2371                 memcpy(v_addr, (const uint8_t *)section->data + offset,
2372                     copy_size);
2373                 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2374                 ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
2375                                                    copy_size);
2376
2377                 if (extended_addr)
2378                         iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2379                                             IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2380
2381                 if (ret) {
2382                         device_printf(sc->sc_dev,
2383                             "%s: Could not load the [%d] uCode section\n",
2384                             __func__, section_num);
2385                         break;
2386                 }
2387         }
2388
2389         return ret;
2390 }
2391
2392 /*
2393  * ucode
2394  */
2395 static int
2396 iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2397                              bus_addr_t phy_addr, uint32_t byte_cnt)
2398 {
2399         int ret;
2400
2401         sc->sc_fw_chunk_done = 0;
2402
2403         if (!iwm_nic_lock(sc))
2404                 return EBUSY;
2405
2406         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2407             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2408
2409         IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2410             dst_addr);
2411
2412         IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2413             phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2414
2415         IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2416             (iwm_get_dma_hi_addr(phy_addr)
2417              << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2418
2419         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2420             1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2421             1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2422             IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2423
2424         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2425             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2426             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2427             IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2428
2429         iwm_nic_unlock(sc);
2430
2431         /* wait up to 5s for this segment to load */
2432         ret = 0;
2433         while (!sc->sc_fw_chunk_done) {
2434                 ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
2435                 if (ret)
2436                         break;
2437         }
2438
2439         if (ret != 0) {
2440                 device_printf(sc->sc_dev,
2441                     "fw chunk addr 0x%x len %d failed to load\n",
2442                     dst_addr, byte_cnt);
2443                 return ETIMEDOUT;
2444         }
2445
2446         return 0;
2447 }
2448
2449 static int
2450 iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
2451         const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2452 {
2453         int shift_param;
2454         int i, ret = 0, sec_num = 0x1;
2455         uint32_t val, last_read_idx = 0;
2456
2457         if (cpu == 1) {
2458                 shift_param = 0;
2459                 *first_ucode_section = 0;
2460         } else {
2461                 shift_param = 16;
2462                 (*first_ucode_section)++;
2463         }
2464
2465         for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2466                 last_read_idx = i;
2467
2468                 /*
2469                  * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2470                  * CPU1 to CPU2.
2471                  * PAGING_SEPARATOR_SECTION delimiter - separate between
2472                  * CPU2 non paged to CPU2 paging sec.
2473                  */
2474                 if (!image->sec[i].data ||
2475                     image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2476                     image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2477                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2478                                     "Break since Data not valid or Empty section, sec = %d\n",
2479                                     i);
2480                         break;
2481                 }
2482                 ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2483                 if (ret)
2484                         return ret;
2485
2486                 /* Notify the ucode of the loaded section number and status */
2487                 if (iwm_nic_lock(sc)) {
2488                         val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2489                         val = val | (sec_num << shift_param);
2490                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2491                         sec_num = (sec_num << 1) | 0x1;
2492                         iwm_nic_unlock(sc);
2493                 }
2494         }
2495
2496         *first_ucode_section = last_read_idx;
2497
2498         iwm_enable_interrupts(sc);
2499
2500         if (iwm_nic_lock(sc)) {
2501                 if (cpu == 1)
2502                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2503                 else
2504                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2505                 iwm_nic_unlock(sc);
2506         }
2507
2508         return 0;
2509 }
2510
2511 static int
2512 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2513         const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2514 {
2515         int shift_param;
2516         int i, ret = 0;
2517         uint32_t last_read_idx = 0;
2518
2519         if (cpu == 1) {
2520                 shift_param = 0;
2521                 *first_ucode_section = 0;
2522         } else {
2523                 shift_param = 16;
2524                 (*first_ucode_section)++;
2525         }
2526
2527         for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2528                 last_read_idx = i;
2529
2530                 /*
2531                  * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2532                  * CPU1 to CPU2.
2533                  * PAGING_SEPARATOR_SECTION delimiter - separate between
2534                  * CPU2 non paged to CPU2 paging sec.
2535                  */
2536                 if (!image->sec[i].data ||
2537                     image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2538                     image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2539                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2540                                     "Break since Data not valid or Empty section, sec = %d\n",
2541                                      i);
2542                         break;
2543                 }
2544
2545                 ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2546                 if (ret)
2547                         return ret;
2548         }
2549
2550         *first_ucode_section = last_read_idx;
2551
2552         return 0;
2553
2554 }
2555
2556 static int
2557 iwm_pcie_load_given_ucode(struct iwm_softc *sc, const struct iwm_fw_img *image)
2558 {
2559         int ret = 0;
2560         int first_ucode_section;
2561
2562         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2563                      image->is_dual_cpus ? "Dual" : "Single");
2564
2565         /* load to FW the binary non secured sections of CPU1 */
2566         ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2567         if (ret)
2568                 return ret;
2569
2570         if (image->is_dual_cpus) {
2571                 /* set CPU2 header address */
2572                 if (iwm_nic_lock(sc)) {
2573                         iwm_write_prph(sc,
2574                                        IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2575                                        IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2576                         iwm_nic_unlock(sc);
2577                 }
2578
2579                 /* load to FW the binary sections of CPU2 */
2580                 ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2581                                                  &first_ucode_section);
2582                 if (ret)
2583                         return ret;
2584         }
2585
2586         iwm_enable_interrupts(sc);
2587
2588         /* release CPU reset */
2589         IWM_WRITE(sc, IWM_CSR_RESET, 0);
2590
2591         return 0;
2592 }
2593
2594 int
2595 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2596         const struct iwm_fw_img *image)
2597 {
2598         int ret = 0;
2599         int first_ucode_section;
2600
2601         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2602                     image->is_dual_cpus ? "Dual" : "Single");
2603
2604         /* configure the ucode to be ready to get the secured image */
2605         /* release CPU reset */
2606         if (iwm_nic_lock(sc)) {
2607                 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2608                     IWM_RELEASE_CPU_RESET_BIT);
2609                 iwm_nic_unlock(sc);
2610         }
2611
2612         /* load to FW the binary Secured sections of CPU1 */
2613         ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2614             &first_ucode_section);
2615         if (ret)
2616                 return ret;
2617
2618         /* load to FW the binary sections of CPU2 */
2619         return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2620             &first_ucode_section);
2621 }
2622
2623 /* XXX Get rid of this definition */
2624 static inline void
2625 iwm_enable_fw_load_int(struct iwm_softc *sc)
2626 {
2627         IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
2628         sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
2629         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
2630 }
2631
2632 /* XXX Add proper rfkill support code */
2633 static int
2634 iwm_start_fw(struct iwm_softc *sc, const struct iwm_fw_img *fw)
2635 {
2636         int ret;
2637
2638         /* This may fail if AMT took ownership of the device */
2639         if (iwm_prepare_card_hw(sc)) {
2640                 device_printf(sc->sc_dev,
2641                     "%s: Exit HW not ready\n", __func__);
2642                 ret = EIO;
2643                 goto out;
2644         }
2645
2646         IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2647
2648         iwm_disable_interrupts(sc);
2649
2650         /* make sure rfkill handshake bits are cleared */
2651         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2652         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2653             IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2654
2655         /* clear (again), then enable host interrupts */
2656         IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2657
2658         ret = iwm_nic_init(sc);
2659         if (ret) {
2660                 device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2661                 goto out;
2662         }
2663
2664         /*
2665          * Now, we load the firmware and don't want to be interrupted, even
2666          * by the RF-Kill interrupt (hence mask all the interrupt besides the
2667          * FH_TX interrupt which is needed to load the firmware). If the
2668          * RF-Kill switch is toggled, we will find out after having loaded
2669          * the firmware and return the proper value to the caller.
2670          */
2671         iwm_enable_fw_load_int(sc);
2672
2673         /* really make sure rfkill handshake bits are cleared */
2674         /* maybe we should write a few times more?  just to make sure */
2675         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2676         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2677
2678         /* Load the given image to the HW */
2679         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2680                 ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2681         else
2682                 ret = iwm_pcie_load_given_ucode(sc, fw);
2683
2684         /* XXX re-check RF-Kill state */
2685
2686 out:
2687         return ret;
2688 }
2689
2690 static int
2691 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2692 {
2693         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2694                 .valid = htole32(valid_tx_ant),
2695         };
2696
2697         return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2698             IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2699 }
2700
2701 /* iwlwifi: mvm/fw.c */
2702 static int
2703 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2704 {
2705         struct iwm_phy_cfg_cmd phy_cfg_cmd;
2706         enum iwm_ucode_type ucode_type = sc->cur_ucode;
2707
2708         /* Set parameters */
2709         phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
2710         phy_cfg_cmd.calib_control.event_trigger =
2711             sc->sc_default_calib[ucode_type].event_trigger;
2712         phy_cfg_cmd.calib_control.flow_trigger =
2713             sc->sc_default_calib[ucode_type].flow_trigger;
2714
2715         IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2716             "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2717         return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2718             sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2719 }
2720
2721 static int
2722 iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
2723 {
2724         struct iwm_mvm_alive_data *alive_data = data;
2725         struct iwm_mvm_alive_resp_ver1 *palive1;
2726         struct iwm_mvm_alive_resp_ver2 *palive2;
2727         struct iwm_mvm_alive_resp *palive;
2728
2729         if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
2730                 palive1 = (void *)pkt->data;
2731
2732                 sc->support_umac_log = FALSE;
2733                 sc->error_event_table =
2734                         le32toh(palive1->error_event_table_ptr);
2735                 sc->log_event_table =
2736                         le32toh(palive1->log_event_table_ptr);
2737                 alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);
2738
2739                 alive_data->valid = le16toh(palive1->status) ==
2740                                     IWM_ALIVE_STATUS_OK;
2741                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2742                             "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2743                              le16toh(palive1->status), palive1->ver_type,
2744                              palive1->ver_subtype, palive1->flags);
2745         } else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
2746                 palive2 = (void *)pkt->data;
2747                 sc->error_event_table =
2748                         le32toh(palive2->error_event_table_ptr);
2749                 sc->log_event_table =
2750                         le32toh(palive2->log_event_table_ptr);
2751                 alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
2752                 sc->umac_error_event_table =
2753                         le32toh(palive2->error_info_addr);
2754
2755                 alive_data->valid = le16toh(palive2->status) ==
2756                                     IWM_ALIVE_STATUS_OK;
2757                 if (sc->umac_error_event_table)
2758                         sc->support_umac_log = TRUE;
2759
2760                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2761                             "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2762                             le16toh(palive2->status), palive2->ver_type,
2763                             palive2->ver_subtype, palive2->flags);
2764
2765                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2766                             "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2767                             palive2->umac_major, palive2->umac_minor);
2768         } else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
2769                 palive = (void *)pkt->data;
2770
2771                 sc->error_event_table =
2772                         le32toh(palive->error_event_table_ptr);
2773                 sc->log_event_table =
2774                         le32toh(palive->log_event_table_ptr);
2775                 alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
2776                 sc->umac_error_event_table =
2777                         le32toh(palive->error_info_addr);
2778
2779                 alive_data->valid = le16toh(palive->status) ==
2780                                     IWM_ALIVE_STATUS_OK;
2781                 if (sc->umac_error_event_table)
2782                         sc->support_umac_log = TRUE;
2783
2784                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2785                             "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2786                             le16toh(palive->status), palive->ver_type,
2787                             palive->ver_subtype, palive->flags);
2788
2789                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2790                             "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2791                             le32toh(palive->umac_major),
2792                             le32toh(palive->umac_minor));
2793         }
2794
2795         return TRUE;
2796 }
2797
2798 static int
2799 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2800         struct iwm_rx_packet *pkt, void *data)
2801 {
2802         struct iwm_phy_db *phy_db = data;
2803
2804         if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2805                 if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2806                         device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2807                             __func__, pkt->hdr.code);
2808                 }
2809                 return TRUE;
2810         }
2811
2812         if (iwm_phy_db_set_section(phy_db, pkt)) {
2813                 device_printf(sc->sc_dev,
2814                     "%s: iwm_phy_db_set_section failed\n", __func__);
2815         }
2816
2817         return FALSE;
2818 }
2819
2820 static int
2821 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2822         enum iwm_ucode_type ucode_type)
2823 {
2824         struct iwm_notification_wait alive_wait;
2825         struct iwm_mvm_alive_data alive_data;
2826         const struct iwm_fw_img *fw;
2827         enum iwm_ucode_type old_type = sc->cur_ucode;
2828         int error;
2829         static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };
2830
2831         fw = &sc->sc_fw.img[ucode_type];
2832         sc->cur_ucode = ucode_type;
2833         sc->ucode_loaded = FALSE;
2834
2835         memset(&alive_data, 0, sizeof(alive_data));
2836         iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
2837                                    alive_cmd, nitems(alive_cmd),
2838                                    iwm_alive_fn, &alive_data);
2839
2840         error = iwm_start_fw(sc, fw);
2841         if (error) {
2842                 device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2843                 sc->cur_ucode = old_type;
2844                 iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
2845                 return error;
2846         }
2847
2848         /*
2849          * Some things may run in the background now, but we
2850          * just wait for the ALIVE notification here.
2851          */
2852         IWM_UNLOCK(sc);
2853         error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
2854                                       IWM_MVM_UCODE_ALIVE_TIMEOUT);
2855         IWM_LOCK(sc);
2856         if (error) {
2857                 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2858                         uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
2859                         if (iwm_nic_lock(sc)) {
2860                                 a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
2861                                 b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
2862                                 iwm_nic_unlock(sc);
2863                         }
2864                         device_printf(sc->sc_dev,
2865                             "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
2866                             a, b);
2867                 }
2868                 sc->cur_ucode = old_type;
2869                 return error;
2870         }
2871
2872         if (!alive_data.valid) {
2873                 device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
2874                     __func__);
2875                 sc->cur_ucode = old_type;
2876                 return EIO;
2877         }
2878
2879         iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);
2880
2881         /*
2882          * configure and operate fw paging mechanism.
2883          * driver configures the paging flow only once, CPU2 paging image
2884          * included in the IWM_UCODE_INIT image.
2885          */
2886         if (fw->paging_mem_size) {
2887                 error = iwm_save_fw_paging(sc, fw);
2888                 if (error) {
2889                         device_printf(sc->sc_dev,
2890                             "%s: failed to save the FW paging image\n",
2891                             __func__);
2892                         return error;
2893                 }
2894
2895                 error = iwm_send_paging_cmd(sc, fw);
2896                 if (error) {
2897                         device_printf(sc->sc_dev,
2898                             "%s: failed to send the paging cmd\n", __func__);
2899                         iwm_free_fw_paging(sc);
2900                         return error;
2901                 }
2902         }
2903
2904         if (!error)
2905                 sc->ucode_loaded = TRUE;
2906         return error;
2907 }
2908
2909 /*
2910  * mvm misc bits
2911  */
2912
2913 /*
2914  * follows iwlwifi/fw.c
2915  */
2916 static int
2917 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2918 {
2919         struct iwm_notification_wait calib_wait;
2920         static const uint16_t init_complete[] = {
2921                 IWM_INIT_COMPLETE_NOTIF,
2922                 IWM_CALIB_RES_NOTIF_PHY_DB
2923         };
2924         int ret;
2925
2926         /* do not operate with rfkill switch turned on */
2927         if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2928                 device_printf(sc->sc_dev,
2929                     "radio is disabled by hardware switch\n");
2930                 return EPERM;
2931         }
2932
2933         iwm_init_notification_wait(sc->sc_notif_wait,
2934                                    &calib_wait,
2935                                    init_complete,
2936                                    nitems(init_complete),
2937                                    iwm_wait_phy_db_entry,
2938                                    sc->sc_phy_db);
2939
2940         /* Will also start the device */
2941         ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
2942         if (ret) {
2943                 device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
2944                     ret);
2945                 goto error;
2946         }
2947
2948         if (justnvm) {
2949                 /* Read nvm */
2950                 ret = iwm_nvm_init(sc);
2951                 if (ret) {
2952                         device_printf(sc->sc_dev, "failed to read nvm\n");
2953                         goto error;
2954                 }
2955                 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
2956                 goto error;
2957         }
2958
2959         ret = iwm_send_bt_init_conf(sc);
2960         if (ret) {
2961                 device_printf(sc->sc_dev,
2962                     "failed to send bt coex configuration: %d\n", ret);
2963                 goto error;
2964         }
2965
2966         /* Send TX valid antennas before triggering calibrations */
2967         ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
2968         if (ret) {
2969                 device_printf(sc->sc_dev,
2970                     "failed to send antennas before calibration: %d\n", ret);
2971                 goto error;
2972         }
2973
2974         /*
2975          * Send phy configurations command to init uCode
2976          * to start the 16.0 uCode init image internal calibrations.
2977          */
2978         ret = iwm_send_phy_cfg_cmd(sc);
2979         if (ret) {
2980                 device_printf(sc->sc_dev,
2981                     "%s: Failed to run INIT calibrations: %d\n",
2982                     __func__, ret);
2983                 goto error;
2984         }
2985
2986         /*
2987          * Nothing to do but wait for the init complete notification
2988          * from the firmware.
2989          */
2990         IWM_UNLOCK(sc);
2991         ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
2992             IWM_MVM_UCODE_CALIB_TIMEOUT);
2993         IWM_LOCK(sc);
2994
2995
2996         goto out;
2997
2998 error:
2999         iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
3000 out:
3001         return ret;
3002 }
3003
3004 static int
3005 iwm_mvm_config_ltr(struct iwm_softc *sc)
3006 {
3007         struct iwm_ltr_config_cmd cmd = {
3008                 .flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
3009         };
3010
3011         if (!sc->sc_ltr_enabled)
3012                 return 0;
3013
3014         return iwm_mvm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
3015 }
3016
3017 /*
3018  * receive side
3019  */
3020
/*
 * (Re)stock one slot of the rx ring; called at init-time to fill the
 * ring and at runtime to replace a buffer that was passed up the stack.
 *
 * The new mbuf is loaded into the ring's spare DMA map first; only
 * after the load succeeds are the maps swapped, so the slot keeps its
 * previous buffer intact on any failure path.
 *
 * Returns 0 on success, ENOBUFS or a bus_dma error code otherwise.
 */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
        struct iwm_rx_ring *ring = &sc->rxq;
        struct iwm_rx_data *data = &ring->data[idx];
        struct mbuf *m;
        bus_dmamap_t dmamap;
        bus_dma_segment_t seg;
        int nsegs, error;

        m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
        if (m == NULL)
                return ENOBUFS;

        m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
        /* Map into the spare map so data->map stays valid on error. */
        error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
            &seg, &nsegs, BUS_DMA_NOWAIT);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "%s: can't map mbuf, error %d\n", __func__, error);
                m_freem(m);
                return error;
        }

        /* Release the slot's old mapping (if any) before reusing it. */
        if (data->m != NULL)
                bus_dmamap_unload(ring->data_dmat, data->map);

        /* Swap ring->spare_map with data->map */
        dmamap = data->map;
        data->map = ring->spare_map;
        ring->spare_map = dmamap;

        bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
        data->m = m;

        /* Update RX descriptor. */
        /* The hardware takes a 256-byte-aligned address, shifted right 8. */
        KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
        ring->desc[idx] = htole32(seg.ds_addr >> 8);
        bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
            BUS_DMASYNC_PREWRITE);

        return 0;
}
3065
3066 /* iwlwifi: mvm/rx.c */
3067 /*
3068  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3069  * values are reported by the fw as positive values - need to negate
3070  * to obtain their dBM.  Account for missing antennas by replacing 0
3071  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3072  */
3073 static int
3074 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3075 {
3076         int energy_a, energy_b, energy_c, max_energy;
3077         uint32_t val;
3078
3079         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3080         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3081             IWM_RX_INFO_ENERGY_ANT_A_POS;
3082         energy_a = energy_a ? -energy_a : -256;
3083         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3084             IWM_RX_INFO_ENERGY_ANT_B_POS;
3085         energy_b = energy_b ? -energy_b : -256;
3086         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3087             IWM_RX_INFO_ENERGY_ANT_C_POS;
3088         energy_c = energy_c ? -energy_c : -256;
3089         max_energy = MAX(energy_a, energy_b);
3090         max_energy = MAX(max_energy, energy_c);
3091
3092         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3093             "energy In A %d B %d C %d , and max %d\n",
3094             energy_a, energy_b, energy_c, max_energy);
3095
3096         return max_energy;
3097 }
3098
3099 static void
3100 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3101 {
3102         struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3103
3104         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3105
3106         memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3107 }
3108
3109 /*
3110  * Retrieve the average noise (in dBm) among receivers.
3111  */
3112 static int
3113 iwm_get_noise(struct iwm_softc *sc,
3114     const struct iwm_mvm_statistics_rx_non_phy *stats)
3115 {
3116         int i, total, nbant, noise;
3117
3118         total = nbant = noise = 0;
3119         for (i = 0; i < 3; i++) {
3120                 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3121                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3122                     __func__,
3123                     i,
3124                     noise);
3125
3126                 if (noise) {
3127                         total += noise;
3128                         nbant++;
3129                 }
3130         }
3131
3132         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3133             __func__, nbant, total);
3134 #if 0
3135         /* There should be at least one antenna but check anyway. */
3136         return (nbant == 0) ? -127 : (total / nbant) - 107;
3137 #else
3138         /* For now, just hard-code it to -96 to be safe */
3139         return (-96);
3140 #endif
3141 }
3142
3143 static void
3144 iwm_mvm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3145 {
3146         struct iwm_notif_statistics_v10 *stats = (void *)&pkt->data;
3147
3148         memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
3149         sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
3150 }
3151
3152 /*
3153  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3154  *
3155  * Handles the actual data of the Rx packet from the fw
3156  */
3157 static boolean_t
3158 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3159         boolean_t stolen)
3160 {
3161         struct ieee80211com *ic = &sc->sc_ic;
3162         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3163         struct ieee80211_frame *wh;
3164         struct ieee80211_node *ni;
3165         struct ieee80211_rx_stats rxs;
3166         struct iwm_rx_phy_info *phy_info;
3167         struct iwm_rx_mpdu_res_start *rx_res;
3168         struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3169         uint32_t len;
3170         uint32_t rx_pkt_status;
3171         int rssi;
3172
3173         phy_info = &sc->sc_last_phy_info;
3174         rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3175         wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3176         len = le16toh(rx_res->byte_count);
3177         rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3178
3179         if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3180                 device_printf(sc->sc_dev,
3181                     "dsp size out of range [0,20]: %d\n",
3182                     phy_info->cfg_phy_cnt);
3183                 goto fail;
3184         }
3185
3186         if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3187             !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3188                 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3189                     "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3190                 goto fail;
3191         }
3192
3193         rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3194
3195         /* Map it to relative value */
3196         rssi = rssi - sc->sc_noise;
3197
3198         /* replenish ring for the buffer we're going to feed to the sharks */
3199         if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3200                 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3201                     __func__);
3202                 goto fail;
3203         }
3204
3205         m->m_data = pkt->data + sizeof(*rx_res);
3206         m->m_pkthdr.len = m->m_len = len;
3207
3208         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3209             "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3210
3211         ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3212
3213         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3214             "%s: phy_info: channel=%d, flags=0x%08x\n",
3215             __func__,
3216             le16toh(phy_info->channel),
3217             le16toh(phy_info->phy_flags));
3218
3219         /*
3220          * Populate an RX state struct with the provided information.
3221          */
3222         bzero(&rxs, sizeof(rxs));
3223         rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3224         rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3225         rxs.c_ieee = le16toh(phy_info->channel);
3226         if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3227                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3228         } else {
3229                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3230         }
3231
3232         /* rssi is in 1/2db units */
3233         rxs.c_rssi = rssi * 2;
3234         rxs.c_nf = sc->sc_noise;
3235         if (ieee80211_add_rx_params(m, &rxs) == 0) {
3236                 if (ni)
3237                         ieee80211_free_node(ni);
3238                 goto fail;
3239         }
3240
3241         if (ieee80211_radiotap_active_vap(vap)) {
3242                 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3243
3244                 tap->wr_flags = 0;
3245                 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3246                         tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3247                 tap->wr_chan_freq = htole16(rxs.c_freq);
3248                 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3249                 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3250                 tap->wr_dbm_antsignal = (int8_t)rssi;
3251                 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3252                 tap->wr_tsft = phy_info->system_timestamp;
3253                 switch (phy_info->rate) {
3254                 /* CCK rates. */
3255                 case  10: tap->wr_rate =   2; break;
3256                 case  20: tap->wr_rate =   4; break;
3257                 case  55: tap->wr_rate =  11; break;
3258                 case 110: tap->wr_rate =  22; break;
3259                 /* OFDM rates. */
3260                 case 0xd: tap->wr_rate =  12; break;
3261                 case 0xf: tap->wr_rate =  18; break;
3262                 case 0x5: tap->wr_rate =  24; break;
3263                 case 0x7: tap->wr_rate =  36; break;
3264                 case 0x9: tap->wr_rate =  48; break;
3265                 case 0xb: tap->wr_rate =  72; break;
3266                 case 0x1: tap->wr_rate =  96; break;
3267                 case 0x3: tap->wr_rate = 108; break;
3268                 /* Unknown rate: should not happen. */
3269                 default:  tap->wr_rate =   0;
3270                 }
3271         }
3272
3273         IWM_UNLOCK(sc);
3274         if (ni != NULL) {
3275                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3276                 ieee80211_input_mimo(ni, m);
3277                 ieee80211_free_node(ni);
3278         } else {
3279                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3280                 ieee80211_input_mimo_all(ic, m);
3281         }
3282         IWM_LOCK(sc);
3283
3284         return TRUE;
3285
3286 fail:
3287         counter_u64_add(ic->ic_ierrors, 1);
3288         return FALSE;
3289 }
3290
/*
 * Process the Tx response for a single (non-aggregated) frame and feed
 * the outcome into net80211's rate control.
 *
 * Returns 0 if the frame was transmitted successfully, non-zero on
 * failure; the caller passes this to ieee80211_tx_complete().
 */
static int
iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
        struct iwm_node *in)
{
        struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
        struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
        struct ieee80211_node *ni = &in->in_ni;
        struct ieee80211vap *vap = ni->ni_vap;
        int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
        int new_rate, cur_rate = vap->iv_bss->ni_txrate;
        boolean_t rate_matched;
        uint8_t tx_resp_rate;

        KASSERT(tx_resp->frame_count == 1, ("too many frames"));

        /* Update rate control statistics. */
        IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
            __func__,
            (int) le16toh(tx_resp->status.status),
            (int) le16toh(tx_resp->status.sequence),
            tx_resp->frame_count,
            tx_resp->bt_kill_count,
            tx_resp->failure_rts,
            tx_resp->failure_frame,
            le32toh(tx_resp->initial_rate),
            (int) le16toh(tx_resp->wireless_media_time));

        tx_resp_rate = iwm_rate_from_ucode_rate(le32toh(tx_resp->initial_rate));

        /* For rate control, ignore frames sent at different initial rate */
        rate_matched = (tx_resp_rate != 0 && tx_resp_rate == cur_rate);

        if (tx_resp_rate != 0 && cur_rate != 0 && !rate_matched) {
                IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
                    "tx_resp_rate doesn't match ni_txrate (tx_resp_rate=%u "
                    "ni_txrate=%d)\n", tx_resp_rate, cur_rate);
        }

        /* Translate the firmware status into a ratectl tx status. */
        txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
                     IEEE80211_RATECTL_STATUS_LONG_RETRY;
        txs->short_retries = tx_resp->failure_rts;
        txs->long_retries = tx_resp->failure_frame;
        if (status != IWM_TX_STATUS_SUCCESS &&
            status != IWM_TX_STATUS_DIRECT_DONE) {
                switch (status) {
                case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
                        txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
                        break;
                case IWM_TX_STATUS_FAIL_LONG_LIMIT:
                        txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
                        break;
                case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
                        txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
                        break;
                default:
                        txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
                        break;
                }
        } else {
                txs->status = IEEE80211_RATECTL_TX_SUCCESS;
        }

        if (rate_matched) {
                ieee80211_ratectl_tx_complete(ni, txs);

                /*
                 * If rate control picked a new rate, reprogram the
                 * firmware's link-quality table to match.
                 */
                int rix = ieee80211_ratectl_rate(vap->iv_bss, NULL, 0);
                new_rate = vap->iv_bss->ni_txrate;
                if (new_rate != 0 && new_rate != cur_rate) {
                        struct iwm_node *in = IWM_NODE(vap->iv_bss);
                        iwm_setrates(sc, in, rix);
                        iwm_mvm_send_lq_cmd(sc, &in->in_lq, FALSE);
                }
        }

        return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
}
3367
/*
 * Handle an IWM_TX_CMD response: complete the Tx descriptor identified
 * by the packet header's qid/idx, hand the mbuf back to net80211 with
 * the transmit status, and restart transmission when the ring drains
 * below the low watermark.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
        struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
        int idx = cmd_hdr->idx;
        int qid = cmd_hdr->qid;
        struct iwm_tx_ring *ring = &sc->txq[qid];
        struct iwm_tx_data *txd = &ring->data[idx];
        struct iwm_node *in = txd->in;
        struct mbuf *m = txd->m;
        int status;

        KASSERT(txd->done == 0, ("txd not done"));
        KASSERT(txd->in != NULL, ("txd without node"));
        KASSERT(txd->m != NULL, ("txd without mbuf"));

        /* A frame completed, so disarm the Tx watchdog timer. */
        sc->sc_tx_timer = 0;

        status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

        /* Unmap and free mbuf. */
        bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(ring->data_dmat, txd->map);

        IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
            "free txd %p, in %p\n", txd, txd->in);
        /* Mark the slot free; the node reference is released below. */
        txd->done = 1;
        txd->m = NULL;
        txd->in = NULL;

        ieee80211_tx_complete(&in->in_ni, m, status);

        /* Clear the queue-full bit and kick tx once the ring drains. */
        if (--ring->queued < IWM_TX_RING_LOMARK) {
                sc->qfullmsk &= ~(1 << ring->qid);
                if (sc->qfullmsk == 0) {
                        iwm_start(sc);
                }
        }
}
3407
3408 /*
3409  * transmit side
3410  */
3411
/*
 * Process a "command done" firmware notification.  This is where we wakeup
 * processes waiting for a synchronous command completion.
 * from if_iwn
 *
 * Frees any mbuf that carried a large host command, wakes the waiter
 * sleeping on the ring descriptor, and maintains the command ring's
 * queued count (releasing the "command in flight" state at zero).
 */
static void
iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
        struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
        struct iwm_tx_data *data;

        if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
                return; /* Not a command ack. */
        }

        /* XXX wide commands? */
        IWM_DPRINTF(sc, IWM_DEBUG_CMD,
            "cmd notification type 0x%x qid %d idx %d\n",
            pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);

        data = &ring->data[pkt->hdr.idx];

        /* If the command was mapped in an mbuf, free it. */
        if (data->m != NULL) {
                bus_dmamap_sync(ring->data_dmat, data->map,
                    BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(ring->data_dmat, data->map);
                m_freem(data->m);
                data->m = NULL;
        }
        /* Wake the thread sleeping in iwm_send_cmd() on this slot. */
        wakeup(&ring->desc[pkt->hdr.idx]);

        /* Sanity-check that completions arrive in ring order. */
        if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
                device_printf(sc->sc_dev,
                    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
                    __func__, pkt->hdr.idx, ring->queued, ring->cur);
                /* XXX call iwm_force_nmi() */
        }

        KASSERT(ring->queued > 0, ("ring->queued is empty?"));
        ring->queued--;
        if (ring->queued == 0)
                iwm_pcie_clear_cmd_in_flight(sc);
}
3456
#if 0
/*
 * Update the TX scheduler byte-count table entry for a queue slot.
 * necessary only for block ack mode; currently compiled out.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
        uint16_t len)
{
        struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
        uint16_t w_val;

        scd_bc_tbl = sc->sched_dma.vaddr;

        len += 8; /* magic numbers came naturally from paris */
        /* Scheduler counts in dwords. */
        len = roundup(len, 4) / 4;

        w_val = htole16(sta_id << 12 | len);

        /* Update TX scheduler. */
        scd_bc_tbl[qid].tfd_offset[idx] = w_val;
        bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
            BUS_DMASYNC_PREWRITE);

        /* I really wonder what this is ?!? */
        /*
         * NOTE(review): the first IWM_TFD_QUEUE_SIZE_BC_DUP entries are
         * mirrored past the queue end — presumably for hardware wraparound;
         * confirm against the iwlwifi reference before enabling.
         */
        if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
                scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
                bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
                    BUS_DMASYNC_PREWRITE);
        }
}
#endif
3488
3489 static int
3490 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3491 {
3492         int i;
3493
3494         for (i = 0; i < nitems(iwm_rates); i++) {
3495                 if (iwm_rates[i].rate == rate)
3496                         return (i);
3497         }
3498         /* XXX error? */
3499         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3500             "%s: couldn't find an entry for rate=%d\n",
3501             __func__,
3502             rate);
3503         return (0);
3504 }
3505
/*
 * Fill in the rate related information for a transmit command.
 *
 * Management/control/EAPOL frames use the mgmt rate, multicast frames
 * the mcast rate, fixed-rate configurations the configured rate, and
 * ordinary data frames the firmware RS (rate-scaling) table.  Returns
 * the chosen iwm_rates[] entry for the caller (e.g. radiotap).
 */
static const struct iwm_rate *
iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
        struct mbuf *m, struct iwm_tx_cmd *tx)
{
        struct ieee80211_node *ni = &in->in_ni;
        struct ieee80211_frame *wh;
        const struct ieee80211_txparam *tp = ni->ni_txparms;
        const struct iwm_rate *rinfo;
        int type;
        int ridx, rate_flags;

        wh = mtod(m, struct ieee80211_frame *);
        type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

        tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
        tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;

        if (type == IEEE80211_FC0_TYPE_MGT ||
            type == IEEE80211_FC0_TYPE_CTL ||
            (m->m_flags & M_EAPOL) != 0) {
                ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
                IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
                    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
        } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
                ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
                IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
                    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
        } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
                ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
                IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
                    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
        } else {
                /* for data frames, use RS table */
                IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
                ridx = iwm_rate2ridx(sc, ni->ni_txrate);
                if (ridx == -1)
                        ridx = 0;

                /* This is the index into the programmed table */
                tx->initial_rate_index = 0;
                tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
        }

        IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
            "%s: frame type=%d txrate %d\n",
                __func__, type, iwm_rates[ridx].rate);

        rinfo = &iwm_rates[ridx];

        IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
            __func__, ridx,
            rinfo->rate,
            !! (IWM_RIDX_IS_CCK(ridx))
            );

        /* XXX TODO: hard-coded TX antenna? */
        rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
        if (IWM_RIDX_IS_CCK(ridx))
                rate_flags |= IWM_RATE_MCS_CCK_MSK;
        tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);

        return rinfo;
}
3572
3573 #define TB0_SIZE 16
/*
 * Queue one frame for transmission on the Tx ring selected by 'ac'.
 *
 * Builds the firmware TX command (header copied into the command
 * buffer, payload DMA-mapped separately), fills the TFD with the
 * command and payload segments, and kicks the hardware write pointer.
 * Consumes the mbuf on both success and failure.  Returns 0 on
 * success or an errno value on failure.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
        struct ieee80211com *ic = &sc->sc_ic;
        struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
        struct iwm_node *in = IWM_NODE(ni);
        struct iwm_tx_ring *ring;
        struct iwm_tx_data *data;
        struct iwm_tfd *desc;
        struct iwm_device_cmd *cmd;
        struct iwm_tx_cmd *tx;
        struct ieee80211_frame *wh;
        struct ieee80211_key *k = NULL;
        struct mbuf *m1;
        const struct iwm_rate *rinfo;
        uint32_t flags;
        u_int hdrlen;
        bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
        int nsegs;
        uint8_t tid, type;
        int i, totlen, error, pad;

        wh = mtod(m, struct ieee80211_frame *);
        hdrlen = ieee80211_anyhdrsize(wh);
        type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
        tid = 0;
        ring = &sc->txq[ac];
        desc = &ring->desc[ring->cur];
        memset(desc, 0, sizeof(*desc));
        data = &ring->data[ring->cur];

        /* Fill out iwm_tx_cmd to send to the firmware */
        cmd = &ring->cmd[ring->cur];
        cmd->hdr.code = IWM_TX_CMD;
        cmd->hdr.flags = 0;
        cmd->hdr.qid = ring->qid;
        cmd->hdr.idx = ring->cur;

        tx = (void *)cmd->data;
        memset(tx, 0, sizeof(*tx));

        rinfo = iwm_tx_fill_cmd(sc, in, m, tx);

        /* Encrypt the frame if need be. */
        if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
                /* Retrieve key for TX && do software encryption. */
                k = ieee80211_crypto_encap(ni, m);
                if (k == NULL) {
                        m_freem(m);
                        return (ENOBUFS);
                }
                /* 802.11 header may have moved. */
                wh = mtod(m, struct ieee80211_frame *);
        }

        if (ieee80211_radiotap_active_vap(vap)) {
                struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

                tap->wt_flags = 0;
                tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
                tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
                tap->wt_rate = rinfo->rate;
                if (k != NULL)
                        tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
                ieee80211_radiotap_tx(vap, m);
        }


        totlen = m->m_pkthdr.len;

        flags = 0;
        /* Unicast frames expect an ACK from the peer. */
        if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
                flags |= IWM_TX_CMD_FLG_ACK;
        }

        /* RTS/CTS protection for long unicast data frames. */
        if (type == IEEE80211_FC0_TYPE_DATA
            && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
            && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
                flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
        }

        /* Non-data and multicast frames go out via the auxiliary station. */
        if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
            type != IEEE80211_FC0_TYPE_DATA)
                tx->sta_id = sc->sc_aux_sta.sta_id;
        else
                tx->sta_id = IWM_STATION_ID;

        /*
         * Select how long the firmware keeps the device awake after
         * this frame, depending on the management subtype.
         */
        if (type == IEEE80211_FC0_TYPE_MGT) {
                uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

                if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
                    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
                        tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
                } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
                        tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
                } else {
                        tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
                }
        } else {
                tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
        }

        if (hdrlen & 3) {
                /* First segment length must be a multiple of 4. */
                flags |= IWM_TX_CMD_FLG_MH_PAD;
                pad = 4 - (hdrlen & 3);
        } else
                pad = 0;

        tx->driver_txop = 0;
        tx->next_frame_len = 0;

        tx->len = htole16(totlen);
        tx->tid_tspec = tid;
        tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

        /* Set physical address of "scratch area". */
        tx->dram_lsb_ptr = htole32(data->scratch_paddr);
        tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

        /* Copy 802.11 header in TX command. */
        memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

        flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

        tx->sec_ctl = 0;
        tx->tx_flags |= htole32(flags);

        /* Trim 802.11 header. */
        m_adj(m, hdrlen);
        error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
            segs, &nsegs, BUS_DMA_NOWAIT);
        if (error != 0) {
                if (error != EFBIG) {
                        device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
                            error);
                        m_freem(m);
                        return error;
                }
                /* Too many DMA segments, linearize mbuf. */
                m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
                if (m1 == NULL) {
                        device_printf(sc->sc_dev,
                            "%s: could not defrag mbuf\n", __func__);
                        m_freem(m);
                        return (ENOBUFS);
                }
                m = m1;

                error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
                    segs, &nsegs, BUS_DMA_NOWAIT);
                if (error != 0) {
                        device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
                            error);
                        m_freem(m);
                        return error;
                }
        }
        data->m = m;
        data->in = in;
        data->done = 0;

        IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
            "sending txd %p, in %p\n", data, data->in);
        KASSERT(data->in != NULL, ("node is NULL"));

        IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
            "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
            ring->qid, ring->cur, totlen, nsegs,
            le32toh(tx->tx_flags),
            le32toh(tx->rate_n_flags),
            tx->initial_rate_index
            );

        /* Fill TX descriptor. */
        /* TB0/TB1 carry the command + 802.11 header; the rest is payload. */
        desc->num_tbs = 2 + nsegs;

        desc->tbs[0].lo = htole32(data->cmd_paddr);
        desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
            (TB0_SIZE << 4);
        desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
        desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
            ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
              + hdrlen + pad - TB0_SIZE) << 4);

        /* Other DMA segments are for data payload. */
        for (i = 0; i < nsegs; i++) {
                seg = &segs[i];
                desc->tbs[i+2].lo = htole32(seg->ds_addr);
                desc->tbs[i+2].hi_n_len = \
                    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
                    | ((seg->ds_len) << 4);
        }

        /* Flush payload, command and descriptor before the doorbell. */
        bus_dmamap_sync(ring->data_dmat, data->map,
            BUS_DMASYNC_PREWRITE);
        bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
            BUS_DMASYNC_PREWRITE);
        bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
            BUS_DMASYNC_PREWRITE);

#if 0
        iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

        /* Kick TX ring. */
        ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
        IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

        /* Mark TX ring as full if we reach a certain threshold. */
        if (++ring->queued > IWM_TX_RING_HIMARK) {
                sc->qfullmsk |= 1 << ring->qid;
        }

        return 0;
}
3790
3791 static int
3792 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3793     const struct ieee80211_bpf_params *params)
3794 {
3795         struct ieee80211com *ic = ni->ni_ic;
3796         struct iwm_softc *sc = ic->ic_softc;
3797         int error = 0;
3798
3799         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3800             "->%s begin\n", __func__);
3801
3802         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3803                 m_freem(m);
3804                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3805                     "<-%s not RUNNING\n", __func__);
3806                 return (ENETDOWN);
3807         }
3808
3809         IWM_LOCK(sc);
3810         /* XXX fix this */
3811         if (params == NULL) {
3812                 error = iwm_tx(sc, m, ni, 0);
3813         } else {
3814                 error = iwm_tx(sc, m, ni, 0);
3815         }
3816         sc->sc_tx_timer = 5;
3817         IWM_UNLOCK(sc);
3818
3819         return (error);
3820 }
3821
3822 /*
3823  * mvm/tx.c
3824  */
3825
3826 /*
3827  * Note that there are transports that buffer frames before they reach
3828  * the firmware. This means that after flush_tx_path is called, the
3829  * queue might not be empty. The race-free way to handle this is to:
3830  * 1) set the station as draining
3831  * 2) flush the Tx path
3832  * 3) wait for the transport queues to be empty
3833  */
3834 int
3835 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3836 {
3837         int ret;
3838         struct iwm_tx_path_flush_cmd flush_cmd = {
3839                 .queues_ctl = htole32(tfd_msk),
3840                 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3841         };
3842
3843         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3844             sizeof(flush_cmd), &flush_cmd);
3845         if (ret)
3846                 device_printf(sc->sc_dev,
3847                     "Flushing tx queue failed: %d\n", ret);
3848         return ret;
3849 }
3850
3851 /*
3852  * BEGIN mvm/quota.c
3853  */
3854
3855 static int
3856 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
3857 {
3858         struct iwm_time_quota_cmd cmd;
3859         int i, idx, ret, num_active_macs, quota, quota_rem;
3860         int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3861         int n_ifs[IWM_MAX_BINDINGS] = {0, };
3862         uint16_t id;
3863
3864         memset(&cmd, 0, sizeof(cmd));
3865
3866         /* currently, PHY ID == binding ID */
3867         if (ivp) {
3868                 id = ivp->phy_ctxt->id;
3869                 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3870                 colors[id] = ivp->phy_ctxt->color;
3871
3872                 if (1)
3873                         n_ifs[id] = 1;
3874         }
3875
3876         /*
3877          * The FW's scheduling session consists of
3878          * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3879          * equally between all the bindings that require quota
3880          */
3881         num_active_macs = 0;
3882         for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3883                 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3884                 num_active_macs += n_ifs[i];
3885         }
3886
3887         quota = 0;
3888         quota_rem = 0;
3889         if (num_active_macs) {
3890                 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3891                 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3892         }
3893
3894         for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3895                 if (colors[i] < 0)
3896                         continue;
3897
3898                 cmd.quotas[idx].id_and_color =
3899                         htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3900
3901                 if (n_ifs[i] <= 0) {
3902                         cmd.quotas[idx].quota = htole32(0);
3903                         cmd.quotas[idx].max_duration = htole32(0);
3904                 } else {
3905                         cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3906                         cmd.quotas[idx].max_duration = htole32(0);
3907                 }
3908                 idx++;
3909         }
3910
3911         /* Give the remainder of the session to the first binding */
3912         cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3913
3914         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3915             sizeof(cmd), &cmd);
3916         if (ret)
3917                 device_printf(sc->sc_dev,
3918                     "%s: Failed to send quota: %d\n", __func__, ret);
3919         return ret;
3920 }
3921
3922 /*
3923  * END mvm/quota.c
3924  */
3925
3926 /*
3927  * ieee80211 routines
3928  */
3929
3930 /*
3931  * Change to AUTH state in 80211 state machine.  Roughly matches what
3932  * Linux does in bss_info_changed().
3933  */
static int
iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
{
	struct ieee80211_node *ni;
	struct iwm_node *in;
	struct iwm_vap *iv = IWM_VAP(vap);
	uint32_t duration;
	int error;

	/*
	 * XXX i have a feeling that the vap node is being
	 * freed from underneath us. Grr.
	 */
	ni = ieee80211_ref_node(vap->iv_bss);
	in = IWM_NODE(ni);
	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
	    "%s: called; vap=%p, bss ni=%p\n",
	    __func__,
	    vap,
	    ni);
	IWM_DPRINTF(sc, IWM_DEBUG_STATE, "%s: Current node bssid: %s\n",
	    __func__, ether_sprintf(ni->ni_bssid));

	in->in_assoc = 0;
	/* Mark that we are authenticating until we hit 'out'. */
	iv->iv_auth = 1;

	/*
	 * Firmware bug - it'll crash if the beacon interval is less
	 * than 16. We can't avoid connecting at all, so refuse the
	 * station state change, this will cause net80211 to abandon
	 * attempts to connect to this AP, and eventually wpa_s will
	 * blacklist the AP...
	 */
	if (ni->ni_intval < 16) {
		device_printf(sc->sc_dev,
		    "AP %s beacon interval is %d, refusing due to firmware bug!\n",
		    ether_sprintf(ni->ni_bssid), ni->ni_intval);
		error = EINVAL;
		goto out;
	}

	error = iwm_allow_mcast(vap, sc);
	if (error) {
		device_printf(sc->sc_dev,
		    "%s: failed to set multicast\n", __func__);
		goto out;
	}

	/*
	 * This is where it deviates from what Linux does.
	 *
	 * Linux iwlwifi doesn't reset the nic each time, nor does it
	 * call ctxt_add() here.  Instead, it adds it during vap creation,
	 * and always does a mac_ctx_changed().
	 *
	 * The openbsd port doesn't attempt to do that - it reset things
	 * at odd states and does the add here.
	 *
	 * So, until the state handling is fixed (ie, we never reset
	 * the NIC except for a firmware failure, which should drag
	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
	 * contexts that are required), let's do a dirty hack here.
	 */
	if (iv->is_uploaded) {
		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update MAC\n", __func__);
			goto out;
		}
	} else {
		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to add MAC\n", __func__);
			goto out;
		}
	}
	/* sc_firmware_state tracks teardown depth: 1 = MAC context live. */
	sc->sc_firmware_state = 1;

	if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
	    in->in_ni.ni_chan, 1, 1)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: failed update phy ctxt\n", __func__);
		goto out;
	}
	iv->phy_ctxt = &sc->sc_phyctxt[0];

	if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: binding update cmd\n", __func__);
		goto out;
	}
	/* 2 = binding (vif <-> phy context) established in firmware. */
	sc->sc_firmware_state = 2;
	/*
	 * Authentication becomes unreliable when powersaving is left enabled
	 * here. Powersaving will be activated again when association has
	 * finished or is aborted.
	 */
	iv->ps_disabled = TRUE;
	error = iwm_mvm_power_update_mac(sc);
	iv->ps_disabled = FALSE;
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: failed to update power management\n",
		    __func__);
		goto out;
	}
	if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: failed to add sta\n", __func__);
		goto out;
	}
	/* 3 = station added; full teardown needed on the way down. */
	sc->sc_firmware_state = 3;

	/*
	 * Prevent the FW from wandering off channel during association
	 * by "protecting" the session with a time event.
	 */
	/* XXX duration is in units of TU, not MS */
	duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
	iwm_mvm_protect_session(sc, iv, duration, 500 /* XXX magic number */);
	DELAY(100);

	error = 0;
out:
	/* On any failure, drop the "authenticating" marker again. */
	if (error != 0)
		iv->iv_auth = 0;
	ieee80211_free_node(ni);
	return (error);
}
4063
4064 static struct ieee80211_node *
4065 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4066 {
4067         return malloc(sizeof (struct iwm_node), M_80211_NODE,
4068             M_NOWAIT | M_ZERO);
4069 }
4070
4071 static uint8_t
4072 iwm_rate_from_ucode_rate(uint32_t rate_n_flags)
4073 {
4074         uint8_t plcp = rate_n_flags & 0xff;
4075         int i;
4076
4077         for (i = 0; i <= IWM_RIDX_MAX; i++) {
4078                 if (iwm_rates[i].plcp == plcp)
4079                         return iwm_rates[i].rate;
4080         }
4081         return 0;
4082 }
4083
4084 uint8_t
4085 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4086 {
4087         int i;
4088         uint8_t rval;
4089
4090         for (i = 0; i < rs->rs_nrates; i++) {
4091                 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4092                 if (rval == iwm_rates[ridx].rate)
4093                         return rs->rs_rates[i];
4094         }
4095
4096         return 0;
4097 }
4098
4099 static int
4100 iwm_rate2ridx(struct iwm_softc *sc, uint8_t rate)
4101 {
4102         int i;
4103
4104         for (i = 0; i <= IWM_RIDX_MAX; i++) {
4105                 if (iwm_rates[i].rate == rate)
4106                         return i;
4107         }
4108
4109         device_printf(sc->sc_dev,
4110             "%s: WARNING: device rate for %u not found!\n",
4111             __func__, rate);
4112
4113         return -1;
4114 }
4115
4116
/*
 * Build the firmware link-quality (rate selection) command for 'in'
 * from the node's negotiated rate set, starting at rate index 'rix'
 * (highest rate to try) and falling back toward the lowest rate.
 * The resulting command is stored in in->in_lq; the caller sends it.
 */
static void
iwm_setrates(struct iwm_softc *sc, struct iwm_node *in, int rix)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct iwm_lq_cmd *lq = &in->in_lq;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int nrates = rs->rs_nrates;
	int i, ridx, tab = 0;
//	int txant = 0;

	KASSERT(rix >= 0 && rix < nrates, ("invalid rix"));

	if (nrates > nitems(lq->rs_table)) {
		device_printf(sc->sc_dev,
		    "%s: node supports %d rates, driver handles "
		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
		return;
	}
	if (nrates == 0) {
		device_printf(sc->sc_dev,
		    "%s: node supports 0 rates, odd!\n", __func__);
		return;
	}
	/* Only use rates up to and including the requested start index. */
	nrates = imin(rix + 1, nrates);

	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
	    "%s: nrates=%d\n", __func__, nrates);

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/* For HT, always enable RTS/CTS to avoid excessive retries. */
	if (ni->ni_flags & IEEE80211_NODE_HT)
		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	for (i = 0; i < nrates; i++) {
		int rate = rs->rs_rates[rix - i] & IEEE80211_RATE_VAL;
		int nextant;

		/* Map 802.11 rate to HW rate index. */
		ridx = iwm_rate2ridx(sc, rate);
		/*
		 * NOTE(review): on -1 this 'continue' leaves rs_table[i]
		 * as the zeroed entry from the memset above, creating a
		 * hole in the middle of the table.  Presumably negotiated
		 * rates always map successfully — verify.
		 */
		if (ridx == -1)
			continue;

#if 0
		if (txant == 0)
			txant = iwm_mvm_get_valid_tx_ant(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;
#else
		nextant = iwm_mvm_get_valid_tx_ant(sc);
#endif
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "station rate i=%d, rate=%d, hw=%x\n",
		    i, iwm_rates[ridx].rate, tab);
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	for (i = nrates; i < nitems(lq->rs_table); i++) {
		KASSERT(tab != 0, ("invalid tab"));
		lq->rs_table[i] = htole32(tab);
	}
}
4200
4201 static int
4202 iwm_media_change(struct ifnet *ifp)
4203 {
4204         struct ieee80211vap *vap = ifp->if_softc;
4205         struct ieee80211com *ic = vap->iv_ic;
4206         struct iwm_softc *sc = ic->ic_softc;
4207         int error;
4208
4209         error = ieee80211_media_change(ifp);
4210         if (error != ENETRESET)
4211                 return error;
4212
4213         IWM_LOCK(sc);
4214         if (ic->ic_nrunning > 0) {
4215                 iwm_stop(sc);
4216                 iwm_init(sc);
4217         }
4218         IWM_UNLOCK(sc);
4219         return error;
4220 }
4221
/*
 * Unwind the firmware state built up by iwm_auth(), stepping
 * sc_firmware_state back from 3 (station added) through 2 (binding
 * added) and 1 (MAC context added) to 0.  Each teardown step is
 * guarded by a fresh check of sc_firmware_state; errors are logged
 * but do not abort the remaining teardown.
 */
static void
iwm_bring_down_firmware(struct iwm_softc *sc, struct ieee80211vap *vap)
{
	struct iwm_vap *ivp = IWM_VAP(vap);
	int error;

	/* We are no longer in the middle of an authentication attempt. */
	ivp->iv_auth = 0;
	if (sc->sc_firmware_state == 3) {
		/* Drain queued frames before removing the station. */
		iwm_xmit_queue_drain(sc);
//		iwm_mvm_flush_tx_path(sc, 0xf, IWM_CMD_SYNC);
		error = iwm_mvm_rm_sta(sc, vap, TRUE);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: Failed to remove station: %d\n",
			    __func__, error);
		}
	}
	if (sc->sc_firmware_state == 3) {
		error = iwm_mvm_mac_ctxt_changed(sc, vap);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: Failed to change mac context: %d\n",
			    __func__, error);
		}
	}
	if (sc->sc_firmware_state == 3) {
		error = iwm_mvm_sf_update(sc, vap, FALSE);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: Failed to update smart FIFO: %d\n",
			    __func__, error);
		}
	}
	if (sc->sc_firmware_state == 3) {
		error = iwm_mvm_rm_sta_id(sc, vap);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: Failed to remove station id: %d\n",
			    __func__, error);
		}
	}
	if (sc->sc_firmware_state == 3) {
		/* NULL ivp programs an empty quota set. */
		error = iwm_mvm_update_quotas(sc, NULL);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: Failed to update PHY quota: %d\n",
			    __func__, error);
		}
	}
	if (sc->sc_firmware_state == 3) {
		/* XXX Might need to specify bssid correctly. */
		error = iwm_mvm_mac_ctxt_changed(sc, vap);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: Failed to change mac context: %d\n",
			    __func__, error);
		}
	}
	if (sc->sc_firmware_state == 3) {
		sc->sc_firmware_state = 2;
	}
	if (sc->sc_firmware_state > 1) {
		/* Remove the vif <-> phy-context binding. */
		error = iwm_mvm_binding_remove_vif(sc, ivp);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: Failed to remove channel ctx: %d\n",
			    __func__, error);
		}
	}
	if (sc->sc_firmware_state > 1) {
		sc->sc_firmware_state = 1;
	}
	ivp->phy_ctxt = NULL;
	if (sc->sc_firmware_state > 0) {
		error = iwm_mvm_mac_ctxt_changed(sc, vap);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: Failed to change mac context: %d\n",
			    __func__, error);
		}
	}
	if (sc->sc_firmware_state > 0) {
		error = iwm_mvm_power_update_mac(sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update power management\n",
			    __func__);
		}
	}
	sc->sc_firmware_state = 0;
}
4313
/*
 * net80211 state-change handler.  Runs with the com lock held on
 * entry; it is dropped while the driver lock is taken, and re-taken
 * before chaining to the saved net80211 handler (ivp->iv_newstate).
 */
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_node *in;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
	    "switching state %s -> %s arg=0x%x\n",
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate],
	    arg);

	IEEE80211_UNLOCK(ic);
	IWM_LOCK(sc);

	/* Avoid Tx watchdog triggering, when a connection is dropped. */
	if (vap->iv_state == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN)
		sc->sc_tx_timer = 0;

	if ((sc->sc_flags & IWM_FLAG_SCAN_RUNNING) &&
	    (nstate == IEEE80211_S_AUTH ||
	     nstate == IEEE80211_S_ASSOC ||
	     nstate == IEEE80211_S_RUN)) {
		/* Stop blinking for a scan, when authenticating. */
		iwm_led_blink_stop(sc);
	}

	if (vap->iv_state == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) {
		iwm_mvm_led_disable(sc);
		/* disable beacon filtering if we're hopping out of RUN */
		iwm_mvm_disable_beacon_filter(sc);
		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
			in->in_assoc = 0;
	}

	if ((vap->iv_state == IEEE80211_S_RUN ||
	     vap->iv_state == IEEE80211_S_ASSOC) &&
	    nstate == IEEE80211_S_INIT) {
		/*
		 * In this case, iv_newstate() wants to send an 80211 frame on
		 * the network that we are leaving. So we need to call it,
		 * before tearing down all the firmware state.
		 */
		IWM_UNLOCK(sc);
		IEEE80211_LOCK(ic);
		ivp->iv_newstate(vap, nstate, arg);
		IEEE80211_UNLOCK(ic);
		IWM_LOCK(sc);
		iwm_bring_down_firmware(sc, vap);
		IWM_UNLOCK(sc);
		IEEE80211_LOCK(ic);
		return 0;
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
	case IEEE80211_S_SCAN:
		break;

	case IEEE80211_S_AUTH:
		/* Start from a clean firmware state before authenticating. */
		iwm_bring_down_firmware(sc, vap);
		if ((error = iwm_auth(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state: %d\n",
			    __func__, error);
			iwm_bring_down_firmware(sc, vap);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return 1;
		}
		break;

	case IEEE80211_S_ASSOC:
		/*
		 * EBS may be disabled due to previous failures reported by FW.
		 * Reset EBS status here assuming environment has been changed.
		 */
		sc->last_ebs_successful = TRUE;
		break;

	case IEEE80211_S_RUN:
		in = IWM_NODE(vap->iv_bss);
		/* Update the association state, now we have it all */
		/* (eg associd comes in at this point */
		error = iwm_mvm_update_sta(sc, in);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update STA\n", __func__);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return error;
		}
		in->in_assoc = 1;
		error = iwm_mvm_mac_ctxt_changed(sc, vap);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update MAC: %d\n", __func__, error);
		}

		iwm_mvm_sf_update(sc, vap, FALSE);
		iwm_mvm_enable_beacon_filter(sc, ivp);
		iwm_mvm_power_update_mac(sc);
		iwm_mvm_update_quotas(sc, ivp);
		/* Pick a start rate and program the link-quality table. */
		int rix = ieee80211_ratectl_rate(&in->in_ni, NULL, 0);
		iwm_setrates(sc, in, rix);

		if ((error = iwm_mvm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: IWM_LQ_CMD failed: %d\n", __func__, error);
		}

		iwm_mvm_led_enable(sc);
		break;

	default:
		break;
	}
	IWM_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	return (ivp->iv_newstate(vap, nstate, arg));
}
4439
4440 void
4441 iwm_endscan_cb(void *arg, int pending)
4442 {
4443         struct iwm_softc *sc = arg;
4444         struct ieee80211com *ic = &sc->sc_ic;
4445
4446         IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4447             "%s: scan ended\n",
4448             __func__);
4449
4450         ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4451 }
4452
4453 static int
4454 iwm_send_bt_init_conf(struct iwm_softc *sc)
4455 {
4456         struct iwm_bt_coex_cmd bt_cmd;
4457
4458         bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4459         bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4460
4461         return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4462             &bt_cmd);
4463 }
4464
4465 static boolean_t
4466 iwm_mvm_is_lar_supported(struct iwm_softc *sc)
4467 {
4468         boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4469         boolean_t tlv_lar = fw_has_capa(&sc->sc_fw.ucode_capa,
4470                                         IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4471
4472         if (iwm_lar_disable)
4473                 return FALSE;
4474
4475         /*
4476          * Enable LAR only if it is supported by the FW (TLV) &&
4477          * enabled in the NVM
4478          */
4479         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4480                 return nvm_lar && tlv_lar;
4481         else
4482                 return tlv_lar;
4483 }
4484
4485 static boolean_t
4486 iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *sc)
4487 {
4488         return fw_has_api(&sc->sc_fw.ucode_capa,
4489                           IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4490                fw_has_capa(&sc->sc_fw.ucode_capa,
4491                            IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4492 }
4493
/*
 * Push a mobile-country-code (regulatory domain) update to the
 * firmware.  'alpha2' is the two-letter country code ("ZZ" requests
 * the world domain).  A no-op (returning 0) when LAR is unsupported.
 */
static int
iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
{
	struct iwm_mcc_update_cmd mcc_cmd;
	struct iwm_host_cmd hcmd = {
		.id = IWM_MCC_UPDATE_CMD,
		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
		.data = { &mcc_cmd },
	};
	int ret;
#ifdef IWM_DEBUG
	struct iwm_rx_packet *pkt;
	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
	struct iwm_mcc_update_resp *mcc_resp;
	int n_channels;
	uint16_t mcc;
#endif
	/* Newer firmware answers with the v2 response layout. */
	int resp_v2 = fw_has_capa(&sc->sc_fw.ucode_capa,
	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);

	if (!iwm_mvm_is_lar_supported(sc)) {
		IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
		    __func__);
		return 0;
	}

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	/* MCC is the two ASCII letters packed into a 16-bit value. */
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if (iwm_mvm_is_wifi_mcc_supported(sc))
		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;

	/* v1 firmware expects the shorter command layout. */
	if (resp_v2)
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
	else
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);

	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
	    "send MCC update to FW with '%c%c' src = %d\n",
	    alpha2[0], alpha2[1], mcc_cmd.source_id);

	ret = iwm_send_cmd(sc, &hcmd);
	if (ret)
		return ret;

#ifdef IWM_DEBUG
	pkt = hcmd.resp_pkt;

	/* Extract MCC response */
	if (resp_v2) {
		mcc_resp = (void *)pkt->data;
		mcc = mcc_resp->mcc;
		n_channels =  le32toh(mcc_resp->n_channels);
	} else {
		mcc_resp_v1 = (void *)pkt->data;
		mcc = mcc_resp_v1->mcc;
		n_channels =  le32toh(mcc_resp_v1->n_channels);
	}

	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
	if (mcc == 0)
		mcc = 0x3030;  /* "00" - world */

	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
	    "regulatory domain '%c%c' (%d channels available)\n",
	    mcc >> 8, mcc & 0xff, n_channels);
#endif
	/* IWM_CMD_WANT_SKB responses must be released by the caller. */
	iwm_free_resp(sc, &hcmd);

	return 0;
}
4566
4567 static void
4568 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4569 {
4570         struct iwm_host_cmd cmd = {
4571                 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4572                 .len = { sizeof(uint32_t), },
4573                 .data = { &backoff, },
4574         };
4575
4576         if (iwm_send_cmd(sc, &cmd) != 0) {
4577                 device_printf(sc->sc_dev,
4578                     "failed to change thermal tx backoff\n");
4579         }
4580 }
4581
/*
 * Bring the hardware and firmware to a fully operational state:
 * run the INIT firmware image for calibration, restart, load the
 * regular image, and then configure coexistence, antennas, PHY
 * contexts, regulatory, scanning and the Tx queues.  On any failure
 * after firmware load the device is stopped again.
 */
static int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int error, i, ac;

	sc->sf_state = IWM_SF_UNINIT;

	if ((error = iwm_start_hw(sc)) != 0) {
		printf("iwm_start_hw: failed %d\n", error);
		return error;
	}

	/* Run the INIT image first to perform NVM read / calibration. */
	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
		printf("iwm_run_init_mvm_ucode: failed %d\n", error);
		return error;
	}

	/*
	 * should stop and start HW since that INIT
	 * image just loaded
	 */
	iwm_stop_device(sc);
	sc->sc_ps_disabled = FALSE;
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* omstart, this time with the regular firmware */
	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
	if (error) {
		device_printf(sc->sc_dev, "could not load firmware\n");
		goto error;
	}

	/* Smart FIFO failure is logged but not fatal. */
	error = iwm_mvm_sf_update(sc, NULL, FALSE);
	if (error)
		device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n");

	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
		device_printf(sc->sc_dev, "bt init conf failed\n");
		goto error;
	}

	error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
	if (error != 0) {
		device_printf(sc->sc_dev, "antenna config failed\n");
		goto error;
	}

	/* Send phy db control command and then phy db calibration */
	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
		goto error;

	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
		goto error;
	}

	/* Add auxiliary station for scanning */
	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
		device_printf(sc->sc_dev, "add_aux_sta failed\n");
		goto error;
	}

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		if ((error = iwm_mvm_phy_ctxt_add(sc,
		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
			goto error;
	}

	/* Initialize tx backoffs to the minimum. */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_mvm_tt_tx_backoff(sc, 0);

	/* LTR misconfiguration is logged but not fatal. */
	if (iwm_mvm_config_ltr(sc) != 0)
		device_printf(sc->sc_dev, "PCIe LTR configuration failed\n");

	error = iwm_mvm_power_update_device(sc);
	if (error)
		goto error;

	/* "ZZ" requests the world regulatory domain. */
	if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
		goto error;

	if (fw_has_capa(&sc->sc_fw.ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
			goto error;
	}

	/* Enable Tx queues. */
	for (ac = 0; ac < WME_NUM_AC; ac++) {
		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
		    iwm_mvm_ac_to_tx_fifo[ac]);
		if (error)
			goto error;
	}

	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
		goto error;
	}

	return 0;

 error:
	iwm_stop_device(sc);
	return error;
}
4697
4698 /* Allow multicast from our BSSID. */
4699 static int
4700 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4701 {
4702         struct ieee80211_node *ni = vap->iv_bss;
4703         struct iwm_mcast_filter_cmd *cmd;
4704         size_t size;
4705         int error;
4706
4707         size = roundup(sizeof(*cmd), 4);
4708         cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4709         if (cmd == NULL)
4710                 return ENOMEM;
4711         cmd->filter_own = 1;
4712         cmd->port_id = 0;
4713         cmd->count = 0;
4714         cmd->pass_all = 1;
4715         IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4716
4717         error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4718             IWM_CMD_SYNC, size, cmd);
4719         free(cmd, M_DEVBUF);
4720
4721         return (error);
4722 }
4723
4724 /*
4725  * ifnet interfaces
4726  */
4727
4728 static void
4729 iwm_init(struct iwm_softc *sc)
4730 {
4731         int error;
4732
4733         if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4734                 return;
4735         }
4736         sc->sc_generation++;
4737         sc->sc_flags &= ~IWM_FLAG_STOPPED;
4738
4739         if ((error = iwm_init_hw(sc)) != 0) {
4740                 printf("iwm_init_hw failed %d\n", error);
4741                 iwm_stop(sc);
4742                 return;
4743         }
4744
4745         /*
4746          * Ok, firmware loaded and we are jogging
4747          */
4748         sc->sc_flags |= IWM_FLAG_HW_INITED;
4749         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4750 }
4751
4752 static int
4753 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4754 {
4755         struct iwm_softc *sc;
4756         int error;
4757
4758         sc = ic->ic_softc;
4759
4760         IWM_LOCK(sc);
4761         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4762                 IWM_UNLOCK(sc);
4763                 return (ENXIO);
4764         }
4765         error = mbufq_enqueue(&sc->sc_snd, m);
4766         if (error) {
4767                 IWM_UNLOCK(sc);
4768                 return (error);
4769         }
4770         iwm_start(sc);
4771         IWM_UNLOCK(sc);
4772         return (0);
4773 }
4774
4775 /*
4776  * Dequeue packets from sendq and call send.
4777  */
4778 static void
4779 iwm_start(struct iwm_softc *sc)
4780 {
4781         struct ieee80211_node *ni;
4782         struct mbuf *m;
4783         int ac = 0;
4784
4785         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4786         while (sc->qfullmsk == 0 &&
4787                 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4788                 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4789                 if (iwm_tx(sc, m, ni, ac) != 0) {
4790                         if_inc_counter(ni->ni_vap->iv_ifp,
4791                             IFCOUNTER_OERRORS, 1);
4792                         ieee80211_free_node(ni);
4793                         continue;
4794                 }
4795                 sc->sc_tx_timer = 15;
4796         }
4797         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4798 }
4799
static void
iwm_stop(struct iwm_softc *sc)
{

	/*
	 * Mark the device down first so concurrent paths (transmit,
	 * watchdog) stop using the hardware before we tear it down.
	 */
	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	sc->sc_flags |= IWM_FLAG_STOPPED;
	sc->sc_generation++;
	iwm_led_blink_stop(sc);
	/* Cancel any pending Tx watchdog timeout. */
	sc->sc_tx_timer = 0;
	iwm_stop_device(sc);
	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
}
4812
4813 static void
4814 iwm_watchdog(void *arg)
4815 {
4816         struct iwm_softc *sc = arg;
4817         struct ieee80211com *ic = &sc->sc_ic;
4818
4819         if (sc->sc_tx_timer > 0) {
4820                 if (--sc->sc_tx_timer == 0) {
4821                         device_printf(sc->sc_dev, "device timeout\n");
4822 #ifdef IWM_DEBUG
4823                         iwm_nic_error(sc);
4824 #endif
4825                         ieee80211_restart_all(ic);
4826                         counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4827                         return;
4828                 }
4829         }
4830         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4831 }
4832
4833 static void
4834 iwm_parent(struct ieee80211com *ic)
4835 {
4836         struct iwm_softc *sc = ic->ic_softc;
4837         int startall = 0;
4838
4839         IWM_LOCK(sc);
4840         if (ic->ic_nrunning > 0) {
4841                 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4842                         iwm_init(sc);
4843                         startall = 1;
4844                 }
4845         } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4846                 iwm_stop(sc);
4847         IWM_UNLOCK(sc);
4848         if (startall)
4849                 ieee80211_start_all(ic);
4850 }
4851
4852 /*
4853  * The interrupt side of things
4854  */
4855
4856 /*
4857  * error dumping routines are from iwlwifi/mvm/utils.c
4858  */
4859
4860 /*
4861  * Note: This structure is read from the device with IO accesses,
4862  * and the reading already does the endian conversion. As it is
4863  * read with uint32_t-sized accesses, any members with a different size
4864  * need to be ordered correctly though!
4865  */
struct iwm_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;		/* type of error */
	uint32_t trm_hw_status0;	/* TRM HW status */
	uint32_t trm_hw_status1;	/* TRM HW status */
	uint32_t blink2;		/* branch link */
	uint32_t ilink1;		/* interrupt link */
	uint32_t ilink2;		/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;		/* beacon timer */
	uint32_t tsf_low;		/* network timestamp function timer */
	uint32_t tsf_hi;		/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t fw_rev_type;	/* firmware revision type */
	uint32_t major;		/* uCode version major */
	uint32_t minor;		/* uCode version minor */
	uint32_t hw_ver;		/* HW Silicon version */
	uint32_t brd_ver;		/* HW board version */
	uint32_t log_pc;		/* log program counter */
	uint32_t frame_ptr;		/* frame pointer */
	uint32_t stack_ptr;		/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
	uint32_t wait_event;		/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicates which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* date and time of the firmware
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
4913
4914 /*
4915  * UMAC error struct - relevant starting from family 8000 chip.
4916  * Note: This structure is read from the device with IO accesses,
4917  * and the reading already does the endian conversion. As it is
4918  * read with u32-sized accesses, any members with a different size
4919  * need to be ordered correctly though!
4920  */
struct iwm_umac_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t umac_major;	/* UMAC version major */
	uint32_t umac_minor;	/* UMAC version minor */
	uint32_t frame_pointer; /* core register 27*/
	uint32_t stack_pointer; /* core register 28 */
	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
	uint32_t nic_isr_pref;	/* ISR status register */
} __packed;
4938
/*
 * Layout of the firmware error log: a single leading status word
 * followed by fixed-size seven-word entries.
 */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
4941
4942 #ifdef IWM_DEBUG
/*
 * Human-readable names for firmware assert/error codes, used when dumping
 * the device error log.  The terminating "ADVANCED_SYSASSERT" entry is the
 * fallback returned by iwm_desc_lookup() for unknown codes.
 *
 * The table is only referenced from this file (under IWM_DEBUG) and is
 * never modified, so declare it static const instead of leaving it as a
 * writable symbol in the kernel's global namespace.
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
4964
4965 static const char *
4966 iwm_desc_lookup(uint32_t num)
4967 {
4968         int i;
4969
4970         for (i = 0; i < nitems(advanced_lookup) - 1; i++)
4971                 if (advanced_lookup[i].num == num)
4972                         return advanced_lookup[i].name;
4973
4974         /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
4975         return advanced_lookup[i].name;
4976 }
4977
/*
 * Dump the UMAC error event table (family 8000+ firmware) to the console.
 * Reads the table from device memory at the address the firmware reported
 * in the alive response (sc->umac_error_event_table).
 */
static void
iwm_nic_umac_error(struct iwm_softc *sc)
{
	struct iwm_umac_error_event_table table;
	uint32_t base;

	base = sc->umac_error_event_table;

	/* The log lives in SRAM; addresses below 0x800000 are bogus. */
	if (base < 0x800000) {
		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
		    base);
		return;
	}

	/* iwm_read_mem() takes a count of 32-bit words, not bytes. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	/*
	 * NOTE(review): this check mirrors the Linux iwlwifi log-dump code;
	 * table.valid is an entry count here, not a boolean.
	 */
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
		iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
	    table.ilink1);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
	    table.ilink2);
	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
	    table.frame_pointer);
	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
	    table.stack_pointer);
	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
	    table.nic_isr_pref);
}
5024
5025 /*
5026  * Support for dumping the error log seemed like a good idea ...
5027  * but it's mostly hex junk and the only sensible thing is the
5028  * hw/ucode revision (which we know anyway).  Since it's here,
5029  * I'll just leave it in, just in case e.g. the Intel guys want to
5030  * help us decipher some "ADVANCED_SYSASSERT" later.
5031  */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table table;
	uint32_t base;

	device_printf(sc->sc_dev, "dumping device error log\n");
	base = sc->error_event_table;
	/* The log lives in SRAM; addresses below 0x800000 are bogus. */
	if (base < 0x800000) {
		device_printf(sc->sc_dev,
		    "Invalid error log pointer 0x%08x\n", base);
		return;
	}

	/* iwm_read_mem() takes a count of 32-bit words, not bytes. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (!table.valid) {
		device_printf(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	/*
	 * NOTE(review): this check mirrors the Linux iwlwifi log-dump code;
	 * table.valid is an entry count here, not a boolean.
	 */
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
	    iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
	    table.trm_hw_status0);
	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
	    table.trm_hw_status1);
	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
	    table.fw_rev_type);
	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);

	/* Family 8000+ firmware also maintains a UMAC error table. */
	if (sc->umac_error_event_table)
		iwm_nic_umac_error(sc);
}
5104 #endif
5105
5106 static void
5107 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5108 {
5109         struct ieee80211com *ic = &sc->sc_ic;
5110         struct iwm_cmd_response *cresp;
5111         struct mbuf *m1;
5112         uint32_t offset = 0;
5113         uint32_t maxoff = IWM_RBUF_SIZE;
5114         uint32_t nextoff;
5115         boolean_t stolen = FALSE;
5116
5117 #define HAVEROOM(a)     \
5118     ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
5119
5120         while (HAVEROOM(offset)) {
5121                 struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5122                     offset);
5123                 int qid, idx, code, len;
5124
5125                 qid = pkt->hdr.qid;
5126                 idx = pkt->hdr.idx;
5127
5128                 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5129
5130                 /*
5131                  * randomly get these from the firmware, no idea why.
5132                  * they at least seem harmless, so just ignore them for now
5133                  */
5134                 if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5135                     pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5136                         break;
5137                 }
5138
5139                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5140                     "rx packet qid=%d idx=%d type=%x\n",
5141                     qid & ~0x80, pkt->hdr.idx, code);
5142
5143                 len = iwm_rx_packet_len(pkt);
5144                 len += sizeof(uint32_t); /* account for status word */
5145                 nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
5146
5147                 iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5148
5149                 switch (code) {
5150                 case IWM_REPLY_RX_PHY_CMD:
5151                         iwm_mvm_rx_rx_phy_cmd(sc, pkt);
5152                         break;
5153
5154                 case IWM_REPLY_RX_MPDU_CMD: {
5155                         /*
5156                          * If this is the last frame in the RX buffer, we
5157                          * can directly feed the mbuf to the sharks here.
5158                          */
5159                         struct iwm_rx_packet *nextpkt = mtodoff(m,
5160                             struct iwm_rx_packet *, nextoff);
5161                         if (!HAVEROOM(nextoff) ||
5162                             (nextpkt->hdr.code == 0 &&
5163                              (nextpkt->hdr.qid & ~0x80) == 0 &&
5164                              nextpkt->hdr.idx == 0) ||
5165                             (nextpkt->len_n_flags ==
5166                              htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5167                                 if (iwm_mvm_rx_rx_mpdu(sc, m, offset, stolen)) {
5168                                         stolen = FALSE;
5169                                         /* Make sure we abort the loop */
5170                                         nextoff = maxoff;
5171                                 }
5172                                 break;
5173                         }
5174
5175                         /*
5176                          * Use m_copym instead of m_split, because that
5177                          * makes it easier to keep a valid rx buffer in
5178                          * the ring, when iwm_mvm_rx_rx_mpdu() fails.
5179                          *
5180                          * We need to start m_copym() at offset 0, to get the
5181                          * M_PKTHDR flag preserved.
5182                          */
5183                         m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
5184                         if (m1) {
5185                                 if (iwm_mvm_rx_rx_mpdu(sc, m1, offset, stolen))
5186                                         stolen = TRUE;
5187                                 else
5188                                         m_freem(m1);
5189                         }
5190                         break;
5191                 }
5192
5193                 case IWM_TX_CMD:
5194                         iwm_mvm_rx_tx_cmd(sc, pkt);
5195                         break;
5196
5197                 case IWM_MISSED_BEACONS_NOTIFICATION: {
5198                         struct iwm_missed_beacons_notif *resp;
5199                         int missed;
5200
5201                         /* XXX look at mac_id to determine interface ID */
5202                         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5203
5204                         resp = (void *)pkt->data;
5205                         missed = le32toh(resp->consec_missed_beacons);
5206
5207                         IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5208                             "%s: MISSED_BEACON: mac_id=%d, "
5209                             "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5210                             "num_rx=%d\n",
5211                             __func__,
5212                             le32toh(resp->mac_id),
5213                             le32toh(resp->consec_missed_beacons_since_last_rx),
5214                             le32toh(resp->consec_missed_beacons),
5215                             le32toh(resp->num_expected_beacons),
5216                             le32toh(resp->num_recvd_beacons));
5217
5218                         /* Be paranoid */
5219                         if (vap == NULL)
5220                                 break;
5221
5222                         /* XXX no net80211 locking? */
5223                         if (vap->iv_state == IEEE80211_S_RUN &&
5224                             (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5225                                 if (missed > vap->iv_bmissthreshold) {
5226                                         /* XXX bad locking; turn into task */
5227                                         IWM_UNLOCK(sc);
5228                                         ieee80211_beacon_miss(ic);
5229                                         IWM_LOCK(sc);
5230                                 }
5231                         }
5232
5233                         break;
5234                 }
5235
5236                 case IWM_MFUART_LOAD_NOTIFICATION:
5237                         break;
5238
5239                 case IWM_MVM_ALIVE:
5240                         break;
5241
5242                 case IWM_CALIB_RES_NOTIF_PHY_DB:
5243                         break;
5244
5245                 case IWM_STATISTICS_NOTIFICATION:
5246                         iwm_mvm_handle_rx_statistics(sc, pkt);
5247                         break;
5248
5249                 case IWM_NVM_ACCESS_CMD:
5250                 case IWM_MCC_UPDATE_CMD:
5251                         if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5252                                 memcpy(sc->sc_cmd_resp,
5253                                     pkt, sizeof(sc->sc_cmd_resp));
5254                         }
5255                         break;
5256
5257                 case IWM_MCC_CHUB_UPDATE_CMD: {
5258                         struct iwm_mcc_chub_notif *notif;
5259                         notif = (void *)pkt->data;
5260
5261                         sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5262                         sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5263                         sc->sc_fw_mcc[2] = '\0';
5264                         IWM_DPRINTF(sc, IWM_DEBUG_LAR,
5265                             "fw source %d sent CC '%s'\n",
5266                             notif->source_id, sc->sc_fw_mcc);
5267                         break;
5268                 }
5269
5270                 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5271                 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5272                                  IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5273                         struct iwm_dts_measurement_notif_v1 *notif;
5274
5275                         if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5276                                 device_printf(sc->sc_dev,
5277                                     "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5278                                 break;
5279                         }
5280                         notif = (void *)pkt->data;
5281                         IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5282                             "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5283                             notif->temp);
5284                         break;
5285                 }
5286
5287                 case IWM_PHY_CONFIGURATION_CMD:
5288                 case IWM_TX_ANT_CONFIGURATION_CMD:
5289                 case IWM_ADD_STA:
5290                 case IWM_MAC_CONTEXT_CMD:
5291                 case IWM_REPLY_SF_CFG_CMD:
5292                 case IWM_POWER_TABLE_CMD:
5293                 case IWM_LTR_CONFIG:
5294                 case IWM_PHY_CONTEXT_CMD:
5295                 case IWM_BINDING_CONTEXT_CMD:
5296                 case IWM_TIME_EVENT_CMD:
5297                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5298                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5299                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5300                 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5301                 case IWM_SCAN_OFFLOAD_ABORT_CMD:
5302                 case IWM_REPLY_BEACON_FILTERING_CMD:
5303                 case IWM_MAC_PM_POWER_TABLE:
5304                 case IWM_TIME_QUOTA_CMD:
5305                 case IWM_REMOVE_STA:
5306                 case IWM_TXPATH_FLUSH:
5307                 case IWM_LQ_CMD:
5308                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
5309                                  IWM_FW_PAGING_BLOCK_CMD):
5310                 case IWM_BT_CONFIG:
5311                 case IWM_REPLY_THERMAL_MNG_BACKOFF:
5312                         cresp = (void *)pkt->data;
5313                         if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5314                                 memcpy(sc->sc_cmd_resp,
5315                                     pkt, sizeof(*pkt)+sizeof(*cresp));
5316                         }
5317                         break;
5318
5319                 /* ignore */
5320                 case IWM_PHY_DB_CMD:
5321                         break;
5322
5323                 case IWM_INIT_COMPLETE_NOTIF:
5324                         break;
5325
5326                 case IWM_SCAN_OFFLOAD_COMPLETE:
5327                         iwm_mvm_rx_lmac_scan_complete_notif(sc, pkt);
5328                         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5329                                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5330                                 ieee80211_runtask(ic, &sc->sc_es_task);
5331                         }
5332                         break;
5333
5334                 case IWM_SCAN_ITERATION_COMPLETE: {
5335                         struct iwm_lmac_scan_complete_notif *notif;
5336                         notif = (void *)pkt->data;
5337                         break;
5338                 }
5339
5340                 case IWM_SCAN_COMPLETE_UMAC:
5341                         iwm_mvm_rx_umac_scan_complete_notif(sc, pkt);
5342                         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5343                                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5344                                 ieee80211_runtask(ic, &sc->sc_es_task);
5345                         }
5346                         break;
5347
5348                 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5349                         struct iwm_umac_scan_iter_complete_notif *notif;
5350                         notif = (void *)pkt->data;
5351
5352                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5353                             "complete, status=0x%x, %d channels scanned\n",
5354                             notif->status, notif->scanned_channels);
5355                         break;
5356                 }
5357
5358                 case IWM_REPLY_ERROR: {
5359                         struct iwm_error_resp *resp;
5360                         resp = (void *)pkt->data;
5361
5362                         device_printf(sc->sc_dev,
5363                             "firmware error 0x%x, cmd 0x%x\n",
5364                             le32toh(resp->error_type),
5365                             resp->cmd_id);
5366                         break;
5367                 }
5368
5369                 case IWM_TIME_EVENT_NOTIFICATION: {
5370                         struct iwm_time_event_notif *notif;
5371                         notif = (void *)pkt->data;
5372
5373                         IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5374                             "TE notif status = 0x%x action = 0x%x\n",
5375                             notif->status, notif->action);
5376                         break;
5377                 }
5378
5379                 /*
5380                  * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
5381                  * messages. Just ignore them for now.
5382                  */
5383                 case IWM_DEBUG_LOG_MSG:
5384                         break;
5385
5386                 case IWM_MCAST_FILTER_CMD:
5387                         break;
5388
5389                 case IWM_SCD_QUEUE_CFG: {
5390                         struct iwm_scd_txq_cfg_rsp *rsp;
5391                         rsp = (void *)pkt->data;
5392
5393                         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5394                             "queue cfg token=0x%x sta_id=%d "
5395                             "tid=%d scd_queue=%d\n",
5396                             rsp->token, rsp->sta_id, rsp->tid,
5397                             rsp->scd_queue);
5398                         break;
5399                 }
5400
5401                 default:
5402                         device_printf(sc->sc_dev,
5403                             "frame %d/%d %x UNHANDLED (this should "
5404                             "not happen)\n", qid & ~0x80, idx,
5405                             pkt->len_n_flags);
5406                         break;
5407                 }
5408
5409                 /*
5410                  * Why test bit 0x80?  The Linux driver:
5411                  *
5412                  * There is one exception:  uCode sets bit 15 when it
5413                  * originates the response/notification, i.e. when the
5414                  * response/notification is not a direct response to a
5415                  * command sent by the driver.  For example, uCode issues
5416                  * IWM_REPLY_RX when it sends a received frame to the driver;
5417                  * it is not a direct response to any driver command.
5418                  *
5419                  * Ok, so since when is 7 == 15?  Well, the Linux driver
5420                  * uses a slightly different format for pkt->hdr, and "qid"
5421                  * is actually the upper byte of a two-byte field.
5422                  */
5423                 if (!(qid & (1 << 7)))
5424                         iwm_cmd_done(sc, pkt);
5425
5426                 offset = nextoff;
5427         }
5428         if (stolen)
5429                 m_freem(m);
5430 #undef HAVEROOM
5431 }
5432
5433 /*
5434  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5435  * Basic structure from if_iwn
5436  */
5437 static void
5438 iwm_notif_intr(struct iwm_softc *sc)
5439 {
5440         uint16_t hw;
5441
5442         bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5443             BUS_DMASYNC_POSTREAD);
5444
5445         hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5446
5447         /*
5448          * Process responses
5449          */
5450         while (sc->rxq.cur != hw) {
5451                 struct iwm_rx_ring *ring = &sc->rxq;
5452                 struct iwm_rx_data *data = &ring->data[ring->cur];
5453
5454                 bus_dmamap_sync(ring->data_dmat, data->map,
5455                     BUS_DMASYNC_POSTREAD);
5456
5457                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5458                     "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5459                 iwm_handle_rxb(sc, data->m);
5460
5461                 ring->cur = (ring->cur + 1) % IWM_RX_RING_COUNT;
5462         }
5463
5464         /*
5465          * Tell the firmware that it can reuse the ring entries that
5466          * we have just processed.
5467          * Seems like the hardware gets upset unless we align
5468          * the write by 8??
5469          */
5470         hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5471         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, rounddown2(hw, 8));
5472 }
5473
/*
 * Primary interrupt handler (legacy INTx or MSI).
 *
 * Reads the interrupt cause either from the in-memory ICT table (when
 * IWM_FLAG_USE_ICT is set) or directly from the INT/FH_INT_STATUS CSRs,
 * acknowledges it, then dispatches: firmware SW errors (full VAP
 * restart), HW errors and rfkill (stop the device), firmware chunk
 * load completion, and RX notifications.
 */
static void
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	int handled = 0;
	/* r2 is only meaningful in the non-ICT (direct CSR) path below. */
	int r1, r2, rv = 0;
	int isperiodic = 0;

	IWM_LOCK(sc);
	/* Mask all interrupts while we service this one. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			/* Clear the consumed slot before advancing. */
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;
	}

	/* Acknowledge the causes we are about to handle. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* Safely ignore these bits for debug checks below */
	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
		int i;
		struct ieee80211com *ic = &sc->sc_ic;
		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

#ifdef IWM_DEBUG
		iwm_nic_error(sc);
#endif
		/* Dump driver status (TX and RX rings) while we're here. */
		device_printf(sc->sc_dev, "driver status:\n");
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			device_printf(sc->sc_dev,
			    "  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued);
		}
		device_printf(sc->sc_dev,
		    "  rx ring: cur=%d\n", sc->rxq.cur);
		device_printf(sc->sc_dev,
		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);

		/* Reset our firmware state tracking. */
		sc->sc_firmware_state = 0;
		/* Don't stop the device; just do a VAP restart */
		IWM_UNLOCK(sc);

		if (vap == NULL) {
			printf("%s: null vap\n", __func__);
			return;
		}

		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
		    "restarting\n", __func__, vap->iv_state);

		/*
		 * Returns without re-enabling interrupts; the restart path
		 * is expected to reinitialize the device.
		 */
		ieee80211_restart_all(ic);
		return;
	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		device_printf(sc->sc_dev, "hardware error, stopping device\n");
		iwm_stop(sc);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;
		/* Wake the thread sleeping in the firmware-load path. */
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		if (iwm_check_rfkill(sc)) {
			device_printf(sc->sc_dev,
			    "%s: rfkill switch, disabling interface\n",
			    __func__);
			iwm_stop(sc);
		}
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "%s: unhandled interrupts: %x\n", __func__, r1);
	/* NOTE(review): rv is assigned but never read in this function. */
	rv = 1;

 out_ena:
	iwm_restore_interrupts(sc);
 out:
	IWM_UNLOCK(sc);
	return;
}
5629
5630 /*
5631  * Autoconf glue-sniffing
5632  */
5633 #define PCI_VENDOR_INTEL                0x8086
5634 #define PCI_PRODUCT_INTEL_WL_3160_1     0x08b3
5635 #define PCI_PRODUCT_INTEL_WL_3160_2     0x08b4
5636 #define PCI_PRODUCT_INTEL_WL_3165_1     0x3165
5637 #define PCI_PRODUCT_INTEL_WL_3165_2     0x3166
5638 #define PCI_PRODUCT_INTEL_WL_3168_1     0x24fb
5639 #define PCI_PRODUCT_INTEL_WL_7260_1     0x08b1
5640 #define PCI_PRODUCT_INTEL_WL_7260_2     0x08b2
5641 #define PCI_PRODUCT_INTEL_WL_7265_1     0x095a
5642 #define PCI_PRODUCT_INTEL_WL_7265_2     0x095b
5643 #define PCI_PRODUCT_INTEL_WL_8260_1     0x24f3
5644 #define PCI_PRODUCT_INTEL_WL_8260_2     0x24f4
5645 #define PCI_PRODUCT_INTEL_WL_8265_1     0x24fd
5646
/* Mapping from Intel PCI device ID to per-chip configuration. */
static const struct iwm_devices {
	uint16_t                device;	/* PCI device ID */
	const struct iwm_cfg    *cfg;	/* matching chip configuration */
} iwm_devices[] = {
	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
	{ PCI_PRODUCT_INTEL_WL_3168_1, &iwm3168_cfg },
	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
	{ PCI_PRODUCT_INTEL_WL_8265_1, &iwm8265_cfg },
};
5664
5665 static int
5666 iwm_probe(device_t dev)
5667 {
5668         int i;
5669
5670         for (i = 0; i < nitems(iwm_devices); i++) {
5671                 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5672                     pci_get_device(dev) == iwm_devices[i].device) {
5673                         device_set_desc(dev, iwm_devices[i].cfg->name);
5674                         return (BUS_PROBE_DEFAULT);
5675                 }
5676         }
5677
5678         return (ENXIO);
5679 }
5680
5681 static int
5682 iwm_dev_check(device_t dev)
5683 {
5684         struct iwm_softc *sc;
5685         uint16_t devid;
5686         int i;
5687
5688         sc = device_get_softc(dev);
5689
5690         devid = pci_get_device(dev);
5691         for (i = 0; i < nitems(iwm_devices); i++) {
5692                 if (iwm_devices[i].device == devid) {
5693                         sc->cfg = iwm_devices[i].cfg;
5694                         return (0);
5695                 }
5696         }
5697         device_printf(dev, "unknown adapter type\n");
5698         return ENXIO;
5699 }
5700
5701 /* PCI registers */
5702 #define PCI_CFG_RETRY_TIMEOUT   0x041
5703
5704 static int
5705 iwm_pci_attach(device_t dev)
5706 {
5707         struct iwm_softc *sc;
5708         int count, error, rid;
5709         uint16_t reg;
5710
5711         sc = device_get_softc(dev);
5712
5713         /* We disable the RETRY_TIMEOUT register (0x41) to keep
5714          * PCI Tx retries from interfering with C3 CPU state */
5715         pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5716
5717         /* Enable bus-mastering and hardware bug workaround. */
5718         pci_enable_busmaster(dev);
5719         reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5720         /* if !MSI */
5721         if (reg & PCIM_STATUS_INTxSTATE) {
5722                 reg &= ~PCIM_STATUS_INTxSTATE;
5723         }
5724         pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5725
5726         rid = PCIR_BAR(0);
5727         sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5728             RF_ACTIVE);
5729         if (sc->sc_mem == NULL) {
5730                 device_printf(sc->sc_dev, "can't map mem space\n");
5731                 return (ENXIO);
5732         }
5733         sc->sc_st = rman_get_bustag(sc->sc_mem);
5734         sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5735
5736         /* Install interrupt handler. */
5737         count = 1;
5738         rid = 0;
5739         if (pci_alloc_msi(dev, &count) == 0)
5740                 rid = 1;
5741         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5742             (rid != 0 ? 0 : RF_SHAREABLE));
5743         if (sc->sc_irq == NULL) {
5744                 device_printf(dev, "can't map interrupt\n");
5745                         return (ENXIO);
5746         }
5747         error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5748             NULL, iwm_intr, sc, &sc->sc_ih);
5749         if (sc->sc_ih == NULL) {
5750                 device_printf(dev, "can't establish interrupt");
5751                         return (ENXIO);
5752         }
5753         sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5754
5755         return (0);
5756 }
5757
/*
 * Release the PCI resources acquired by iwm_pci_attach(): the
 * interrupt handler, IRQ resource and MSI allocation, then the BAR0
 * register mapping.  Safe to call with partially-attached state
 * thanks to the NULL checks.
 */
static void
iwm_pci_detach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);

	if (sc->sc_irq != NULL) {
		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->sc_irq), sc->sc_irq);
		pci_release_msi(dev);
	}
	if (sc->sc_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_mem), sc->sc_mem);
}
5773
5774
5775
/*
 * Device attach: initialize locks, queues and callouts, attach PCI
 * resources, identify the chip, allocate all DMA structures (firmware
 * memory, keep-warm page, ICT table, TX scheduler, TX/RX rings), set
 * up net80211 capabilities, and load the firmware.  Final net80211
 * attachment is deferred to iwm_preinit() via a config_intrhook.
 *
 * Any failure unwinds everything through iwm_detach_local() and
 * returns ENXIO.
 */
static int
iwm_attach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	int error;
	int txq_i, i;

	sc->sc_dev = dev;
	sc->sc_attached = 1;
	IWM_LOCK_INIT(sc);
	mbufq_init(&sc->sc_snd, ifqmaxlen);
	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);

	sc->sc_notif_wait = iwm_notification_wait_init(sc);
	if (sc->sc_notif_wait == NULL) {
		device_printf(dev, "failed to init notification wait struct\n");
		goto fail;
	}

	sc->sf_state = IWM_SF_UNINIT;

	/* Init phy db */
	sc->sc_phy_db = iwm_phy_db_init(sc);
	if (!sc->sc_phy_db) {
		device_printf(dev, "Cannot init phy_db\n");
		goto fail;
	}

	/* Set EBS as successful as long as not stated otherwise by the FW. */
	sc->last_ebs_successful = TRUE;

	/* PCI attach */
	error = iwm_pci_attach(dev);
	if (error != 0)
		goto fail;

	sc->sc_wantresp = -1;

	/* Match device id */
	error = iwm_dev_check(dev);
	if (error != 0)
		goto fail;

	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		int ret;
		uint32_t hw_step;

		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

		if (iwm_prepare_card_hw(sc) != 0) {
			device_printf(dev, "could not initialize hardware\n");
			goto fail;
		}

		/*
		 * In order to recognize C step the driver should read the
		 * chip version id located at the AUX bus MISC address.
		 */
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(2);

		/* Wait for the MAC clock before touching the AUX bus. */
		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   25000);
		if (!ret) {
			device_printf(sc->sc_dev,
			    "Failed to wake up the nic\n");
			goto fail;
		}

		if (iwm_nic_lock(sc)) {
			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
			hw_step |= IWM_ENABLE_WFPM;
			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
			if (hw_step == 0x3)
				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
						(IWM_SILICON_C_STEP << 2);
			iwm_nic_unlock(sc);
		} else {
			device_printf(sc->sc_dev, "Failed to lock the nic\n");
			goto fail;
		}
	}

	/* special-case 7265D, it has the same PCI IDs. */
	if (sc->cfg == &iwm7265_cfg &&
	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
		sc->cfg = &iwm7265d_cfg;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		device_printf(dev, "could not allocate memory for firmware\n");
		goto fail;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		device_printf(dev, "could not allocate keep warm page\n");
		goto fail;
	}

	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		device_printf(dev, "could not allocate ICT table\n");
		goto fail;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		device_printf(dev, "could not allocate TX scheduler rings\n");
		goto fail;
	}

	/* Allocate TX rings */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			device_printf(dev,
			    "could not allocate TX ring %d\n",
			    txq_i);
			goto fail;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		device_printf(dev, "could not allocate RX ring\n");
		goto fail;
	}

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_STA |
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_WME |
	    IEEE80211_C_PMGT |
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
	    ;
	/* Advertise full-offload scanning */
	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
		sc->sc_phyctxt[i].color = 0;
		sc->sc_phyctxt[i].ref = 0;
		sc->sc_phyctxt[i].channel = NULL;
	}

	/* Default noise floor */
	sc->sc_noise = -96;

	/* Max RSSI */
	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;

#ifdef IWM_DEBUG
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

	error = iwm_read_firmware(sc);
	if (error) {
		goto fail;
	} else if (sc->sc_fw.fw_fp == NULL) {
		/*
		 * XXX Add a solution for properly deferring firmware load
		 *     during bootup.
		 */
		goto fail;
	} else {
		/* Defer the rest of attach until interrupts are enabled. */
		sc->sc_preinit_hook.ich_func = iwm_preinit;
		sc->sc_preinit_hook.ich_arg = sc;
		if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
			device_printf(dev,
			    "config_intrhook_establish failed\n");
			goto fail;
		}
	}

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);

	return 0;

	/* Free allocated memory if something failed during attachment. */
fail:
	iwm_detach_local(sc, 0);

	return ENXIO;
}
5991
5992 static int
5993 iwm_is_valid_ether_addr(uint8_t *addr)
5994 {
5995         char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
5996
5997         if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
5998                 return (FALSE);
5999
6000         return (TRUE);
6001 }
6002
6003 static int
6004 iwm_wme_update(struct ieee80211com *ic)
6005 {
6006 #define IWM_EXP2(x)     ((1 << (x)) - 1)        /* CWmin = 2^ECWmin - 1 */
6007         struct iwm_softc *sc = ic->ic_softc;
6008         struct chanAccParams chp;
6009         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6010         struct iwm_vap *ivp = IWM_VAP(vap);
6011         struct iwm_node *in;
6012         struct wmeParams tmp[WME_NUM_AC];
6013         int aci, error;
6014
6015         if (vap == NULL)
6016                 return (0);
6017
6018         ieee80211_wme_ic_getparams(ic, &chp);
6019
6020         IEEE80211_LOCK(ic);
6021         for (aci = 0; aci < WME_NUM_AC; aci++)
6022                 tmp[aci] = chp.cap_wmeParams[aci];
6023         IEEE80211_UNLOCK(ic);
6024
6025         IWM_LOCK(sc);
6026         for (aci = 0; aci < WME_NUM_AC; aci++) {
6027                 const struct wmeParams *ac = &tmp[aci];
6028                 ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6029                 ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6030                 ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6031                 ivp->queue_params[aci].edca_txop =
6032                     IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6033         }
6034         ivp->have_wme = TRUE;
6035         if (ivp->is_uploaded && vap->iv_bss != NULL) {
6036                 in = IWM_NODE(vap->iv_bss);
6037                 if (in->in_assoc) {
6038                         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
6039                                 device_printf(sc->sc_dev,
6040                                     "%s: failed to update MAC\n", __func__);
6041                         }
6042                 }
6043         }
6044         IWM_UNLOCK(sc);
6045
6046         return (0);
6047 #undef IWM_EXP2
6048 }
6049
/*
 * Deferred attach continuation, run from the config_intrhook once
 * interrupts are available: starts the hardware, runs the init
 * firmware once to obtain NVM data, then completes net80211
 * attachment (method table, channel map, radiotap).
 *
 * On failure the whole device attach is unwound via iwm_detach_local().
 */
static void
iwm_preinit(void *arg)
{
	struct iwm_softc *sc = arg;
	device_t dev = sc->sc_dev;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s\n", __func__);

	IWM_LOCK(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		IWM_UNLOCK(sc);
		goto fail;
	}

	/* Run the init ucode once for NVM data, then quiesce the NIC. */
	error = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error) {
		IWM_UNLOCK(sc);
		goto fail;
	}
	device_printf(dev,
	    "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));

	/* not all hardware can do 5GHz band */
	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
	IWM_UNLOCK(sc);

	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/*
	 * At this point we've committed - if we fail to do setup,
	 * we now also have to tear down the net80211 state.
	 */
	ieee80211_ifattach(ic);
	ic->ic_vap_create = iwm_vap_create;
	ic->ic_vap_delete = iwm_vap_delete;
	ic->ic_raw_xmit = iwm_raw_xmit;
	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_scan_start = iwm_scan_start;
	ic->ic_scan_end = iwm_scan_end;
	ic->ic_update_mcast = iwm_update_mcast;
	ic->ic_getradiocaps = iwm_init_channel_map;
	ic->ic_set_channel = iwm_set_channel;
	ic->ic_scan_curchan = iwm_scan_curchan;
	ic->ic_scan_mindwell = iwm_scan_mindwell;
	ic->ic_wme.wme_update = iwm_wme_update;
	ic->ic_parent = iwm_parent;
	ic->ic_transmit = iwm_transmit;
	iwm_radiotap_attach(sc);
	if (bootverbose)
		ieee80211_announce(ic);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);
	config_intrhook_disestablish(&sc->sc_preinit_hook);

	return;
fail:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
	iwm_detach_local(sc, 0);
}
6120
6121 /*
6122  * Attach the interface to 802.11 radiotap.
6123  */
6124 static void
6125 iwm_radiotap_attach(struct iwm_softc *sc)
6126 {
6127         struct ieee80211com *ic = &sc->sc_ic;
6128
6129         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6130             "->%s begin\n", __func__);
6131         ieee80211_radiotap_attach(ic,
6132             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6133                 IWM_TX_RADIOTAP_PRESENT,
6134             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6135                 IWM_RX_RADIOTAP_PRESENT);
6136         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6137             "->%s end\n", __func__);
6138 }
6139
6140 static struct ieee80211vap *
6141 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6142     enum ieee80211_opmode opmode, int flags,
6143     const uint8_t bssid[IEEE80211_ADDR_LEN],
6144     const uint8_t mac[IEEE80211_ADDR_LEN])
6145 {
6146         struct iwm_vap *ivp;
6147         struct ieee80211vap *vap;
6148
6149         if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6150                 return NULL;
6151         ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6152         vap = &ivp->iv_vap;
6153         ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6154         vap->iv_bmissthreshold = 10;            /* override default */
6155         /* Override with driver methods. */
6156         ivp->iv_newstate = vap->iv_newstate;
6157         vap->iv_newstate = iwm_newstate;
6158
6159         ivp->id = IWM_DEFAULT_MACID;
6160         ivp->color = IWM_DEFAULT_COLOR;
6161
6162         ivp->have_wme = FALSE;
6163         ivp->ps_disabled = FALSE;
6164
6165         ieee80211_ratectl_init(vap);
6166         /* Complete setup. */
6167         ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6168             mac);
6169         ic->ic_opmode = opmode;
6170
6171         return vap;
6172 }
6173
/*
 * Destroy the vap created by iwm_vap_create(): detach rate control
 * and net80211 state, then free the containing iwm_vap allocation.
 */
static void
iwm_vap_delete(struct ieee80211vap *vap)
{
	struct iwm_vap *ivp = IWM_VAP(vap);

	ieee80211_ratectl_deinit(vap);
	ieee80211_vap_detach(vap);
	free(ivp, M_80211_VAP);
}
6183
6184 static void
6185 iwm_xmit_queue_drain(struct iwm_softc *sc)
6186 {
6187         struct mbuf *m;
6188         struct ieee80211_node *ni;
6189
6190         while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
6191                 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
6192                 ieee80211_free_node(ni);
6193                 m_freem(m);
6194         }
6195 }
6196
6197 static void
6198 iwm_scan_start(struct ieee80211com *ic)
6199 {
6200         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6201         struct iwm_softc *sc = ic->ic_softc;
6202         int error;
6203
6204         IWM_LOCK(sc);
6205         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6206                 /* This should not be possible */
6207                 device_printf(sc->sc_dev,
6208                     "%s: Previous scan not completed yet\n", __func__);
6209         }
6210         if (fw_has_capa(&sc->sc_fw.ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6211                 error = iwm_mvm_umac_scan(sc);
6212         else
6213                 error = iwm_mvm_lmac_scan(sc);
6214         if (error != 0) {
6215                 device_printf(sc->sc_dev, "could not initiate scan\n");
6216                 IWM_UNLOCK(sc);
6217                 ieee80211_cancel_scan(vap);
6218         } else {
6219                 sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6220                 iwm_led_blink_start(sc);
6221                 IWM_UNLOCK(sc);
6222         }
6223 }
6224
/*
 * net80211 scan-end callback: stop the scan LED blink, restore the
 * steady LED when associated, and ask the firmware to stop scanning
 * if a scan is still marked as running.
 */
static void
iwm_scan_end(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;

	IWM_LOCK(sc);
	iwm_led_blink_stop(sc);
	if (vap->iv_state == IEEE80211_S_RUN)
		iwm_mvm_led_enable(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/*
		 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
		 * taskqueue.
		 */
		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
		iwm_mvm_scan_stop_wait(sc);
	}
	IWM_UNLOCK(sc);

	/*
	 * Make sure we don't race, if sc_es_task is still enqueued here.
	 * This is to make sure that it won't call ieee80211_scan_done
	 * when we have already started the next scan.
	 */
	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
}
6253
/*
 * net80211 ic_update_mcast method.  Intentionally a no-op: multicast
 * filter reprogramming is not implemented by this driver.
 */
static void
iwm_update_mcast(struct ieee80211com *ic)
{
}
6258
/*
 * net80211 ic_set_channel method.  Intentionally a no-op: channel
 * changes are not driven through this hook by this driver.
 */
static void
iwm_set_channel(struct ieee80211com *ic)
{
}
6263
/*
 * net80211 ic_scan_curchan method.  Intentionally a no-op: per-channel
 * scan stepping is handled by the firmware scan, not the driver.
 */
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
6268
/*
 * net80211 ic_scan_mindwell method: the minimum dwell time elapsed on
 * the current channel.  Nothing to do here; like iwm_scan_curchan(),
 * per-channel dwell is not driven by the driver.
 */
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
6274
/*
 * Restart the device: stop it, then re-initialize it if any interface
 * is still marked running.  Called directly by iwm_resume(); serializes
 * against other init/stop sequences via the IWM_FLAG_BUSY flag.
 */
void
iwm_init_task(void *arg1)
{
        struct iwm_softc *sc = arg1;

        IWM_LOCK(sc);
        /* Wait until no other thread holds the BUSY flag. */
        while (sc->sc_flags & IWM_FLAG_BUSY)
                msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
        sc->sc_flags |= IWM_FLAG_BUSY;
        iwm_stop(sc);
        if (sc->sc_ic.ic_nrunning > 0)
                iwm_init(sc);
        sc->sc_flags &= ~IWM_FLAG_BUSY;
        /* Release anyone sleeping on IWM_FLAG_BUSY above. */
        wakeup(&sc->sc_flags);
        IWM_UNLOCK(sc);
}
6291
6292 static int
6293 iwm_resume(device_t dev)
6294 {
6295         struct iwm_softc *sc = device_get_softc(dev);
6296         int do_reinit = 0;
6297
6298         /*
6299          * We disable the RETRY_TIMEOUT register (0x41) to keep
6300          * PCI Tx retries from interfering with C3 CPU state.
6301          */
6302         pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6303
6304         if (!sc->sc_attached)
6305                 return 0;
6306
6307         iwm_init_task(device_get_softc(dev));
6308
6309         IWM_LOCK(sc);
6310         if (sc->sc_flags & IWM_FLAG_SCANNING) {
6311                 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6312                 do_reinit = 1;
6313         }
6314         IWM_UNLOCK(sc);
6315
6316         if (do_reinit)
6317                 ieee80211_resume_all(&sc->sc_ic);
6318
6319         return 0;
6320 }
6321
6322 static int
6323 iwm_suspend(device_t dev)
6324 {
6325         int do_stop = 0;
6326         struct iwm_softc *sc = device_get_softc(dev);
6327
6328         do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6329
6330         if (!sc->sc_attached)
6331                 return (0);
6332
6333         ieee80211_suspend_all(&sc->sc_ic);
6334
6335         if (do_stop) {
6336                 IWM_LOCK(sc);
6337                 iwm_stop(sc);
6338                 sc->sc_flags |= IWM_FLAG_SCANNING;
6339                 IWM_UNLOCK(sc);
6340         }
6341
6342         return (0);
6343 }
6344
/*
 * Common teardown path.  With do_net80211 != 0 the net80211 layer is
 * detached as well (task/queue drain plus ieee80211_ifdetach); callers
 * presumably pass 0 when net80211 attach never completed -- TODO
 * confirm against the attach error path (outside this view).
 *
 * Safe to call more than once: sc_attached gates the whole teardown.
 * Teardown order: callouts, firmware/device, rings, DMA, PCI, lock.
 */
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
        struct iwm_fw_info *fw = &sc->sc_fw;
        device_t dev = sc->sc_dev;
        int i;

        if (!sc->sc_attached)
                return 0;
        sc->sc_attached = 0;

        if (do_net80211)
                ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);

        /* Drain LED-blink and watchdog callouts before killing the device. */
        callout_drain(&sc->sc_led_blink_to);
        callout_drain(&sc->sc_watchdog_to);
        iwm_stop_device(sc);
        if (do_net80211) {
                IWM_LOCK(sc);
                iwm_xmit_queue_drain(sc);
                IWM_UNLOCK(sc);
                ieee80211_ifdetach(&sc->sc_ic);
        }

        iwm_phy_db_free(sc->sc_phy_db);
        sc->sc_phy_db = NULL;

        iwm_free_nvm_data(sc->nvm_data);

        /* Free descriptor rings */
        iwm_free_rx_ring(sc, &sc->rxq);
        for (i = 0; i < nitems(sc->txq); i++)
                iwm_free_tx_ring(sc, &sc->txq[i]);

        /* Free firmware */
        if (fw->fw_fp != NULL)
                iwm_fw_info_free(fw);

        /* Free scheduler */
        iwm_dma_contig_free(&sc->sched_dma);
        iwm_dma_contig_free(&sc->ict_dma);
        iwm_dma_contig_free(&sc->kw_dma);
        iwm_dma_contig_free(&sc->fw_dma);

        iwm_free_fw_paging(sc);

        /* Finished with the hardware - detach things */
        iwm_pci_detach(dev);

        if (sc->sc_notif_wait != NULL) {
                iwm_notification_wait_free(sc->sc_notif_wait);
                sc->sc_notif_wait = NULL;
        }

        /* Must be last: every path above may still take the lock. */
        IWM_LOCK_DESTROY(sc);

        return (0);
}
6403
6404 static int
6405 iwm_detach(device_t dev)
6406 {
6407         struct iwm_softc *sc = device_get_softc(dev);
6408
6409         return (iwm_detach_local(sc, 1));
6410 }
6411
/* newbus method table for the iwm PCI driver. */
static device_method_t iwm_pci_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         iwm_probe),
        DEVMETHOD(device_attach,        iwm_attach),
        DEVMETHOD(device_detach,        iwm_detach),
        DEVMETHOD(device_suspend,       iwm_suspend),
        DEVMETHOD(device_resume,        iwm_resume),

        DEVMETHOD_END
};
6422
/* Driver declaration: name, method table, per-instance softc size. */
static driver_t iwm_pci_driver = {
        "iwm",
        iwm_pci_methods,
        sizeof (struct iwm_softc)
};
6428
static devclass_t iwm_devclass;

/* Register the driver on the PCI bus. */
DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
/* PNP table (Intel vendor 0x8086) so devmatch(8) can autoload iwm. */
MODULE_PNP_INFO("U16:device;P:#;T:vendor=0x8086", pci, iwm_pci_driver,
    iwm_devices, nitems(iwm_devices));
/* Module dependencies: firmware(9) loading, PCI bus, net80211 stack. */
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);