]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/dev/iwm/if_iwm.c
[iwm] Remove dead code from iwm_pcie_load_cpu_sections().
[FreeBSD/FreeBSD.git] / sys / dev / iwm / if_iwm.c
1 /*      $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $     */
2
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 #include <sys/cdefs.h>
106 __FBSDID("$FreeBSD$");
107
108 #include "opt_wlan.h"
109 #include "opt_iwm.h"
110
111 #include <sys/param.h>
112 #include <sys/bus.h>
113 #include <sys/conf.h>
114 #include <sys/endian.h>
115 #include <sys/firmware.h>
116 #include <sys/kernel.h>
117 #include <sys/malloc.h>
118 #include <sys/mbuf.h>
119 #include <sys/mutex.h>
120 #include <sys/module.h>
121 #include <sys/proc.h>
122 #include <sys/rman.h>
123 #include <sys/socket.h>
124 #include <sys/sockio.h>
125 #include <sys/sysctl.h>
126 #include <sys/linker.h>
127
128 #include <machine/bus.h>
129 #include <machine/endian.h>
130 #include <machine/resource.h>
131
132 #include <dev/pci/pcivar.h>
133 #include <dev/pci/pcireg.h>
134
135 #include <net/bpf.h>
136
137 #include <net/if.h>
138 #include <net/if_var.h>
139 #include <net/if_arp.h>
140 #include <net/if_dl.h>
141 #include <net/if_media.h>
142 #include <net/if_types.h>
143
144 #include <netinet/in.h>
145 #include <netinet/in_systm.h>
146 #include <netinet/if_ether.h>
147 #include <netinet/ip.h>
148
149 #include <net80211/ieee80211_var.h>
150 #include <net80211/ieee80211_regdomain.h>
151 #include <net80211/ieee80211_ratectl.h>
152 #include <net80211/ieee80211_radiotap.h>
153
154 #include <dev/iwm/if_iwmreg.h>
155 #include <dev/iwm/if_iwmvar.h>
156 #include <dev/iwm/if_iwm_config.h>
157 #include <dev/iwm/if_iwm_debug.h>
158 #include <dev/iwm/if_iwm_notif_wait.h>
159 #include <dev/iwm/if_iwm_util.h>
160 #include <dev/iwm/if_iwm_binding.h>
161 #include <dev/iwm/if_iwm_phy_db.h>
162 #include <dev/iwm/if_iwm_mac_ctxt.h>
163 #include <dev/iwm/if_iwm_phy_ctxt.h>
164 #include <dev/iwm/if_iwm_time_event.h>
165 #include <dev/iwm/if_iwm_power.h>
166 #include <dev/iwm/if_iwm_scan.h>
167 #include <dev/iwm/if_iwm_sf.h>
168 #include <dev/iwm/if_iwm_sta.h>
169
170 #include <dev/iwm/if_iwm_pcie_trans.h>
171 #include <dev/iwm/if_iwm_led.h>
172 #include <dev/iwm/if_iwm_fw.h>
173
/* From DragonflyBSD */
/* Return a pointer of type 't' to the data at byte offset 'off' into mbuf 'm'. */
#define mtodoff(m, t, off)      ((t)((m)->m_data + (off)))
176
/*
 * Channel numbers that may appear in the NVM channel list of
 * pre-8000-series devices.  The first IWM_NUM_2GHZ_CHANNELS entries are
 * the 2.4 GHz channels; the remainder are 5 GHz.
 */
const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
/* NVM parsing indexes this table up to IWM_NUM_CHANNELS entries. */
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");
187
/*
 * Channel numbers that may appear in the NVM channel list of
 * 8000-series devices; a superset of iwm_nvm_channels.  The first
 * IWM_NUM_2GHZ_CHANNELS entries are the 2.4 GHz channels.
 */
const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
/* NVM parsing indexes this table up to IWM_NUM_CHANNELS_8000 entries. */
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");

/* Count of 2.4 GHz entries at the head of both channel tables above. */
#define IWM_NUM_2GHZ_CHANNELS   14
/* Mask for the NVM hardware-address count field; presumably applied in
 * iwm_get_n_hw_addrs() — confirm against that function. */
#define IWM_N_HW_ADDR_MASK      0xF
201
/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
	uint8_t rate;	/* rate in 500 kbit/s units (2 == 1 Mb/s, per PLCP names) */
	uint8_t plcp;	/* corresponding hardware PLCP signal code */
} iwm_rates[] = {
	/* CCK (802.11b) rates */
	{   2,  IWM_RATE_1M_PLCP  },
	{   4,  IWM_RATE_2M_PLCP  },
	{  11,  IWM_RATE_5M_PLCP  },
	{  22,  IWM_RATE_11M_PLCP },
	/* OFDM (802.11a/g) rates */
	{  12,  IWM_RATE_6M_PLCP  },
	{  18,  IWM_RATE_9M_PLCP  },
	{  24,  IWM_RATE_12M_PLCP },
	{  36,  IWM_RATE_18M_PLCP },
	{  48,  IWM_RATE_24M_PLCP },
	{  72,  IWM_RATE_36M_PLCP },
	{  96,  IWM_RATE_48M_PLCP },
	{ 108,  IWM_RATE_54M_PLCP },
};
/* Index of the first CCK and first OFDM entry in iwm_rates[]. */
#define IWM_RIDX_CCK    0
#define IWM_RIDX_OFDM   4
#define IWM_RIDX_MAX    (nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
228
/* One NVM section: its length in bytes and a pointer to its contents. */
struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};
233
/* Timeout (in ticks) waiting for the firmware "alive" notification. */
#define IWM_MVM_UCODE_ALIVE_TIMEOUT     hz
/* Timeout (in ticks) waiting for init-ucode calibration to complete. */
#define IWM_MVM_UCODE_CALIB_TIMEOUT     (2*hz)

/* Data extracted from the firmware "alive" notification. */
struct iwm_mvm_alive_data {
	int valid;		/* non-zero once a valid alive was received */
	uint32_t scd_base_addr;	/* scheduler SRAM base address reported by fw */
};
241
242 static int      iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
243 static int      iwm_firmware_store_section(struct iwm_softc *,
244                                            enum iwm_ucode_type,
245                                            const uint8_t *, size_t);
246 static int      iwm_set_default_calib(struct iwm_softc *, const void *);
247 static void     iwm_fw_info_free(struct iwm_fw_info *);
248 static int      iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
249 static int      iwm_alloc_fwmem(struct iwm_softc *);
250 static int      iwm_alloc_sched(struct iwm_softc *);
251 static int      iwm_alloc_kw(struct iwm_softc *);
252 static int      iwm_alloc_ict(struct iwm_softc *);
253 static int      iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
254 static void     iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
255 static void     iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
256 static int      iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
257                                   int);
258 static void     iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
259 static void     iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
260 static void     iwm_enable_interrupts(struct iwm_softc *);
261 static void     iwm_restore_interrupts(struct iwm_softc *);
262 static void     iwm_disable_interrupts(struct iwm_softc *);
263 static void     iwm_ict_reset(struct iwm_softc *);
264 static int      iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
265 static void     iwm_stop_device(struct iwm_softc *);
266 static void     iwm_mvm_nic_config(struct iwm_softc *);
267 static int      iwm_nic_rx_init(struct iwm_softc *);
268 static int      iwm_nic_tx_init(struct iwm_softc *);
269 static int      iwm_nic_init(struct iwm_softc *);
270 static int      iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
271 static int      iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
272                                    uint16_t, uint8_t *, uint16_t *);
273 static int      iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
274                                      uint16_t *, uint32_t);
275 static uint32_t iwm_eeprom_channel_flags(uint16_t);
276 static void     iwm_add_channel_band(struct iwm_softc *,
277                     struct ieee80211_channel[], int, int *, int, size_t,
278                     const uint8_t[]);
279 static void     iwm_init_channel_map(struct ieee80211com *, int, int *,
280                     struct ieee80211_channel[]);
281 static struct iwm_nvm_data *
282         iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
283                            const uint16_t *, const uint16_t *,
284                            const uint16_t *, const uint16_t *,
285                            const uint16_t *);
286 static void     iwm_free_nvm_data(struct iwm_nvm_data *);
287 static void     iwm_set_hw_address_family_8000(struct iwm_softc *,
288                                                struct iwm_nvm_data *,
289                                                const uint16_t *,
290                                                const uint16_t *);
291 static int      iwm_get_sku(const struct iwm_softc *, const uint16_t *,
292                             const uint16_t *);
293 static int      iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
294 static int      iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
295                                   const uint16_t *);
296 static int      iwm_get_n_hw_addrs(const struct iwm_softc *,
297                                    const uint16_t *);
298 static void     iwm_set_radio_cfg(const struct iwm_softc *,
299                                   struct iwm_nvm_data *, uint32_t);
300 static struct iwm_nvm_data *
301         iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
302 static int      iwm_nvm_init(struct iwm_softc *);
303 static int      iwm_pcie_load_section(struct iwm_softc *, uint8_t,
304                                       const struct iwm_fw_desc *);
305 static int      iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
306                                              bus_addr_t, uint32_t);
307 static int      iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
308                                                 const struct iwm_fw_sects *,
309                                                 int, int *);
310 static int      iwm_pcie_load_cpu_sections(struct iwm_softc *,
311                                            const struct iwm_fw_sects *,
312                                            int, int *);
313 static int      iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
314                                                const struct iwm_fw_sects *);
315 static int      iwm_pcie_load_given_ucode(struct iwm_softc *,
316                                           const struct iwm_fw_sects *);
317 static int      iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *);
318 static int      iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
319 static int      iwm_send_phy_cfg_cmd(struct iwm_softc *);
320 static int      iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
321                                               enum iwm_ucode_type);
322 static int      iwm_run_init_mvm_ucode(struct iwm_softc *, int);
323 static int      iwm_rx_addbuf(struct iwm_softc *, int, int);
324 static int      iwm_mvm_get_signal_strength(struct iwm_softc *,
325                                             struct iwm_rx_phy_info *);
326 static void     iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
327                                       struct iwm_rx_packet *);
328 static int      iwm_get_noise(struct iwm_softc *sc,
329                     const struct iwm_mvm_statistics_rx_non_phy *);
330 static boolean_t iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *,
331                                     uint32_t, boolean_t);
332 static int      iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
333                                          struct iwm_rx_packet *,
334                                          struct iwm_node *);
335 static void     iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
336 static void     iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
337 #if 0
338 static void     iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
339                                  uint16_t);
340 #endif
341 static const struct iwm_rate *
342         iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
343                         struct mbuf *, struct iwm_tx_cmd *);
344 static int      iwm_tx(struct iwm_softc *, struct mbuf *,
345                        struct ieee80211_node *, int);
346 static int      iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
347                              const struct ieee80211_bpf_params *);
348 static int      iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_vap *);
349 static int      iwm_auth(struct ieee80211vap *, struct iwm_softc *);
350 static int      iwm_release(struct iwm_softc *, struct iwm_node *);
351 static struct ieee80211_node *
352                 iwm_node_alloc(struct ieee80211vap *,
353                                const uint8_t[IEEE80211_ADDR_LEN]);
354 static void     iwm_setrates(struct iwm_softc *, struct iwm_node *);
355 static int      iwm_media_change(struct ifnet *);
356 static int      iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
357 static void     iwm_endscan_cb(void *, int);
358 static int      iwm_send_bt_init_conf(struct iwm_softc *);
359 static boolean_t iwm_mvm_is_lar_supported(struct iwm_softc *);
360 static boolean_t iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *);
361 static int      iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
362 static void     iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
363 static int      iwm_init_hw(struct iwm_softc *);
364 static void     iwm_init(struct iwm_softc *);
365 static void     iwm_start(struct iwm_softc *);
366 static void     iwm_stop(struct iwm_softc *);
367 static void     iwm_watchdog(void *);
368 static void     iwm_parent(struct ieee80211com *);
369 #ifdef IWM_DEBUG
370 static const char *
371                 iwm_desc_lookup(uint32_t);
372 static void     iwm_nic_error(struct iwm_softc *);
373 static void     iwm_nic_umac_error(struct iwm_softc *);
374 #endif
375 static void     iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
376 static void     iwm_notif_intr(struct iwm_softc *);
377 static void     iwm_intr(void *);
378 static int      iwm_attach(device_t);
379 static int      iwm_is_valid_ether_addr(uint8_t *);
380 static void     iwm_preinit(void *);
381 static int      iwm_detach_local(struct iwm_softc *sc, int);
382 static void     iwm_init_task(void *);
383 static void     iwm_radiotap_attach(struct iwm_softc *);
384 static struct ieee80211vap *
385                 iwm_vap_create(struct ieee80211com *,
386                                const char [IFNAMSIZ], int,
387                                enum ieee80211_opmode, int,
388                                const uint8_t [IEEE80211_ADDR_LEN],
389                                const uint8_t [IEEE80211_ADDR_LEN]);
390 static void     iwm_vap_delete(struct ieee80211vap *);
391 static void     iwm_scan_start(struct ieee80211com *);
392 static void     iwm_scan_end(struct ieee80211com *);
393 static void     iwm_update_mcast(struct ieee80211com *);
394 static void     iwm_set_channel(struct ieee80211com *);
395 static void     iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
396 static void     iwm_scan_mindwell(struct ieee80211_scan_state *);
397 static int      iwm_detach(device_t);
398
/*
 * Loader tunable: set hw.iwm.lar.disable to a non-zero value to disable
 * LAR support (see iwm_mvm_is_lar_supported()).
 */
static int      iwm_lar_disable = 0;
TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);
401
402 /*
403  * Firmware parser.
404  */
405
406 static int
407 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
408 {
409         const struct iwm_fw_cscheme_list *l = (const void *)data;
410
411         if (dlen < sizeof(*l) ||
412             dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
413                 return EINVAL;
414
415         /* we don't actually store anything for now, always use s/w crypto */
416
417         return 0;
418 }
419
420 static int
421 iwm_firmware_store_section(struct iwm_softc *sc,
422     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
423 {
424         struct iwm_fw_sects *fws;
425         struct iwm_fw_desc *fwone;
426
427         if (type >= IWM_UCODE_TYPE_MAX)
428                 return EINVAL;
429         if (dlen < sizeof(uint32_t))
430                 return EINVAL;
431
432         fws = &sc->sc_fw.fw_sects[type];
433         if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
434                 return EINVAL;
435
436         fwone = &fws->fw_sect[fws->fw_count];
437
438         /* first 32bit are device load offset */
439         memcpy(&fwone->offset, data, sizeof(uint32_t));
440
441         /* rest is data */
442         fwone->data = data + sizeof(uint32_t);
443         fwone->len = dlen - sizeof(uint32_t);
444
445         fws->fw_count++;
446
447         return 0;
448 }
449
/* Default scan channel count, used until a TLV overrides capa->n_scan_channels. */
#define IWM_DEFAULT_SCAN_CHANNELS 40

/* iwlwifi: iwl-drv.c */
/*
 * Payload of a default-calibration firmware TLV: the ucode image type
 * it applies to, plus the calibration control words.  Consumed by
 * iwm_set_default_calib().
 */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;
457
458 static int
459 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
460 {
461         const struct iwm_tlv_calib_data *def_calib = data;
462         uint32_t ucode_type = le32toh(def_calib->ucode_type);
463
464         if (ucode_type >= IWM_UCODE_TYPE_MAX) {
465                 device_printf(sc->sc_dev,
466                     "Wrong ucode_type %u for default "
467                     "calibration.\n", ucode_type);
468                 return EINVAL;
469         }
470
471         sc->sc_default_calib[ucode_type].flow_trigger =
472             def_calib->calib.flow_trigger;
473         sc->sc_default_calib[ucode_type].event_trigger =
474             def_calib->calib.event_trigger;
475
476         return 0;
477 }
478
479 static int
480 iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
481                         struct iwm_ucode_capabilities *capa)
482 {
483         const struct iwm_ucode_api *ucode_api = (const void *)data;
484         uint32_t api_index = le32toh(ucode_api->api_index);
485         uint32_t api_flags = le32toh(ucode_api->api_flags);
486         int i;
487
488         if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
489                 device_printf(sc->sc_dev,
490                     "api flags index %d larger than supported by driver\n",
491                     api_index);
492                 /* don't return an error so we can load FW that has more bits */
493                 return 0;
494         }
495
496         for (i = 0; i < 32; i++) {
497                 if (api_flags & (1U << i))
498                         setbit(capa->enabled_api, i + 32 * api_index);
499         }
500
501         return 0;
502 }
503
504 static int
505 iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
506                            struct iwm_ucode_capabilities *capa)
507 {
508         const struct iwm_ucode_capa *ucode_capa = (const void *)data;
509         uint32_t api_index = le32toh(ucode_capa->api_index);
510         uint32_t api_flags = le32toh(ucode_capa->api_capa);
511         int i;
512
513         if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
514                 device_printf(sc->sc_dev,
515                     "capa flags index %d larger than supported by driver\n",
516                     api_index);
517                 /* don't return an error so we can load FW that has more bits */
518                 return 0;
519         }
520
521         for (i = 0; i < 32; i++) {
522                 if (api_flags & (1U << i))
523                         setbit(capa->enabled_capa, i + 32 * api_index);
524         }
525
526         return 0;
527 }
528
529 static void
530 iwm_fw_info_free(struct iwm_fw_info *fw)
531 {
532         firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
533         fw->fw_fp = NULL;
534         /* don't touch fw->fw_status */
535         memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
536 }
537
538 static int
539 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
540 {
541         struct iwm_fw_info *fw = &sc->sc_fw;
542         const struct iwm_tlv_ucode_header *uhdr;
543         const struct iwm_ucode_tlv *tlv;
544         struct iwm_ucode_capabilities *capa = &sc->ucode_capa;
545         enum iwm_ucode_tlv_type tlv_type;
546         const struct firmware *fwp;
547         const uint8_t *data;
548         uint32_t tlv_len;
549         uint32_t usniffer_img;
550         const uint8_t *tlv_data;
551         uint32_t paging_mem_size;
552         int num_of_cpus;
553         int error = 0;
554         size_t len;
555
556         if (fw->fw_status == IWM_FW_STATUS_DONE &&
557             ucode_type != IWM_UCODE_INIT)
558                 return 0;
559
560         while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
561                 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
562         fw->fw_status = IWM_FW_STATUS_INPROGRESS;
563
564         if (fw->fw_fp != NULL)
565                 iwm_fw_info_free(fw);
566
567         /*
568          * Load firmware into driver memory.
569          * fw_fp will be set.
570          */
571         IWM_UNLOCK(sc);
572         fwp = firmware_get(sc->cfg->fw_name);
573         IWM_LOCK(sc);
574         if (fwp == NULL) {
575                 device_printf(sc->sc_dev,
576                     "could not read firmware %s (error %d)\n",
577                     sc->cfg->fw_name, error);
578                 goto out;
579         }
580         fw->fw_fp = fwp;
581
582         /* (Re-)Initialize default values. */
583         capa->flags = 0;
584         capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
585         capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
586         memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
587         memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
588         memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
589
590         /*
591          * Parse firmware contents
592          */
593
594         uhdr = (const void *)fw->fw_fp->data;
595         if (*(const uint32_t *)fw->fw_fp->data != 0
596             || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
597                 device_printf(sc->sc_dev, "invalid firmware %s\n",
598                     sc->cfg->fw_name);
599                 error = EINVAL;
600                 goto out;
601         }
602
603         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
604             IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
605             IWM_UCODE_MINOR(le32toh(uhdr->ver)),
606             IWM_UCODE_API(le32toh(uhdr->ver)));
607         data = uhdr->data;
608         len = fw->fw_fp->datasize - sizeof(*uhdr);
609
610         while (len >= sizeof(*tlv)) {
611                 len -= sizeof(*tlv);
612                 tlv = (const void *)data;
613
614                 tlv_len = le32toh(tlv->length);
615                 tlv_type = le32toh(tlv->type);
616                 tlv_data = tlv->data;
617
618                 if (len < tlv_len) {
619                         device_printf(sc->sc_dev,
620                             "firmware too short: %zu bytes\n",
621                             len);
622                         error = EINVAL;
623                         goto parse_out;
624                 }
625                 len -= roundup2(tlv_len, 4);
626                 data += sizeof(tlv) + roundup2(tlv_len, 4);
627
628                 switch ((int)tlv_type) {
629                 case IWM_UCODE_TLV_PROBE_MAX_LEN:
630                         if (tlv_len != sizeof(uint32_t)) {
631                                 device_printf(sc->sc_dev,
632                                     "%s: PROBE_MAX_LEN (%d) != sizeof(uint32_t)\n",
633                                     __func__,
634                                     (int) tlv_len);
635                                 error = EINVAL;
636                                 goto parse_out;
637                         }
638                         capa->max_probe_length =
639                             le32_to_cpup((const uint32_t *)tlv_data);
640                         /* limit it to something sensible */
641                         if (capa->max_probe_length >
642                             IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
643                                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
644                                     "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
645                                     "ridiculous\n", __func__);
646                                 error = EINVAL;
647                                 goto parse_out;
648                         }
649                         break;
650                 case IWM_UCODE_TLV_PAN:
651                         if (tlv_len) {
652                                 device_printf(sc->sc_dev,
653                                     "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
654                                     __func__,
655                                     (int) tlv_len);
656                                 error = EINVAL;
657                                 goto parse_out;
658                         }
659                         capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
660                         break;
661                 case IWM_UCODE_TLV_FLAGS:
662                         if (tlv_len < sizeof(uint32_t)) {
663                                 device_printf(sc->sc_dev,
664                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
665                                     __func__,
666                                     (int) tlv_len);
667                                 error = EINVAL;
668                                 goto parse_out;
669                         }
670                         if (tlv_len % sizeof(uint32_t)) {
671                                 device_printf(sc->sc_dev,
672                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) %% sizeof(uint32_t)\n",
673                                     __func__,
674                                     (int) tlv_len);
675                                 error = EINVAL;
676                                 goto parse_out;
677                         }
678                         /*
679                          * Apparently there can be many flags, but Linux driver
680                          * parses only the first one, and so do we.
681                          *
682                          * XXX: why does this override IWM_UCODE_TLV_PAN?
683                          * Intentional or a bug?  Observations from
684                          * current firmware file:
685                          *  1) TLV_PAN is parsed first
686                          *  2) TLV_FLAGS contains TLV_FLAGS_PAN
687                          * ==> this resets TLV_PAN to itself... hnnnk
688                          */
689                         capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
690                         break;
691                 case IWM_UCODE_TLV_CSCHEME:
692                         if ((error = iwm_store_cscheme(sc,
693                             tlv_data, tlv_len)) != 0) {
694                                 device_printf(sc->sc_dev,
695                                     "%s: iwm_store_cscheme(): returned %d\n",
696                                     __func__,
697                                     error);
698                                 goto parse_out;
699                         }
700                         break;
701                 case IWM_UCODE_TLV_NUM_OF_CPU:
702                         if (tlv_len != sizeof(uint32_t)) {
703                                 device_printf(sc->sc_dev,
704                                     "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
705                                     __func__,
706                                     (int) tlv_len);
707                                 error = EINVAL;
708                                 goto parse_out;
709                         }
710                         num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
711                         if (num_of_cpus == 2) {
712                                 fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus =
713                                         TRUE;
714                                 fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus =
715                                         TRUE;
716                                 fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus =
717                                         TRUE;
718                         } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
719                                 device_printf(sc->sc_dev,
720                                     "%s: Driver supports only 1 or 2 CPUs\n",
721                                     __func__);
722                                 error = EINVAL;
723                                 goto parse_out;
724                         }
725                         break;
726                 case IWM_UCODE_TLV_SEC_RT:
727                         if ((error = iwm_firmware_store_section(sc,
728                             IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
729                                 device_printf(sc->sc_dev,
730                                     "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
731                                     __func__,
732                                     error);
733                                 goto parse_out;
734                         }
735                         break;
736                 case IWM_UCODE_TLV_SEC_INIT:
737                         if ((error = iwm_firmware_store_section(sc,
738                             IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
739                                 device_printf(sc->sc_dev,
740                                     "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
741                                     __func__,
742                                     error);
743                                 goto parse_out;
744                         }
745                         break;
746                 case IWM_UCODE_TLV_SEC_WOWLAN:
747                         if ((error = iwm_firmware_store_section(sc,
748                             IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
749                                 device_printf(sc->sc_dev,
750                                     "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
751                                     __func__,
752                                     error);
753                                 goto parse_out;
754                         }
755                         break;
756                 case IWM_UCODE_TLV_DEF_CALIB:
757                         if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
758                                 device_printf(sc->sc_dev,
759                                     "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
760                                     __func__,
761                                     (int) tlv_len,
762                                     (int) sizeof(struct iwm_tlv_calib_data));
763                                 error = EINVAL;
764                                 goto parse_out;
765                         }
766                         if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
767                                 device_printf(sc->sc_dev,
768                                     "%s: iwm_set_default_calib() failed: %d\n",
769                                     __func__,
770                                     error);
771                                 goto parse_out;
772                         }
773                         break;
774                 case IWM_UCODE_TLV_PHY_SKU:
775                         if (tlv_len != sizeof(uint32_t)) {
776                                 error = EINVAL;
777                                 device_printf(sc->sc_dev,
778                                     "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
779                                     __func__,
780                                     (int) tlv_len);
781                                 goto parse_out;
782                         }
783                         sc->sc_fw.phy_config =
784                             le32_to_cpup((const uint32_t *)tlv_data);
785                         sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
786                                                   IWM_FW_PHY_CFG_TX_CHAIN) >>
787                                                   IWM_FW_PHY_CFG_TX_CHAIN_POS;
788                         sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
789                                                   IWM_FW_PHY_CFG_RX_CHAIN) >>
790                                                   IWM_FW_PHY_CFG_RX_CHAIN_POS;
791                         break;
792
793                 case IWM_UCODE_TLV_API_CHANGES_SET: {
794                         if (tlv_len != sizeof(struct iwm_ucode_api)) {
795                                 error = EINVAL;
796                                 goto parse_out;
797                         }
798                         if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
799                                 error = EINVAL;
800                                 goto parse_out;
801                         }
802                         break;
803                 }
804
805                 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
806                         if (tlv_len != sizeof(struct iwm_ucode_capa)) {
807                                 error = EINVAL;
808                                 goto parse_out;
809                         }
810                         if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
811                                 error = EINVAL;
812                                 goto parse_out;
813                         }
814                         break;
815                 }
816
817                 case 48: /* undocumented TLV */
818                 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
819                 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
820                         /* ignore, not used by current driver */
821                         break;
822
823                 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
824                         if ((error = iwm_firmware_store_section(sc,
825                             IWM_UCODE_REGULAR_USNIFFER, tlv_data,
826                             tlv_len)) != 0)
827                                 goto parse_out;
828                         break;
829
830                 case IWM_UCODE_TLV_PAGING:
831                         if (tlv_len != sizeof(uint32_t)) {
832                                 error = EINVAL;
833                                 goto parse_out;
834                         }
835                         paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);
836
837                         IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
838                             "%s: Paging: paging enabled (size = %u bytes)\n",
839                             __func__, paging_mem_size);
840                         if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
841                                 device_printf(sc->sc_dev,
842                                         "%s: Paging: driver supports up to %u bytes for paging image\n",
843                                         __func__, IWM_MAX_PAGING_IMAGE_SIZE);
844                                 error = EINVAL;
845                                 goto out;
846                         }
847                         if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
848                                 device_printf(sc->sc_dev,
849                                     "%s: Paging: image isn't multiple %u\n",
850                                     __func__, IWM_FW_PAGING_SIZE);
851                                 error = EINVAL;
852                                 goto out;
853                         }
854
855                         sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
856                             paging_mem_size;
857                         usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
858                         sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
859                             paging_mem_size;
860                         break;
861
862                 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
863                         if (tlv_len != sizeof(uint32_t)) {
864                                 error = EINVAL;
865                                 goto parse_out;
866                         }
867                         capa->n_scan_channels =
868                             le32_to_cpup((const uint32_t *)tlv_data);
869                         break;
870
871                 case IWM_UCODE_TLV_FW_VERSION:
872                         if (tlv_len != sizeof(uint32_t) * 3) {
873                                 error = EINVAL;
874                                 goto parse_out;
875                         }
876                         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
877                             "%d.%d.%d",
878                             le32toh(((const uint32_t *)tlv_data)[0]),
879                             le32toh(((const uint32_t *)tlv_data)[1]),
880                             le32toh(((const uint32_t *)tlv_data)[2]));
881                         break;
882
883                 case IWM_UCODE_TLV_FW_MEM_SEG:
884                         break;
885
886                 default:
887                         device_printf(sc->sc_dev,
888                             "%s: unknown firmware section %d, abort\n",
889                             __func__, tlv_type);
890                         error = EINVAL;
891                         goto parse_out;
892                 }
893         }
894
895         KASSERT(error == 0, ("unhandled error"));
896
897  parse_out:
898         if (error) {
899                 device_printf(sc->sc_dev, "firmware parse error %d, "
900                     "section type %d\n", error, tlv_type);
901         }
902
903  out:
904         if (error) {
905                 fw->fw_status = IWM_FW_STATUS_NONE;
906                 if (fw->fw_fp != NULL)
907                         iwm_fw_info_free(fw);
908         } else
909                 fw->fw_status = IWM_FW_STATUS_DONE;
910         wakeup(&sc->sc_fw);
911
912         return error;
913 }
914
915 /*
916  * DMA resource routines
917  */
918
919 /* fwmem is used to load firmware onto the card */
920 static int
921 iwm_alloc_fwmem(struct iwm_softc *sc)
922 {
923         /* Must be aligned on a 16-byte boundary. */
924         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
925             IWM_FH_MEM_TB_MAX_LENGTH, 16);
926 }
927
928 /* tx scheduler rings.  not used? */
929 static int
930 iwm_alloc_sched(struct iwm_softc *sc)
931 {
932         /* TX scheduler rings must be aligned on a 1KB boundary. */
933         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
934             nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
935 }
936
937 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
938 static int
939 iwm_alloc_kw(struct iwm_softc *sc)
940 {
941         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
942 }
943
944 /* interrupt cause table */
945 static int
946 iwm_alloc_ict(struct iwm_softc *sc)
947 {
948         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
949             IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
950 }
951
/*
 * Allocate all DMA resources for one RX ring: the descriptor array
 * (one 32-bit DMA address per slot, 256-byte aligned), the status area
 * the device writes to (16-byte aligned), a buffer DMA tag, a spare
 * map used when rotating buffers, and one map plus an initial receive
 * buffer for every ring slot.
 *
 * Returns 0 on success or an errno; on failure everything allocated so
 * far is released via iwm_free_rx_ring().
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/* Create RX buffer DMA tag. */
	/* One segment: each receive buffer must be physically contiguous. */
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}
	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		/* iwm_rx_addbuf() attaches an mbuf to slot i and loads its map. */
		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}
1022
1023 static void
1024 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1025 {
1026         /* Reset the ring state */
1027         ring->cur = 0;
1028
1029         /*
1030          * The hw rx ring index in shared memory must also be cleared,
1031          * otherwise the discrepancy can cause reprocessing chaos.
1032          */
1033         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1034 }
1035
1036 static void
1037 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1038 {
1039         int i;
1040
1041         iwm_dma_contig_free(&ring->desc_dma);
1042         iwm_dma_contig_free(&ring->stat_dma);
1043
1044         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1045                 struct iwm_rx_data *data = &ring->data[i];
1046
1047                 if (data->m != NULL) {
1048                         bus_dmamap_sync(ring->data_dmat, data->map,
1049                             BUS_DMASYNC_POSTREAD);
1050                         bus_dmamap_unload(ring->data_dmat, data->map);
1051                         m_freem(data->m);
1052                         data->m = NULL;
1053                 }
1054                 if (data->map != NULL) {
1055                         bus_dmamap_destroy(ring->data_dmat, data->map);
1056                         data->map = NULL;
1057                 }
1058         }
1059         if (ring->spare_map != NULL) {
1060                 bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1061                 ring->spare_map = NULL;
1062         }
1063         if (ring->data_dmat != NULL) {
1064                 bus_dma_tag_destroy(ring->data_dmat);
1065                 ring->data_dmat = NULL;
1066         }
1067 }
1068
/*
 * Allocate DMA resources for one TX ring.  Every ring gets a TFD
 * descriptor array; only rings up to and including the command queue
 * additionally get device-command buffers, a data DMA tag, per-slot
 * DMA maps, and precomputed command/scratch bus addresses.
 *
 * Returns 0 on success or an errno; on failure everything allocated
 * so far is released via iwm_free_tx_ring().
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_MVM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	/*
	 * Walk the command buffer, recording for each slot the bus address
	 * of its device command and of the scratch field inside that
	 * command's TX payload; create the per-slot DMA map along the way.
	 */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	/* The loop must have advanced exactly over the command buffer. */
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
1148
1149 static void
1150 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1151 {
1152         int i;
1153
1154         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1155                 struct iwm_tx_data *data = &ring->data[i];
1156
1157                 if (data->m != NULL) {
1158                         bus_dmamap_sync(ring->data_dmat, data->map,
1159                             BUS_DMASYNC_POSTWRITE);
1160                         bus_dmamap_unload(ring->data_dmat, data->map);
1161                         m_freem(data->m);
1162                         data->m = NULL;
1163                 }
1164         }
1165         /* Clear TX descriptors. */
1166         memset(ring->desc, 0, ring->desc_dma.size);
1167         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1168             BUS_DMASYNC_PREWRITE);
1169         sc->qfullmsk &= ~(1 << ring->qid);
1170         ring->queued = 0;
1171         ring->cur = 0;
1172
1173         if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
1174                 iwm_pcie_clear_cmd_in_flight(sc);
1175 }
1176
1177 static void
1178 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1179 {
1180         int i;
1181
1182         iwm_dma_contig_free(&ring->desc_dma);
1183         iwm_dma_contig_free(&ring->cmd_dma);
1184
1185         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1186                 struct iwm_tx_data *data = &ring->data[i];
1187
1188                 if (data->m != NULL) {
1189                         bus_dmamap_sync(ring->data_dmat, data->map,
1190                             BUS_DMASYNC_POSTWRITE);
1191                         bus_dmamap_unload(ring->data_dmat, data->map);
1192                         m_freem(data->m);
1193                         data->m = NULL;
1194                 }
1195                 if (data->map != NULL) {
1196                         bus_dmamap_destroy(ring->data_dmat, data->map);
1197                         data->map = NULL;
1198                 }
1199         }
1200         if (ring->data_dmat != NULL) {
1201                 bus_dma_tag_destroy(ring->data_dmat);
1202                 ring->data_dmat = NULL;
1203         }
1204 }
1205
1206 /*
1207  * High-level hardware frobbing routines
1208  */
1209
/*
 * Unmask the full set of interrupts the driver handles, saving the
 * mask in sc_intmask so iwm_restore_interrupts() can re-apply it.
 */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1216
/* Re-apply the interrupt mask last saved by iwm_enable_interrupts(). */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1222
/*
 * Mask all interrupts, then acknowledge anything already pending so
 * no stale cause fires once interrupts are re-enabled.
 */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}
1233
/*
 * Reset and (re)enable the interrupt cause table (ICT): a DMA area
 * the device writes interrupt causes into.  Interrupts are disabled
 * for the duration of the reset and re-enabled at the end.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	/* Ack anything that raced in during the reset before unmasking. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1257
1258 /* iwlwifi pcie/trans.c */
1259
/*
 * Since this .. hard-resets things, it's time to actually
 * mark the first vap (if any) as having no mac context.
 * It's annoying, but since the driver is potentially being
 * stop/start'ed whilst active (thanks openbsd port!) we
 * have to correctly track this.
 *
 * Full device shutdown: quiesce interrupts and DMA, reset all rings,
 * power the device down, and leave only the RF-kill interrupt armed.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, qid;
	uint32_t mask = 0;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->phy_ctxt = NULL;
		iv->is_uploaded = 0;
	}

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	if (iwm_nic_lock(sc)) {
		/* Deactivate the TX scheduler before stopping the channels. */
		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

		/* Stop each Tx DMA channel */
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
		}

		/* Wait for DMA channels to be idle */
		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
		    5000)) {
			device_printf(sc->sc_dev,
			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
		}
		iwm_nic_unlock(sc);
	}
	iwm_pcie_rx_stop(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		/* Power-down device's busmaster DMA clocks */
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
			iwm_nic_unlock(sc);
		}
		DELAY(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);
	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
1353
1354 /* iwlwifi: mvm/ops.c */
1355 static void
1356 iwm_mvm_nic_config(struct iwm_softc *sc)
1357 {
1358         uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1359         uint32_t reg_val = 0;
1360         uint32_t phy_config = iwm_mvm_get_phy_config(sc);
1361
1362         radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1363             IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1364         radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1365             IWM_FW_PHY_CFG_RADIO_STEP_POS;
1366         radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1367             IWM_FW_PHY_CFG_RADIO_DASH_POS;
1368
1369         /* SKU control */
1370         reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1371             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1372         reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1373             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1374
1375         /* radio configuration */
1376         reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1377         reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1378         reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1379
1380         IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1381
1382         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1383             "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1384             radio_cfg_step, radio_cfg_dash);
1385
1386         /*
1387          * W/A : NIC is stuck in a reset state after Early PCIe power off
1388          * (PCIe power is lost before PERST# is asserted), causing ME FW
1389          * to lose ownership and not being able to obtain it back.
1390          */
1391         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1392                 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1393                     IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1394                     ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1395         }
1396 }
1397
/*
 * Program the RX DMA engine: clear the shared status area, stop the
 * engine, point channel 0 at the descriptor ring and status area,
 * configure buffer size and interrupt coalescing, and prime the
 * write pointer.
 *
 * Returns 0 on success or EBUSY if the NIC lock cannot be taken.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* Stop Rx DMA */
	iwm_pcie_rx_stop(sc);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* reset and flush pointers */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable Rx DMA
	 * XXX 5000 HW isn't supported by the iwm(4) driver.
	 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->cfg->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1461
/*
 * Initialize TX DMA state: hand the "keep warm" page and each queue's
 * descriptor ring base address to the device, then enable scheduler
 * auto-active mode.
 * Returns 0 on success or EBUSY if the NIC lock could not be taken.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
        int qid;

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* Deactivate TX scheduler. */
        iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

        /* Set physical address of "keep warm" page (16-byte aligned). */
        IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

        /* Initialize TX rings. */
        for (qid = 0; qid < nitems(sc->txq); qid++) {
                struct iwm_tx_ring *txq = &sc->txq[qid];

                /* Set physical address of TX ring (256-byte aligned). */
                IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
                    txq->desc_dma.paddr >> 8);
                IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
                    "%s: loading ring %d descriptors (%p) at %lx\n",
                    __func__,
                    qid, txq->desc,
                    (unsigned long) (txq->desc_dma.paddr >> 8));
        }

        /* Enable scheduler auto-active mode. */
        iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

        iwm_nic_unlock(sc);

        return 0;
}
1496
/*
 * One-shot NIC bring-up: APM init, power settings (programmed directly
 * only on the 7000 family), MAC/radio configuration, then RX and TX
 * ring setup, and finally shadow register enablement.
 * Returns 0 on success or an errno from the RX/TX init steps.
 */
static int
iwm_nic_init(struct iwm_softc *sc)
{
        int error;

        iwm_apm_init(sc);
        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
                iwm_set_pwr(sc);

        iwm_mvm_nic_config(sc);

        if ((error = iwm_nic_rx_init(sc)) != 0)
                return error;

        /*
         * Ditto for TX, from iwn
         */
        if ((error = iwm_nic_tx_init(sc)) != 0)
                return error;

        IWM_DPRINTF(sc, IWM_DEBUG_RESET,
            "%s: shadow registers enabled\n", __func__);
        IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

        return 0;
}
1523
/*
 * Activate TX queue 'qid' and attach it to scheduler FIFO 'fifo' for
 * station 'sta_id'.  The command queue (IWM_MVM_CMD_QUEUE) is set up
 * via direct scheduler register/SRAM writes; every other queue is
 * configured by sending an IWM_SCD_QUEUE_CFG host command to the
 * firmware.
 * Returns 0 on success, EBUSY if the NIC lock was unavailable, or an
 * errno from the host command.
 */
int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
        if (!iwm_nic_lock(sc)) {
                device_printf(sc->sc_dev,
                    "%s: cannot enable txq %d\n",
                    __func__,
                    qid);
                return EBUSY;
        }

        /* Start the queue with a hardware write pointer of 0. */
        IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

        if (qid == IWM_MVM_CMD_QUEUE) {
                /* unactivate before configuration */
                iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
                    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
                    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

                iwm_nic_unlock(sc);

                /* Clear this queue's bit in the aggregation selector. */
                iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

                if (!iwm_nic_lock(sc)) {
                        device_printf(sc->sc_dev,
                            "%s: cannot enable txq %d\n", __func__, qid);
                        return EBUSY;
                }
                /* Reset the scheduler read pointer for this queue. */
                iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
                iwm_nic_unlock(sc);

                iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
                /* Set scheduler window size and frame limit. */
                iwm_write_mem32(sc,
                    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
                    sizeof(uint32_t),
                    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
                    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
                    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
                    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

                if (!iwm_nic_lock(sc)) {
                        device_printf(sc->sc_dev,
                            "%s: cannot enable txq %d\n", __func__, qid);
                        return EBUSY;
                }
                /* Mark the queue active and bind it to its TX FIFO. */
                iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
                    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
                    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
                    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
                    IWM_SCD_QUEUE_STTS_REG_MSK);
        } else {
                struct iwm_scd_txq_cfg_cmd cmd;
                int error;

                iwm_nic_unlock(sc);

                /* Let the firmware configure the queue on our behalf. */
                memset(&cmd, 0, sizeof(cmd));
                cmd.scd_queue = qid;
                cmd.enable = 1;
                cmd.sta_id = sta_id;
                cmd.tx_fifo = fifo;
                cmd.aggregate = 0;
                cmd.window = IWM_FRAME_LIMIT;

                error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
                    sizeof(cmd), &cmd);
                if (error) {
                        device_printf(sc->sc_dev,
                            "cannot enable txq %d\n", qid);
                        return error;
                }

                if (!iwm_nic_lock(sc))
                        return EBUSY;
        }

        /*
         * NOTE(review): this ORs the raw queue number into IWM_SCD_EN_CTRL
         * rather than a per-queue bit (1 << qid); confirm against iwlwifi
         * whether a bitmask was intended here.
         */
        iwm_write_prph(sc, IWM_SCD_EN_CTRL,
            iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

        iwm_nic_unlock(sc);

        IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
            __func__, qid, fifo);

        return 0;
}
1611
/*
 * Post-"alive" PCIe transport setup: reset the ICT table, clear the
 * scheduler context area in SRAM, point the scheduler at our DRAM ring
 * state, enable the command queue and all FH DMA channels.
 * 'scd_base_addr' is the scheduler SRAM base reported in the firmware's
 * alive response (0 skips the cross-check against the PRPH value).
 * Returns 0 on success, EBUSY or an errno on failure.
 */
static int
iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
{
        int error, chnl;

        int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
            IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);

        if (!iwm_nic_lock(sc))
                return EBUSY;

        iwm_ict_reset(sc);

        /* Cross-check the firmware-reported scheduler base address. */
        sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
        if (scd_base_addr != 0 &&
            scd_base_addr != sc->scd_base_addr) {
                device_printf(sc->sc_dev,
                    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
                    __func__, sc->scd_base_addr, scd_base_addr);
        }

        iwm_nic_unlock(sc);

        /* reset context data, TX status and translation data */
        error = iwm_write_mem(sc,
            sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
            NULL, clear_dwords);
        if (error)
                return EBUSY;

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* Set physical address of TX scheduler rings (1KB aligned). */
        iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

        iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

        /* enable command channel */
        iwm_nic_unlock(sc);

        error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
        if (error)
                return error;

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* Activate all TX FIFOs in the scheduler. */
        iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

        /* Enable DMA channels. */
        for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
                IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
                    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
                    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
        }

        IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
            IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

        iwm_nic_unlock(sc);

        /* Enable L1-Active */
        if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
                iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
                    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
        }

        /* 'error' is necessarily 0 if we got here. */
        return error;
}
1682
1683 /*
1684  * NVM read access and content parsing.  We do not support
1685  * external NVM or writing NVM.
1686  * iwlwifi/mvm/nvm.c
1687  */
1688
/* Default NVM size to read per IWM_NVM_ACCESS_CMD (bytes). */
#define IWM_NVM_DEFAULT_CHUNK_SIZE      (2*1024)

/* NVM access opcodes, placed in iwm_nvm_access_cmd.op_code. */
#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0

/* load nvm chunk response (iwm_nvm_access_resp.status values) */
enum {
        IWM_READ_NVM_CHUNK_SUCCEED = 0,
        IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
};
1700
/*
 * Read one chunk of an NVM section via the IWM_NVM_ACCESS_CMD firmware
 * command.  On success the chunk is copied into data + offset and *len
 * is set to the number of bytes received (0 means "past the end of the
 * section", which is not an error when offset != 0).
 * Returns 0 on success, EIO on a firmware-reported failure, EINVAL on a
 * malformed response, or an errno from iwm_send_cmd().
 */
static int
iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
        uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
{
        struct iwm_nvm_access_cmd nvm_access_cmd = {
                .offset = htole16(offset),
                .length = htole16(length),
                .type = htole16(section),
                .op_code = IWM_NVM_READ_OPCODE,
        };
        struct iwm_nvm_access_resp *nvm_resp;
        struct iwm_rx_packet *pkt;
        struct iwm_host_cmd cmd = {
                .id = IWM_NVM_ACCESS_CMD,
                .flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
                .data = { &nvm_access_cmd, },
        };
        int ret, bytes_read, offset_read;
        uint8_t *resp_data;

        cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);

        ret = iwm_send_cmd(sc, &cmd);
        if (ret) {
                device_printf(sc->sc_dev,
                    "Could not send NVM_ACCESS command (error=%d)\n", ret);
                return ret;
        }

        pkt = cmd.resp_pkt;

        /* Extract NVM response */
        nvm_resp = (void *)pkt->data;
        ret = le16toh(nvm_resp->status);
        bytes_read = le16toh(nvm_resp->length);
        offset_read = le16toh(nvm_resp->offset);
        resp_data = nvm_resp->data;
        if (ret) {
                if ((offset != 0) &&
                    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
                        /*
                         * NOT_VALID_ADDRESS means we tried to read a chunk
                         * at an address past the end of the section (section
                         * size is a multiple of 2K, so the address is empty).
                         * Since offset != 0 we have already read valid data
                         * from earlier chunks, so this is not an error --
                         * just report a zero-length read to the caller.
                         */
                        IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
                                    "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
                                    offset);
                        *len = 0;
                        ret = 0;
                } else {
                        IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
                                    "NVM access command failed with status %d\n", ret);
                        ret = EIO;
                }
                goto exit;
        }

        /* The response must echo back the offset we asked for. */
        if (offset_read != offset) {
                device_printf(sc->sc_dev,
                    "NVM ACCESS response with invalid offset %d\n",
                    offset_read);
                ret = EINVAL;
                goto exit;
        }

        /* The firmware must never return more than we requested. */
        if (bytes_read > length) {
                device_printf(sc->sc_dev,
                    "NVM ACCESS response with too much data "
                    "(%d bytes requested, %d bytes received)\n",
                    length, bytes_read);
                ret = EINVAL;
                goto exit;
        }

        /* Copy the chunk into the caller's section buffer. */
        memcpy(data + offset, resp_data, bytes_read);
        *len = bytes_read;

 exit:
        iwm_free_resp(sc, &cmd);
        return ret;
}
1787
1788 /*
1789  * Reads an NVM section completely.
1790  * NICs prior to 7000 family don't have a real NVM, but just read
1791  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1792  * by uCode, we need to manually check in this case that we don't
1793  * overflow and try to read more than the EEPROM size.
1794  * For 7000 family NICs, we supply the maximal size we can read, and
1795  * the uCode fills the response with as much data as we can,
1796  * without overflowing, so no check is needed.
1797  */
1798 static int
1799 iwm_nvm_read_section(struct iwm_softc *sc,
1800         uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1801 {
1802         uint16_t seglen, length, offset = 0;
1803         int ret;
1804
1805         /* Set nvm section read length */
1806         length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1807
1808         seglen = length;
1809
1810         /* Read the NVM until exhausted (reading less than requested) */
1811         while (seglen == length) {
1812                 /* Check no memory assumptions fail and cause an overflow */
1813                 if ((size_read + offset + length) >
1814                     sc->cfg->eeprom_size) {
1815                         device_printf(sc->sc_dev,
1816                             "EEPROM size is too small for NVM\n");
1817                         return ENOBUFS;
1818                 }
1819
1820                 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1821                 if (ret) {
1822                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1823                                     "Cannot read NVM from section %d offset %d, length %d\n",
1824                                     section, offset, length);
1825                         return ret;
1826                 }
1827                 offset += seglen;
1828         }
1829
1830         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1831                     "NVM section %d read completed\n", section);
1832         *len = offset;
1833         return 0;
1834 }
1835
1836 /*
1837  * BEGIN IWM_NVM_PARSE
1838  */
1839
1840 /* iwlwifi/iwl-nvm-parse.c */
1841
1842 /* NVM offsets (in words) definitions */
enum iwm_nvm_offsets {
        /* NVM HW-Section offset (in words) definitions */
        IWM_HW_ADDR = 0x15,

        /* NVM SW-Section offset (in words) definitions */
        IWM_NVM_SW_SECTION = 0x1C0,
        IWM_NVM_VERSION = 0,
        IWM_RADIO_CFG = 1,
        IWM_SKU = 2,
        IWM_N_HW_ADDRS = 3,
        /* First channel-flags entry, relative to the SW section start. */
        IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

        /* NVM calibration section offset (in words) definitions */
        IWM_NVM_CALIB_SECTION = 0x2B8,
        IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
1859
/* Family 8000 equivalents of the offsets above (all in 16-bit words). */
enum iwm_8000_nvm_offsets {
        /* NVM HW-Section offset (in words) definitions */
        IWM_HW_ADDR0_WFPM_8000 = 0x12,
        IWM_HW_ADDR1_WFPM_8000 = 0x16,
        IWM_HW_ADDR0_PCIE_8000 = 0x8A,
        IWM_HW_ADDR1_PCIE_8000 = 0x8E,
        IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,

        /* NVM SW-Section offset (in words) definitions */
        IWM_NVM_SW_SECTION_8000 = 0x1C0,
        IWM_NVM_VERSION_8000 = 0,
        IWM_RADIO_CFG_8000 = 0,
        IWM_SKU_8000 = 2,
        IWM_N_HW_ADDRS_8000 = 3,

        /* NVM REGULATORY -Section offset (in words) definitions */
        IWM_NVM_CHANNELS_8000 = 0,
        IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
        IWM_NVM_LAR_OFFSET_8000 = 0x507,
        IWM_NVM_LAR_ENABLED_8000 = 0x7,

        /* NVM calibration section offset (in words) definitions */
        IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
        IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
};
1885
/*
 * SKU Capabilities (actual values from NVM definition); decoded from
 * the SKU word returned by iwm_get_sku().
 */
enum nvm_sku_bits {
        IWM_NVM_SKU_CAP_BAND_24GHZ      = (1 << 0),
        IWM_NVM_SKU_CAP_BAND_52GHZ      = (1 << 1),
        IWM_NVM_SKU_CAP_11N_ENABLE      = (1 << 2),
        IWM_NVM_SKU_CAP_11AC_ENABLE     = (1 << 3),
};
1893
/* radio config bits (actual values from NVM definition) */
#define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
#define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
#define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
#define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */

/* Family 8000 uses a wider radio config layout. */
#define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)       (x & 0xF)          /* bits 0-3   */
#define IWM_NVM_RF_CFG_DASH_MSK_8000(x)         ((x >> 4) & 0xF)   /* bits 4-7   */
#define IWM_NVM_RF_CFG_STEP_MSK_8000(x)         ((x >> 8) & 0xF)   /* bits 8-11  */
#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)         ((x >> 12) & 0xFFF) /* bits 12-23 */
#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)       ((x >> 24) & 0xF)  /* bits 24-27 */
#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)       ((x >> 28) & 0xF)  /* bits 28-31 */

/* NOTE(review): presumably in dBm -- confirm against the call sites. */
#define DEFAULT_MAX_TX_POWER 16
1910
1911 /**
1912  * enum iwm_nvm_channel_flags - channel flags in NVM
1913  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1914  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1915  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1916  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1917  * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1918  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1919  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1920  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1921  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1922  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1923  */
1924 enum iwm_nvm_channel_flags {
1925         IWM_NVM_CHANNEL_VALID = (1 << 0),
1926         IWM_NVM_CHANNEL_IBSS = (1 << 1),
1927         IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1928         IWM_NVM_CHANNEL_RADAR = (1 << 4),
1929         IWM_NVM_CHANNEL_DFS = (1 << 7),
1930         IWM_NVM_CHANNEL_WIDE = (1 << 8),
1931         IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1932         IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1933         IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1934 };
1935
1936 /*
1937  * Translate EEPROM flags to net80211.
1938  */
1939 static uint32_t
1940 iwm_eeprom_channel_flags(uint16_t ch_flags)
1941 {
1942         uint32_t nflags;
1943
1944         nflags = 0;
1945         if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1946                 nflags |= IEEE80211_CHAN_PASSIVE;
1947         if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1948                 nflags |= IEEE80211_CHAN_NOADHOC;
1949         if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1950                 nflags |= IEEE80211_CHAN_DFS;
1951                 /* Just in case. */
1952                 nflags |= IEEE80211_CHAN_NOADHOC;
1953         }
1954
1955         return (nflags);
1956 }
1957
/*
 * Walk the NVM channel flag table over indices [ch_idx, ch_num) and add
 * every channel marked valid to the net80211 channel array 'chans',
 * restricted to the modes listed in 'bands'.  Stops early when
 * ieee80211_add_channel() reports failure (array full).
 */
static void
iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
    int maxchans, int *nchans, int ch_idx, size_t ch_num,
    const uint8_t bands[])
{
        const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
        uint32_t nflags;
        uint16_t ch_flags;
        uint8_t ieee;
        int error;

        for (; ch_idx < ch_num; ch_idx++) {
                ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
                /* Map the table index to an IEEE channel number. */
                if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
                        ieee = iwm_nvm_channels[ch_idx];
                else
                        ieee = iwm_nvm_channels_8000[ch_idx];

                if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
                        IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
                            "Ch. %d Flags %x [%sGHz] - No traffic\n",
                            ieee, ch_flags,
                            (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
                            "5.2" : "2.4");
                        continue;
                }

                nflags = iwm_eeprom_channel_flags(ch_flags);
                error = ieee80211_add_channel(chans, maxchans, nchans,
                    ieee, 0, 0, nflags, bands);
                if (error != 0)
                        break;

                IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
                    "Ch. %d Flags %x [%sGHz] - Added\n",
                    ieee, ch_flags,
                    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
                    "5.2" : "2.4");
        }
}
1998
/*
 * net80211 channel-list callback: populate 'chans' from the NVM channel
 * table.  2 GHz channels are always added; 5 GHz channels only when the
 * SKU enables the 5.2 GHz band.
 */
static void
iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
    struct ieee80211_channel chans[])
{
        struct iwm_softc *sc = ic->ic_softc;
        struct iwm_nvm_data *data = sc->nvm_data;
        uint8_t bands[IEEE80211_MODE_BYTES];
        size_t ch_num;

        memset(bands, 0, sizeof(bands));
        /* 1-13: 11b/g channels. */
        setbit(bands, IEEE80211_MODE_11B);
        setbit(bands, IEEE80211_MODE_11G);
        iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
            IWM_NUM_2GHZ_CHANNELS - 1, bands);

        /* 14: 11b channel only. */
        clrbit(bands, IEEE80211_MODE_11G);
        iwm_add_channel_band(sc, chans, maxchans, nchans,
            IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);

        if (data->sku_cap_band_52GHz_enable) {
                /* 7000 and 8000 families use different channel tables. */
                if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
                        ch_num = nitems(iwm_nvm_channels);
                else
                        ch_num = nitems(iwm_nvm_channels_8000);
                memset(bands, 0, sizeof(bands));
                setbit(bands, IEEE80211_MODE_11A);
                iwm_add_channel_band(sc, chans, maxchans, nchans,
                    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
        }
}
2031
/*
 * Derive the MAC address for family 8000 devices.  Preference order:
 * 1) the MAC_OVERRIDE NVM section, used as-is when it holds a valid,
 *    non-reserved unicast address; 2) the WFMP PRPH registers.  If
 * neither source yields an address, hw_addr is zeroed and an error is
 * printed.
 */
static void
iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
        const uint16_t *mac_override, const uint16_t *nvm_hw)
{
        const uint8_t *hw_addr;

        if (mac_override) {
                /* Placeholder address Intel puts in unprovisioned NVMs. */
                static const uint8_t reserved_mac[] = {
                        0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
                };

                hw_addr = (const uint8_t *)(mac_override +
                                 IWM_MAC_ADDRESS_OVERRIDE_8000);

                /*
                 * Store the MAC address from MAO section.
                 * No byte swapping is required in MAO section
                 */
                IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);

                /*
                 * Force the use of the OTP MAC address in case of reserved MAC
                 * address in the NVM, or if address is given but invalid.
                 */
                if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
                    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
                    iwm_is_valid_ether_addr(data->hw_addr) &&
                    !IEEE80211_IS_MULTICAST(data->hw_addr))
                        return;

                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                    "%s: mac address from nvm override section invalid\n",
                    __func__);
        }

        if (nvm_hw) {
                /* read the mac address from WFMP registers */
                uint32_t mac_addr0 =
                    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
                uint32_t mac_addr1 =
                    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

                /* The register words are stored byte-reversed. */
                hw_addr = (const uint8_t *)&mac_addr0;
                data->hw_addr[0] = hw_addr[3];
                data->hw_addr[1] = hw_addr[2];
                data->hw_addr[2] = hw_addr[1];
                data->hw_addr[3] = hw_addr[0];

                hw_addr = (const uint8_t *)&mac_addr1;
                data->hw_addr[4] = hw_addr[1];
                data->hw_addr[5] = hw_addr[0];

                return;
        }

        device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
        memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
2090
2091 static int
2092 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2093             const uint16_t *phy_sku)
2094 {
2095         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2096                 return le16_to_cpup(nvm_sw + IWM_SKU);
2097
2098         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2099 }
2100
2101 static int
2102 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2103 {
2104         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2105                 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2106         else
2107                 return le32_to_cpup((const uint32_t *)(nvm_sw +
2108                                                 IWM_NVM_VERSION_8000));
2109 }
2110
2111 static int
2112 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2113                   const uint16_t *phy_sku)
2114 {
2115         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2116                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2117
2118         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2119 }
2120
2121 static int
2122 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2123 {
2124         int n_hw_addr;
2125
2126         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2127                 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2128
2129         n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2130
2131         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2132 }
2133
2134 static void
2135 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2136                   uint32_t radio_cfg)
2137 {
2138         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2139                 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2140                 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2141                 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2142                 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2143                 return;
2144         }
2145
2146         /* set the radio configuration for family 8000 */
2147         data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2148         data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2149         data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2150         data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2151         data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2152         data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2153 }
2154
/*
 * Fill data->hw_addr from the NVM HW section (pre-8000 devices) or via
 * the family-8000 override/WFMP lookup.  Returns 0 on success or EINVAL
 * if no valid MAC address could be derived.
 */
static int
iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
                   const uint16_t *nvm_hw, const uint16_t *mac_override)
{
#ifdef notyet /* for FAMILY 9000 */
        /* NOTE(review): 'cfg' is not in scope here; fix when enabling. */
        if (cfg->mac_addr_from_csr) {
                iwm_set_hw_address_from_csr(sc, data);
        } else
#endif
        if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
                const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);

                /* The byte order is little endian 16 bit, meaning 214365 */
                data->hw_addr[0] = hw_addr[1];
                data->hw_addr[1] = hw_addr[0];
                data->hw_addr[2] = hw_addr[3];
                data->hw_addr[3] = hw_addr[2];
                data->hw_addr[4] = hw_addr[5];
                data->hw_addr[5] = hw_addr[4];
        } else {
                iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
        }

        if (!iwm_is_valid_ether_addr(data->hw_addr)) {
                device_printf(sc->sc_dev, "no valid mac address was found\n");
                return EINVAL;
        }

        return 0;
}
2185
/*
 * Build an iwm_nvm_data blob from the raw NVM sections: SKU, radio
 * config, MAC address, LAR state (8000 family) and the channel flag
 * table.  Returns a malloc(9)'d structure (caller frees via
 * iwm_free_nvm_data()) or NULL on allocation/parse failure.
 */
static struct iwm_nvm_data *
iwm_parse_nvm_data(struct iwm_softc *sc,
                   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
                   const uint16_t *nvm_calib, const uint16_t *mac_override,
                   const uint16_t *phy_sku, const uint16_t *regulatory)
{
        struct iwm_nvm_data *data;
        uint32_t sku, radio_cfg;
        uint16_t lar_config;

        /* The channel flag table is carried as a flexible trailer. */
        if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
                data = malloc(sizeof(*data) +
                    IWM_NUM_CHANNELS * sizeof(uint16_t),
                    M_DEVBUF, M_NOWAIT | M_ZERO);
        } else {
                data = malloc(sizeof(*data) +
                    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
                    M_DEVBUF, M_NOWAIT | M_ZERO);
        }
        if (!data)
                return NULL;

        data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);

        radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
        iwm_set_radio_cfg(sc, data, radio_cfg);

        sku = iwm_get_sku(sc, nvm_sw, phy_sku);
        data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
        data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
        /* 11n is deliberately disabled regardless of the SKU bit. */
        data->sku_cap_11n_enable = 0;

        data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);

        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
                /* The LAR word moved in NVM version 0xE39. */
                uint16_t lar_offset = data->nvm_version < 0xE39 ?
                                       IWM_NVM_LAR_OFFSET_8000_OLD :
                                       IWM_NVM_LAR_OFFSET_8000;

                lar_config = le16_to_cpup(regulatory + lar_offset);
                data->lar_enabled = !!(lar_config &
                                       IWM_NVM_LAR_ENABLED_8000);
        }

        /* If no valid mac address was found - bail out */
        if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
                free(data, M_DEVBUF);
                return NULL;
        }

        /* Copy the channel flag table out of the appropriate section. */
        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
                memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
                    IWM_NUM_CHANNELS * sizeof(uint16_t));
        } else {
                memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
                    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
        }

        return data;
}
2246
2247 static void
2248 iwm_free_nvm_data(struct iwm_nvm_data *data)
2249 {
2250         if (data != NULL)
2251                 free(data, M_DEVBUF);
2252 }
2253
/*
 * Parse the raw NVM sections into a freshly allocated iwm_nvm_data.
 *
 * First validates that the sections mandatory for the detected device
 * family are present, then hands the section pointers to
 * iwm_parse_nvm_data().  Returns NULL if a required section is missing
 * or parsing fails; on success the caller owns the returned structure
 * (release it with iwm_free_nvm_data()).
 */
static struct iwm_nvm_data *
iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
{
	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;

	/* Checking for required sections */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		/* 7000 family: SW and HW sections are mandatory. */
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[sc->cfg->nvm_hw_section_num].data) {
			device_printf(sc->sc_dev,
			    "Can't parse empty OTP/NVM sections\n");
			return NULL;
		}
	} else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		/* SW and REGULATORY sections are mandatory */
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
			device_printf(sc->sc_dev,
			    "Can't parse empty OTP/NVM sections\n");
			return NULL;
		}
		/* MAC_OVERRIDE or at least HW section must exist */
		if (!sections[sc->cfg->nvm_hw_section_num].data &&
		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
			device_printf(sc->sc_dev,
			    "Can't parse mac_address, empty sections\n");
			return NULL;
		}

		/* PHY_SKU section is mandatory in B0 */
		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
			device_printf(sc->sc_dev,
			    "Can't parse phy_sku in B0, empty sections\n");
			return NULL;
		}
	} else {
		panic("unknown device family %d\n", sc->cfg->device_family);
	}

	/*
	 * Sections not validated above (e.g. calibration, and regulatory /
	 * mac_override / phy_sku on the 7000 family) may be NULL here.
	 */
	hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
	calib = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
	regulatory = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
	mac_override = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;

	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
	    phy_sku, regulatory);
}
2306
2307 static int
2308 iwm_nvm_init(struct iwm_softc *sc)
2309 {
2310         struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2311         int i, ret, section;
2312         uint32_t size_read = 0;
2313         uint8_t *nvm_buffer, *temp;
2314         uint16_t len;
2315
2316         memset(nvm_sections, 0, sizeof(nvm_sections));
2317
2318         if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2319                 return EINVAL;
2320
2321         /* load NVM values from nic */
2322         /* Read From FW NVM */
2323         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2324
2325         nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2326         if (!nvm_buffer)
2327                 return ENOMEM;
2328         for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2329                 /* we override the constness for initial read */
2330                 ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2331                                            &len, size_read);
2332                 if (ret)
2333                         continue;
2334                 size_read += len;
2335                 temp = malloc(len, M_DEVBUF, M_NOWAIT);
2336                 if (!temp) {
2337                         ret = ENOMEM;
2338                         break;
2339                 }
2340                 memcpy(temp, nvm_buffer, len);
2341
2342                 nvm_sections[section].data = temp;
2343                 nvm_sections[section].length = len;
2344         }
2345         if (!size_read)
2346                 device_printf(sc->sc_dev, "OTP is blank\n");
2347         free(nvm_buffer, M_DEVBUF);
2348
2349         sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2350         if (!sc->nvm_data)
2351                 return EINVAL;
2352         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2353                     "nvm version = %x\n", sc->nvm_data->nvm_version);
2354
2355         for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2356                 if (nvm_sections[i].data != NULL)
2357                         free(nvm_sections[i].data, M_DEVBUF);
2358         }
2359
2360         return 0;
2361 }
2362
/*
 * Load a single firmware section into device memory.
 *
 * The section is copied in chunks of at most IWM_FH_MEM_TB_MAX_LENGTH
 * bytes through the pre-allocated fw_dma bounce buffer; each chunk is
 * pushed to the device by iwm_pcie_load_firmware_chunk().  Chunks whose
 * destination lies in the extended address window have the LMPM
 * "extended address space" bit set for the duration of the transfer.
 */
static int
iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
	const struct iwm_fw_desc *section)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	uint8_t *v_addr;
	bus_addr_t p_addr;
	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: [%d] uCode section being loaded...\n",
		    __func__, section_num);

	v_addr = dma->vaddr;
	p_addr = dma->paddr;

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		uint32_t copy_size, dst_addr;
		/* TRUE when this chunk targets the extended window. */
		int extended_addr = FALSE;

		copy_size = MIN(chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
			extended_addr = TRUE;

		if (extended_addr)
			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		/* Stage the chunk in the bounce buffer, then sync for DMA. */
		memcpy(v_addr, (const uint8_t *)section->data + offset,
		    copy_size);
		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
						   copy_size);

		/* Clear the extended-address bit even if the chunk failed. */
		if (extended_addr)
			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			device_printf(sc->sc_dev,
			    "%s: Could not load the [%d] uCode section\n",
			    __func__, section_num);
			break;
		}
	}

	return ret;
}
2415
/*
 * ucode
 */

/*
 * Push one staged chunk from host DMA memory into device SRAM via the
 * FH service channel, then sleep until the interrupt path reports
 * completion through sc->sc_fw_chunk_done.
 *
 * Returns 0 on success, EBUSY if the NIC could not be locked, or
 * ETIMEDOUT if no completion wakeup arrives.
 */
static int
iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
                             bus_addr_t phy_addr, uint32_t byte_cnt)
{
	int ret;

	sc->sc_fw_chunk_done = 0;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Pause the channel while the transfer is programmed. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	/* Destination address in device SRAM. */
	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);

	/* Source: low and high bits of the host physical address. */
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(phy_addr)
	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	/* Kick off the DMA. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* wait up to 5s for this segment to load */
	ret = 0;
	while (!sc->sc_fw_chunk_done) {
		ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
		if (ret)
			break;
	}

	if (ret != 0) {
		device_printf(sc->sc_dev,
		    "fw chunk addr 0x%x len %d failed to load\n",
		    dst_addr, byte_cnt);
		return ETIMEDOUT;
	}

	return 0;
}
2472
/*
 * Load the firmware sections belonging to one CPU on 8000-family
 * devices.
 *
 * Sections are consumed starting at *first_ucode_section until a
 * separator (or empty) section terminates the run; the stop index is
 * written back so the caller can continue with the next CPU.  After
 * each section the loaded-section bitmap in IWM_FH_UCODE_LOAD_STATUS
 * is updated (CPU1 uses the low 16 bits, CPU2 the high 16 bits via
 * shift_param); when done, the relevant half of that register is set
 * to all-ones.
 */
static int
iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	uint32_t val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->fw_sect[i].data ||
		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
				    "Break since Data not valid or Empty section, sec = %d\n",
				    i);
			break;
		}
		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
		if (ret)
			return ret;

		/* Notify the ucode of the loaded section number and status */
		if (iwm_nic_lock(sc)) {
			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
			/* Running tally: 0x1, 0x3, 0x7, ... */
			sec_num = (sec_num << 1) | 0x1;
			iwm_nic_unlock(sc);
		}
	}

	*first_ucode_section = last_read_idx;

	iwm_enable_interrupts(sc);

	/* Mark this CPU's half of the load-status register complete. */
	if (iwm_nic_lock(sc)) {
		if (cpu == 1)
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
		else
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
		iwm_nic_unlock(sc);
	}

	return 0;
}
2534
2535 static int
2536 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2537         const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2538 {
2539         int shift_param;
2540         int i, ret = 0;
2541         uint32_t last_read_idx = 0;
2542
2543         if (cpu == 1) {
2544                 shift_param = 0;
2545                 *first_ucode_section = 0;
2546         } else {
2547                 shift_param = 16;
2548                 (*first_ucode_section)++;
2549         }
2550
2551         for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2552                 last_read_idx = i;
2553
2554                 /*
2555                  * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2556                  * CPU1 to CPU2.
2557                  * PAGING_SEPARATOR_SECTION delimiter - separate between
2558                  * CPU2 non paged to CPU2 paging sec.
2559                  */
2560                 if (!image->fw_sect[i].data ||
2561                     image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2562                     image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2563                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2564                                     "Break since Data not valid or Empty section, sec = %d\n",
2565                                      i);
2566                         break;
2567                 }
2568
2569                 ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2570                 if (ret)
2571                         return ret;
2572         }
2573
2574         *first_ucode_section = last_read_idx;
2575
2576         return 0;
2577
2578 }
2579
2580 static int
2581 iwm_pcie_load_given_ucode(struct iwm_softc *sc,
2582         const struct iwm_fw_sects *image)
2583 {
2584         int ret = 0;
2585         int first_ucode_section;
2586
2587         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2588                      image->is_dual_cpus ? "Dual" : "Single");
2589
2590         /* load to FW the binary non secured sections of CPU1 */
2591         ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2592         if (ret)
2593                 return ret;
2594
2595         if (image->is_dual_cpus) {
2596                 /* set CPU2 header address */
2597                 if (iwm_nic_lock(sc)) {
2598                         iwm_write_prph(sc,
2599                                        IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2600                                        IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2601                         iwm_nic_unlock(sc);
2602                 }
2603
2604                 /* load to FW the binary sections of CPU2 */
2605                 ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2606                                                  &first_ucode_section);
2607                 if (ret)
2608                         return ret;
2609         }
2610
2611         iwm_enable_interrupts(sc);
2612
2613         /* release CPU reset */
2614         IWM_WRITE(sc, IWM_CSR_RESET, 0);
2615
2616         return 0;
2617 }
2618
2619 int
2620 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2621         const struct iwm_fw_sects *image)
2622 {
2623         int ret = 0;
2624         int first_ucode_section;
2625
2626         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2627                     image->is_dual_cpus ? "Dual" : "Single");
2628
2629         /* configure the ucode to be ready to get the secured image */
2630         /* release CPU reset */
2631         if (iwm_nic_lock(sc)) {
2632                 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2633                     IWM_RELEASE_CPU_RESET_BIT);
2634                 iwm_nic_unlock(sc);
2635         }
2636
2637         /* load to FW the binary Secured sections of CPU1 */
2638         ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2639             &first_ucode_section);
2640         if (ret)
2641                 return ret;
2642
2643         /* load to FW the binary sections of CPU2 */
2644         return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2645             &first_ucode_section);
2646 }
2647
/* XXX Get rid of this definition */
/*
 * Restrict the interrupt mask to FH_TX only, the interrupt used to
 * signal completion of firmware-chunk DMA during image load.
 */
static inline void
iwm_enable_fw_load_int(struct iwm_softc *sc)
{
	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
2656
/* XXX Add proper rfkill support code */
/*
 * Prepare the hardware and load the given firmware image.
 *
 * Takes ownership of the device (may fail if AMT holds it), clears the
 * rfkill handshake bits, initializes the NIC, narrows the interrupt
 * mask to FH_TX for the duration of the load, and finally pushes the
 * image using the family-specific loader.  Returns 0 or an errno.
 */
static int
iwm_start_fw(struct iwm_softc *sc,
	const struct iwm_fw_sects *fw)
{
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwm_prepare_card_hw(sc)) {
		device_printf(sc->sc_dev,
		    "%s: Exit HW not ready\n", __func__);
		ret = EIO;
		goto out;
	}

	/* Ack any pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

	iwm_disable_interrupts(sc);

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

	ret = iwm_nic_init(sc);
	if (ret) {
		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
		goto out;
	}

	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all the interrupt besides the
	 * FH_TX interrupt which is needed to load the firmware). If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
	iwm_enable_fw_load_int(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
	else
		ret = iwm_pcie_load_given_ucode(sc, fw);

	/* XXX re-check RF-Kill state */

out:
	return ret;
}
2715
2716 static int
2717 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2718 {
2719         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2720                 .valid = htole32(valid_tx_ant),
2721         };
2722
2723         return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2724             IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2725 }
2726
/* iwlwifi: mvm/fw.c */
/*
 * Send the PHY configuration and the default calibration triggers for
 * the currently running ucode type to the firmware.
 *
 * NOTE(review): phy_cfg_cmd is not zero-initialized; only phy_cfg and
 * the two calib_control triggers are assigned, so any other fields or
 * padding would carry stack garbage - confirm the struct is fully
 * covered by these assignments.
 */
static int
iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
{
	struct iwm_phy_cfg_cmd phy_cfg_cmd;
	enum iwm_ucode_type ucode_type = sc->cur_ucode;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
	phy_cfg_cmd.calib_control.event_trigger =
	    sc->sc_default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
	    sc->sc_default_calib[ucode_type].flow_trigger;

	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
2746
/*
 * Notification-wait callback for the firmware's ALIVE message.
 *
 * The three known response layouts are distinguished by payload size.
 * For each, the error/log event table addresses and the scheduler base
 * address are recorded in the softc; v2/v3 additionally carry the UMAC
 * error table address.  alive_data->valid reflects the reported status
 * word.  Always returns TRUE so the waiter is woken.
 */
static int
iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
{
	struct iwm_mvm_alive_data *alive_data = data;
	struct iwm_mvm_alive_resp_ver1 *palive1;
	struct iwm_mvm_alive_resp_ver2 *palive2;
	struct iwm_mvm_alive_resp *palive;

	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
		/* Version 1: no UMAC log support. */
		palive1 = (void *)pkt->data;

		sc->support_umac_log = FALSE;
		sc->error_event_table =
			le32toh(palive1->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive1->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);

		alive_data->valid = le16toh(palive1->status) ==
				    IWM_ALIVE_STATUS_OK;
		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16toh(palive1->status), palive1->ver_type,
			     palive1->ver_subtype, palive1->flags);
	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
		/* Version 2: adds the UMAC error-info address. */
		palive2 = (void *)pkt->data;
		sc->error_event_table =
			le32toh(palive2->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive2->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
		sc->umac_error_event_table =
			le32toh(palive2->error_info_addr);

		alive_data->valid = le16toh(palive2->status) ==
				    IWM_ALIVE_STATUS_OK;
		if (sc->umac_error_event_table)
			sc->support_umac_log = TRUE;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive2->status), palive2->ver_type,
			    palive2->ver_subtype, palive2->flags);

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			    palive2->umac_major, palive2->umac_minor);
	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		/* Version 3: current layout; UMAC version fields are LE32. */
		palive = (void *)pkt->data;

		sc->error_event_table =
			le32toh(palive->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
		sc->umac_error_event_table =
			le32toh(palive->error_info_addr);

		alive_data->valid = le16toh(palive->status) ==
				    IWM_ALIVE_STATUS_OK;
		if (sc->umac_error_event_table)
			sc->support_umac_log = TRUE;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive->status), palive->ver_type,
			    palive->ver_subtype, palive->flags);

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			    le32toh(palive->umac_major),
			    le32toh(palive->umac_minor));
	}

	return TRUE;
}
2823
2824 static int
2825 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2826         struct iwm_rx_packet *pkt, void *data)
2827 {
2828         struct iwm_phy_db *phy_db = data;
2829
2830         if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2831                 if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2832                         device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2833                             __func__, pkt->hdr.code);
2834                 }
2835                 return TRUE;
2836         }
2837
2838         if (iwm_phy_db_set_section(phy_db, pkt)) {
2839                 device_printf(sc->sc_dev,
2840                     "%s: iwm_phy_db_set_section failed\n", __func__);
2841         }
2842
2843         return FALSE;
2844 }
2845
/*
 * Load the requested ucode type and block until its ALIVE notification
 * arrives.
 *
 * Reads the firmware image, registers an ALIVE waiter, starts the
 * firmware, then sleeps (with the softc lock dropped) until
 * iwm_alive_fn() has processed the ALIVE message.  On success the
 * scheduler base address is programmed and, if the image uses paging,
 * the paging mechanism is configured and announced to the firmware.
 * On any failure sc->cur_ucode is restored to the previously running
 * type.
 */
static int
iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
	enum iwm_ucode_type ucode_type)
{
	struct iwm_notification_wait alive_wait;
	struct iwm_mvm_alive_data alive_data;
	const struct iwm_fw_sects *fw;
	enum iwm_ucode_type old_type = sc->cur_ucode;
	int error;
	static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };

	if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
		device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
			error);
		return error;
	}
	fw = &sc->sc_fw.fw_sects[ucode_type];
	sc->cur_ucode = ucode_type;
	sc->ucode_loaded = FALSE;

	memset(&alive_data, 0, sizeof(alive_data));
	/* Register the waiter before starting the firmware so the ALIVE
	 * notification cannot be missed. */
	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
				   alive_cmd, nitems(alive_cmd),
				   iwm_alive_fn, &alive_data);

	error = iwm_start_fw(sc, fw);
	if (error) {
		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
		sc->cur_ucode = old_type;
		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
		return error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	IWM_UNLOCK(sc);
	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
				      IWM_MVM_UCODE_ALIVE_TIMEOUT);
	IWM_LOCK(sc);
	if (error) {
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
			/* Dump the secure-boot status registers to aid
			 * debugging; 0x5a5a5a5a marks an unread value. */
			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
			if (iwm_nic_lock(sc)) {
				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
				iwm_nic_unlock(sc);
			}
			device_printf(sc->sc_dev,
			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
			    a, b);
		}
		sc->cur_ucode = old_type;
		return error;
	}

	if (!alive_data.valid) {
		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
		    __func__);
		sc->cur_ucode = old_type;
		return EIO;
	}

	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);

	/*
	 * configure and operate fw paging mechanism.
	 * driver configures the paging flow only once, CPU2 paging image
	 * included in the IWM_UCODE_INIT image.
	 */
	if (fw->paging_mem_size) {
		error = iwm_save_fw_paging(sc, fw);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: failed to save the FW paging image\n",
			    __func__);
			return error;
		}

		error = iwm_send_paging_cmd(sc, fw);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: failed to send the paging cmd\n", __func__);
			iwm_free_fw_paging(sc);
			return error;
		}
	}

	if (!error)
		sc->ucode_loaded = TRUE;
	return error;
}
2939
2940 /*
2941  * mvm misc bits
2942  */
2943
/*
 * follows iwlwifi/fw.c
 */
/*
 * Boot the INIT ucode and run its initialization flow.
 *
 * With justnvm set, only the NVM is read and the MAC address copied
 * out; otherwise BT coex config, TX antenna config and the PHY config
 * command are sent, then we wait for the calibration-complete
 * notification.
 *
 * Note: "goto error" is not exclusively a failure path - the justnvm
 * branch jumps there on success too, because the error label's only
 * action is removing the notification waiter, which is needed on every
 * exit that bypasses the final wait.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	struct iwm_notification_wait calib_wait;
	static const uint16_t init_complete[] = {
		IWM_INIT_COMPLETE_NOTIF,
		IWM_CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		device_printf(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	iwm_init_notification_wait(sc->sc_notif_wait,
				   &calib_wait,
				   init_complete,
				   nitems(init_complete),
				   iwm_wait_phy_db_entry,
				   sc->sc_phy_db);

	/* Will also start the device */
	ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
	if (ret) {
		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
		    ret);
		goto error;
	}

	if (justnvm) {
		/* Read nvm */
		ret = iwm_nvm_init(sc);
		if (ret) {
			device_printf(sc->sc_dev, "failed to read nvm\n");
			goto error;
		}
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
		/* Success exit: still needs the waiter removed. */
		goto error;
	}

	ret = iwm_send_bt_init_conf(sc);
	if (ret) {
		device_printf(sc->sc_dev,
		    "failed to send bt coex configuration: %d\n", ret);
		goto error;
	}

	/* Send TX valid antennas before triggering calibrations */
	ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
	if (ret) {
		device_printf(sc->sc_dev,
		    "failed to send antennas before calibration: %d\n", ret);
		goto error;
	}

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	ret = iwm_send_phy_cfg_cmd(sc);
	if (ret) {
		device_printf(sc->sc_dev,
		    "%s: Failed to run INIT calibrations: %d\n",
		    __func__, ret);
		goto error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware.
	 */
	IWM_UNLOCK(sc);
	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
	    IWM_MVM_UCODE_CALIB_TIMEOUT);
	IWM_LOCK(sc);


	goto out;

error:
	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
out:
	return ret;
}
3034
3035 /*
3036  * receive side
3037  */
3038
/* (re)stock rx ring, called at init-time and at runtime */
/*
 * Allocate a receive mbuf for ring slot @idx and map it for DMA.
 *
 * The mbuf is first loaded into the ring's spare DMA map; only after a
 * successful load is the slot's old map unloaded and the two maps
 * swapped, so the slot keeps its previous valid buffer if allocation
 * or mapping fails.  The RX descriptor stores the physical address
 * shifted right by 8 (hence the 256-byte alignment assertion).
 */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	bus_dmamap_t dmamap;
	bus_dma_segment_t seg;
	int nsegs, error;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
	if (m == NULL)
		return ENOBUFS;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
	    &seg, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: can't map mbuf, error %d\n", __func__, error);
		m_freem(m);
		return error;
	}

	if (data->m != NULL)
		bus_dmamap_unload(ring->data_dmat, data->map);

	/* Swap ring->spare_map with data->map */
	dmamap = data->map;
	data->map = ring->spare_map;
	ring->spare_map = dmamap;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
	data->m = m;

	/* Update RX descriptor. */
	KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
	ring->desc[idx] = htole32(seg.ds_addr >> 8);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}
3083
3084 /* iwlwifi: mvm/rx.c */
3085 /*
3086  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3087  * values are reported by the fw as positive values - need to negate
3088  * to obtain their dBM.  Account for missing antennas by replacing 0
3089  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3090  */
3091 static int
3092 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3093 {
3094         int energy_a, energy_b, energy_c, max_energy;
3095         uint32_t val;
3096
3097         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3098         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3099             IWM_RX_INFO_ENERGY_ANT_A_POS;
3100         energy_a = energy_a ? -energy_a : -256;
3101         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3102             IWM_RX_INFO_ENERGY_ANT_B_POS;
3103         energy_b = energy_b ? -energy_b : -256;
3104         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3105             IWM_RX_INFO_ENERGY_ANT_C_POS;
3106         energy_c = energy_c ? -energy_c : -256;
3107         max_energy = MAX(energy_a, energy_b);
3108         max_energy = MAX(max_energy, energy_c);
3109
3110         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3111             "energy In A %d B %d C %d , and max %d\n",
3112             energy_a, energy_b, energy_c, max_energy);
3113
3114         return max_energy;
3115 }
3116
3117 static void
3118 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3119 {
3120         struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3121
3122         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3123
3124         memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3125 }
3126
3127 /*
3128  * Retrieve the average noise (in dBm) among receivers.
3129  */
3130 static int
3131 iwm_get_noise(struct iwm_softc *sc,
3132     const struct iwm_mvm_statistics_rx_non_phy *stats)
3133 {
3134         int i, total, nbant, noise;
3135
3136         total = nbant = noise = 0;
3137         for (i = 0; i < 3; i++) {
3138                 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3139                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3140                     __func__,
3141                     i,
3142                     noise);
3143
3144                 if (noise) {
3145                         total += noise;
3146                         nbant++;
3147                 }
3148         }
3149
3150         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3151             __func__, nbant, total);
3152 #if 0
3153         /* There should be at least one antenna but check anyway. */
3154         return (nbant == 0) ? -127 : (total / nbant) - 107;
3155 #else
3156         /* For now, just hard-code it to -96 to be safe */
3157         return (-96);
3158 #endif
3159 }
3160
3161 /*
3162  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3163  *
3164  * Handles the actual data of the Rx packet from the fw
3165  */
3166 static boolean_t
3167 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3168         boolean_t stolen)
3169 {
3170         struct ieee80211com *ic = &sc->sc_ic;
3171         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3172         struct ieee80211_frame *wh;
3173         struct ieee80211_node *ni;
3174         struct ieee80211_rx_stats rxs;
3175         struct iwm_rx_phy_info *phy_info;
3176         struct iwm_rx_mpdu_res_start *rx_res;
3177         struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3178         uint32_t len;
3179         uint32_t rx_pkt_status;
3180         int rssi;
3181
3182         phy_info = &sc->sc_last_phy_info;
3183         rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3184         wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3185         len = le16toh(rx_res->byte_count);
3186         rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3187
3188         if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3189                 device_printf(sc->sc_dev,
3190                     "dsp size out of range [0,20]: %d\n",
3191                     phy_info->cfg_phy_cnt);
3192                 goto fail;
3193         }
3194
3195         if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3196             !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3197                 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3198                     "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3199                 goto fail;
3200         }
3201
3202         rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3203
3204         /* Map it to relative value */
3205         rssi = rssi - sc->sc_noise;
3206
3207         /* replenish ring for the buffer we're going to feed to the sharks */
3208         if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3209                 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3210                     __func__);
3211                 goto fail;
3212         }
3213
3214         m->m_data = pkt->data + sizeof(*rx_res);
3215         m->m_pkthdr.len = m->m_len = len;
3216
3217         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3218             "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3219
3220         ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3221
3222         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3223             "%s: phy_info: channel=%d, flags=0x%08x\n",
3224             __func__,
3225             le16toh(phy_info->channel),
3226             le16toh(phy_info->phy_flags));
3227
3228         /*
3229          * Populate an RX state struct with the provided information.
3230          */
3231         bzero(&rxs, sizeof(rxs));
3232         rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3233         rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3234         rxs.c_ieee = le16toh(phy_info->channel);
3235         if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3236                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3237         } else {
3238                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3239         }
3240
3241         /* rssi is in 1/2db units */
3242         rxs.c_rssi = rssi * 2;
3243         rxs.c_nf = sc->sc_noise;
3244         if (ieee80211_add_rx_params(m, &rxs) == 0) {
3245                 if (ni)
3246                         ieee80211_free_node(ni);
3247                 goto fail;
3248         }
3249
3250         if (ieee80211_radiotap_active_vap(vap)) {
3251                 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3252
3253                 tap->wr_flags = 0;
3254                 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3255                         tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3256                 tap->wr_chan_freq = htole16(rxs.c_freq);
3257                 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3258                 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3259                 tap->wr_dbm_antsignal = (int8_t)rssi;
3260                 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3261                 tap->wr_tsft = phy_info->system_timestamp;
3262                 switch (phy_info->rate) {
3263                 /* CCK rates. */
3264                 case  10: tap->wr_rate =   2; break;
3265                 case  20: tap->wr_rate =   4; break;
3266                 case  55: tap->wr_rate =  11; break;
3267                 case 110: tap->wr_rate =  22; break;
3268                 /* OFDM rates. */
3269                 case 0xd: tap->wr_rate =  12; break;
3270                 case 0xf: tap->wr_rate =  18; break;
3271                 case 0x5: tap->wr_rate =  24; break;
3272                 case 0x7: tap->wr_rate =  36; break;
3273                 case 0x9: tap->wr_rate =  48; break;
3274                 case 0xb: tap->wr_rate =  72; break;
3275                 case 0x1: tap->wr_rate =  96; break;
3276                 case 0x3: tap->wr_rate = 108; break;
3277                 /* Unknown rate: should not happen. */
3278                 default:  tap->wr_rate =   0;
3279                 }
3280         }
3281
3282         IWM_UNLOCK(sc);
3283         if (ni != NULL) {
3284                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3285                 ieee80211_input_mimo(ni, m);
3286                 ieee80211_free_node(ni);
3287         } else {
3288                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3289                 ieee80211_input_mimo_all(ic, m);
3290         }
3291         IWM_LOCK(sc);
3292
3293         return TRUE;
3294
3295 fail:
3296         counter_u64_add(ic->ic_ierrors, 1);
3297         return FALSE;
3298 }
3299
3300 static int
3301 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3302         struct iwm_node *in)
3303 {
3304         struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3305         struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
3306         struct ieee80211_node *ni = &in->in_ni;
3307         int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3308
3309         KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3310
3311         /* Update rate control statistics. */
3312         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3313             __func__,
3314             (int) le16toh(tx_resp->status.status),
3315             (int) le16toh(tx_resp->status.sequence),
3316             tx_resp->frame_count,
3317             tx_resp->bt_kill_count,
3318             tx_resp->failure_rts,
3319             tx_resp->failure_frame,
3320             le32toh(tx_resp->initial_rate),
3321             (int) le16toh(tx_resp->wireless_media_time));
3322
3323         txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
3324                      IEEE80211_RATECTL_STATUS_LONG_RETRY;
3325         txs->short_retries = tx_resp->failure_rts;
3326         txs->long_retries = tx_resp->failure_frame;
3327         if (status != IWM_TX_STATUS_SUCCESS &&
3328             status != IWM_TX_STATUS_DIRECT_DONE) {
3329                 switch (status) {
3330                 case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
3331                         txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
3332                         break;
3333                 case IWM_TX_STATUS_FAIL_LONG_LIMIT:
3334                         txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
3335                         break;
3336                 case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
3337                         txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
3338                         break;
3339                 default:
3340                         txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
3341                         break;
3342                 }
3343         } else {
3344                 txs->status = IEEE80211_RATECTL_TX_SUCCESS;
3345         }
3346         ieee80211_ratectl_tx_complete(ni, txs);
3347
3348         return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
3349 }
3350
/*
 * Handle a TX completion notification: report status to rate control,
 * release the slot's DMA mapping and mbuf, and restart the transmit
 * path if the ring drops back below the low-water mark.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;
	struct mbuf *m = txd->m;
	int status;

	KASSERT(txd->done == 0, ("txd not done"));
	KASSERT(txd->in != NULL, ("txd without node"));
	KASSERT(txd->m != NULL, ("txd without mbuf"));

	/* The frame left the hardware; stop the watchdog countdown. */
	sc->sc_tx_timer = 0;

	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "free txd %p, in %p\n", txd, txd->in);
	txd->done = 1;
	txd->m = NULL;
	txd->in = NULL;

	/* Frees the mbuf and drops the node reference taken at TX time. */
	ieee80211_tx_complete(&in->in_ni, m, status);

	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0) {
			iwm_start(sc);
		}
	}
}
3390
3391 /*
3392  * transmit side
3393  */
3394
3395 /*
3396  * Process a "command done" firmware notification.  This is where we wakeup
3397  * processes waiting for a synchronous command completion.
3398  * from if_iwn
3399  */
/*
 * Process a "command done" firmware notification.  This is where we wakeup
 * processes waiting for a synchronous command completion.
 * from if_iwn
 */
static void
iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
	struct iwm_tx_data *data;

	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
		return;	/* Not a command ack. */
	}

	/* XXX wide commands? */
	IWM_DPRINTF(sc, IWM_DEBUG_CMD,
	    "cmd notification type 0x%x qid %d idx %d\n",
	    pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);

	data = &ring->data[pkt->hdr.idx];

	/* If the command was mapped in an mbuf, free it. */
	if (data->m != NULL) {
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	/* Wake anyone sleeping in iwm_send_cmd() on this slot. */
	wakeup(&ring->desc[pkt->hdr.idx]);

	/* Sanity check: completions should arrive in submission order. */
	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
		device_printf(sc->sc_dev,
		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
		/* XXX call iwm_force_nmi() */
	}

	KASSERT(ring->queued > 0, ("ring->queued is empty?"));
	ring->queued--;
	/* Last outstanding command: let the NIC drop its wakeup reference. */
	if (ring->queued == 0)
		iwm_pcie_clear_cmd_in_flight(sc);
}
3439
#if 0
/*
 * necessary only for block ack mode
 *
 * Write the byte-count entry for (qid, idx) into the TX scheduler's
 * byte-count table so the hardware scheduler knows the frame size.
 * Currently compiled out; see the #if 0 call site in iwm_tx().
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	/* Entry packs the station id in the top nibble with the length. */
	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	/* Mirror low entries into the duplicate region of the table. */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
}
#endif
3472
3473 /*
3474  * Take an 802.11 (non-n) rate, find the relevant rate
3475  * table entry.  return the index into in_ridx[].
3476  *
3477  * The caller then uses that index back into in_ridx
3478  * to figure out the rate index programmed /into/
3479  * the firmware for this given node.
3480  */
3481 static int
3482 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3483     uint8_t rate)
3484 {
3485         int i;
3486         uint8_t r;
3487
3488         for (i = 0; i < nitems(in->in_ridx); i++) {
3489                 r = iwm_rates[in->in_ridx[i]].rate;
3490                 if (rate == r)
3491                         return (i);
3492         }
3493
3494         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3495             "%s: couldn't find an entry for rate=%d\n",
3496             __func__,
3497             rate);
3498
3499         /* XXX Return the first */
3500         /* XXX TODO: have it return the /lowest/ */
3501         return (0);
3502 }
3503
3504 static int
3505 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3506 {
3507         int i;
3508
3509         for (i = 0; i < nitems(iwm_rates); i++) {
3510                 if (iwm_rates[i].rate == rate)
3511                         return (i);
3512         }
3513         /* XXX error? */
3514         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3515             "%s: couldn't find an entry for rate=%d\n",
3516             __func__,
3517             rate);
3518         return (0);
3519 }
3520
3521 /*
3522  * Fill in the rate related information for a transmit command.
3523  */
3524 static const struct iwm_rate *
3525 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3526         struct mbuf *m, struct iwm_tx_cmd *tx)
3527 {
3528         struct ieee80211_node *ni = &in->in_ni;
3529         struct ieee80211_frame *wh;
3530         const struct ieee80211_txparam *tp = ni->ni_txparms;
3531         const struct iwm_rate *rinfo;
3532         int type;
3533         int ridx, rate_flags;
3534
3535         wh = mtod(m, struct ieee80211_frame *);
3536         type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3537
3538         tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3539         tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3540
3541         if (type == IEEE80211_FC0_TYPE_MGT ||
3542             type == IEEE80211_FC0_TYPE_CTL ||
3543             (m->m_flags & M_EAPOL) != 0) {
3544                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3545                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3546                     "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3547         } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3548                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3549                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3550                     "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3551         } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3552                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3553                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3554                     "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3555         } else {
3556                 int i;
3557
3558                 /* for data frames, use RS table */
3559                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3560                 /* XXX pass pktlen */
3561                 (void) ieee80211_ratectl_rate(ni, NULL, 0);
3562                 i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3563                 ridx = in->in_ridx[i];
3564
3565                 /* This is the index into the programmed table */
3566                 tx->initial_rate_index = i;
3567                 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3568
3569                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3570                     "%s: start with i=%d, txrate %d\n",
3571                     __func__, i, iwm_rates[ridx].rate);
3572         }
3573
3574         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3575             "%s: frame type=%d txrate %d\n",
3576                 __func__, type, iwm_rates[ridx].rate);
3577
3578         rinfo = &iwm_rates[ridx];
3579
3580         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3581             __func__, ridx,
3582             rinfo->rate,
3583             !! (IWM_RIDX_IS_CCK(ridx))
3584             );
3585
3586         /* XXX TODO: hard-coded TX antenna? */
3587         rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3588         if (IWM_RIDX_IS_CCK(ridx))
3589                 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3590         tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3591
3592         return rinfo;
3593 }
3594
3595 #define TB0_SIZE 16
/*
 * Queue one frame on TX ring 'ac': build the firmware TX command
 * (rate, flags, power-save timeout, 802.11 header copy), DMA-map the
 * payload, and fill the TFD descriptor before ringing the doorbell.
 * Consumes 'm' on failure; returns 0 or an errno.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_node *in = IWM_NODE(ni);
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
	int nsegs;
	uint8_t tid, type;
	int i, totlen, error, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	tid = 0;
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	/* Pick the TX rate and program retry limits / rate_n_flags. */
	rinfo = iwm_tx_fill_cmd(sc, in, m, tx);

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX && do software encryption. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rinfo->rate;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		ieee80211_radiotap_tx(vap, m);
	}


	totlen = m->m_pkthdr.len;

	flags = 0;
	/* Unicast frames expect an ACK from the peer. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* RTS/CTS protection for long unicast data frames. */
	if (type == IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	/* Non-data and multicast frames go out via the auxiliary station. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	/* Power-save frame timeout depends on the management subtype. */
	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
		} else {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
		}
	} else {
		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	/* The header travels inside the TX command; map only the payload. */
	m_adj(m, hdrlen);
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		if (error != EFBIG) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			m_freem(m);
			return (ENOBUFS);
		}
		m = m1;

		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending txd %p, in %p\n", data, data->in);
	KASSERT(data->in != NULL, ("node is NULL"));

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
	    ring->qid, ring->cur, totlen, nsegs,
	    le32toh(tx->tx_flags),
	    le32toh(tx->rate_n_flags),
	    tx->initial_rate_index
	    );

	/* Fill TX descriptor. */
	/* TB0/TB1 carry the TX command + header; payload follows. */
	desc->num_tbs = 2 + nsegs;

	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	for (i = 0; i < nsegs; i++) {
		seg = &segs[i];
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	/* Flush payload, command, and descriptor before the doorbell. */
	bus_dmamap_sync(ring->data_dmat, data->map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
3812
3813 static int
3814 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3815     const struct ieee80211_bpf_params *params)
3816 {
3817         struct ieee80211com *ic = ni->ni_ic;
3818         struct iwm_softc *sc = ic->ic_softc;
3819         int error = 0;
3820
3821         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3822             "->%s begin\n", __func__);
3823
3824         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3825                 m_freem(m);
3826                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3827                     "<-%s not RUNNING\n", __func__);
3828                 return (ENETDOWN);
3829         }
3830
3831         IWM_LOCK(sc);
3832         /* XXX fix this */
3833         if (params == NULL) {
3834                 error = iwm_tx(sc, m, ni, 0);
3835         } else {
3836                 error = iwm_tx(sc, m, ni, 0);
3837         }
3838         sc->sc_tx_timer = 5;
3839         IWM_UNLOCK(sc);
3840
3841         return (error);
3842 }
3843
3844 /*
3845  * mvm/tx.c
3846  */
3847
3848 /*
3849  * Note that there are transports that buffer frames before they reach
3850  * the firmware. This means that after flush_tx_path is called, the
3851  * queue might not be empty. The race-free way to handle this is to:
3852  * 1) set the station as draining
3853  * 2) flush the Tx path
3854  * 3) wait for the transport queues to be empty
3855  */
3856 int
3857 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3858 {
3859         int ret;
3860         struct iwm_tx_path_flush_cmd flush_cmd = {
3861                 .queues_ctl = htole32(tfd_msk),
3862                 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3863         };
3864
3865         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3866             sizeof(flush_cmd), &flush_cmd);
3867         if (ret)
3868                 device_printf(sc->sc_dev,
3869                     "Flushing tx queue failed: %d\n", ret);
3870         return ret;
3871 }
3872
3873 /*
3874  * BEGIN mvm/quota.c
3875  */
3876
3877 static int
3878 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
3879 {
3880         struct iwm_time_quota_cmd cmd;
3881         int i, idx, ret, num_active_macs, quota, quota_rem;
3882         int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3883         int n_ifs[IWM_MAX_BINDINGS] = {0, };
3884         uint16_t id;
3885
3886         memset(&cmd, 0, sizeof(cmd));
3887
3888         /* currently, PHY ID == binding ID */
3889         if (ivp) {
3890                 id = ivp->phy_ctxt->id;
3891                 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3892                 colors[id] = ivp->phy_ctxt->color;
3893
3894                 if (1)
3895                         n_ifs[id] = 1;
3896         }
3897
3898         /*
3899          * The FW's scheduling session consists of
3900          * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3901          * equally between all the bindings that require quota
3902          */
3903         num_active_macs = 0;
3904         for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3905                 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3906                 num_active_macs += n_ifs[i];
3907         }
3908
3909         quota = 0;
3910         quota_rem = 0;
3911         if (num_active_macs) {
3912                 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3913                 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3914         }
3915
3916         for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3917                 if (colors[i] < 0)
3918                         continue;
3919
3920                 cmd.quotas[idx].id_and_color =
3921                         htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3922
3923                 if (n_ifs[i] <= 0) {
3924                         cmd.quotas[idx].quota = htole32(0);
3925                         cmd.quotas[idx].max_duration = htole32(0);
3926                 } else {
3927                         cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3928                         cmd.quotas[idx].max_duration = htole32(0);
3929                 }
3930                 idx++;
3931         }
3932
3933         /* Give the remainder of the session to the first binding */
3934         cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3935
3936         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3937             sizeof(cmd), &cmd);
3938         if (ret)
3939                 device_printf(sc->sc_dev,
3940                     "%s: Failed to send quota: %d\n", __func__, ret);
3941         return ret;
3942 }
3943
3944 /*
3945  * END mvm/quota.c
3946  */
3947
3948 /*
3949  * ieee80211 routines
3950  */
3951
/*
 * Change to AUTH state in 80211 state machine.  Roughly matches what
 * Linux does in bss_info_changed().
 *
 * Uploads or refreshes the MAC context, points PHY context 0 at the BSS
 * channel, binds the vap to it, temporarily disables powersaving while
 * the station is added, then "protects" the session with a time event so
 * the firmware stays on-channel during the handshake.
 *
 * Called from iwm_newstate() with the IWM lock held.  Returns 0 on
 * success or an errno; on failure net80211 abandons the state change.
 */
static int
iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
{
	struct ieee80211_node *ni;
	struct iwm_node *in;
	struct iwm_vap *iv = IWM_VAP(vap);
	uint32_t duration;
	int error;

	/*
	 * XXX i have a feeling that the vap node is being
	 * freed from underneath us. Grr.  Hold a reference for the
	 * duration of this function; released at "out".
	 */
	ni = ieee80211_ref_node(vap->iv_bss);
	in = IWM_NODE(ni);
	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
	    "%s: called; vap=%p, bss ni=%p\n",
	    __func__,
	    vap,
	    ni);

	in->in_assoc = 0;

	/*
	 * Firmware bug - it'll crash if the beacon interval is less
	 * than 16. We can't avoid connecting at all, so refuse the
	 * station state change, this will cause net80211 to abandon
	 * attempts to connect to this AP, and eventually wpa_s will
	 * blacklist the AP...
	 */
	if (ni->ni_intval < 16) {
		device_printf(sc->sc_dev,
		    "AP %s beacon interval is %d, refusing due to firmware bug!\n",
		    ether_sprintf(ni->ni_bssid), ni->ni_intval);
		error = EINVAL;
		goto out;
	}

	/* Program the multicast filter before touching contexts. */
	error = iwm_allow_mcast(vap, sc);
	if (error) {
		device_printf(sc->sc_dev,
		    "%s: failed to set multicast\n", __func__);
		goto out;
	}

	/*
	 * This is where it deviates from what Linux does.
	 *
	 * Linux iwlwifi doesn't reset the nic each time, nor does it
	 * call ctxt_add() here.  Instead, it adds it during vap creation,
	 * and always does a mac_ctx_changed().
	 *
	 * The openbsd port doesn't attempt to do that - it reset things
	 * at odd states and does the add here.
	 *
	 * So, until the state handling is fixed (ie, we never reset
	 * the NIC except for a firmware failure, which should drag
	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
	 * contexts that are required), let's do a dirty hack here.
	 */
	if (iv->is_uploaded) {
		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update MAC\n", __func__);
			goto out;
		}
	} else {
		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to add MAC\n", __func__);
			goto out;
		}
	}

	/* Retune PHY context 0 to the BSS channel and attach it to the vap. */
	if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
	    in->in_ni.ni_chan, 1, 1)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: failed update phy ctxt\n", __func__);
		goto out;
	}
	iv->phy_ctxt = &sc->sc_phyctxt[0];

	if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: binding update cmd\n", __func__);
		goto out;
	}
	/*
	 * Authentication becomes unreliable when powersaving is left enabled
	 * here. Powersaving will be activated again when association has
	 * finished or is aborted.
	 */
	iv->ps_disabled = TRUE;
	error = iwm_mvm_power_update_mac(sc);
	iv->ps_disabled = FALSE;
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: failed to update power management\n",
		    __func__);
		goto out;
	}
	if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: failed to add sta\n", __func__);
		goto out;
	}

	/*
	 * Prevent the FW from wandering off channel during association
	 * by "protecting" the session with a time event.
	 */
	/* XXX duration is in units of TU, not MS */
	duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
	iwm_mvm_protect_session(sc, iv, duration, 500 /* XXX magic number */);
	DELAY(100);

	error = 0;
out:
	ieee80211_free_node(ni);
	return (error);
}
4077
4078 static int
4079 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
4080 {
4081         uint32_t tfd_msk;
4082
4083         /*
4084          * Ok, so *technically* the proper set of calls for going
4085          * from RUN back to SCAN is:
4086          *
4087          * iwm_mvm_power_mac_disable(sc, in);
4088          * iwm_mvm_mac_ctxt_changed(sc, vap);
4089          * iwm_mvm_rm_sta(sc, in);
4090          * iwm_mvm_update_quotas(sc, NULL);
4091          * iwm_mvm_mac_ctxt_changed(sc, in);
4092          * iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4093          * iwm_mvm_mac_ctxt_remove(sc, in);
4094          *
4095          * However, that freezes the device not matter which permutations
4096          * and modifications are attempted.  Obviously, this driver is missing
4097          * something since it works in the Linux driver, but figuring out what
4098          * is missing is a little more complicated.  Now, since we're going
4099          * back to nothing anyway, we'll just do a complete device reset.
4100          * Up your's, device!
4101          */
4102         /*
4103          * Just using 0xf for the queues mask is fine as long as we only
4104          * get here from RUN state.
4105          */
4106         tfd_msk = 0xf;
4107         mbufq_drain(&sc->sc_snd);
4108         iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
4109         /*
4110          * We seem to get away with just synchronously sending the
4111          * IWM_TXPATH_FLUSH command.
4112          */
4113 //      iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
4114         iwm_stop_device(sc);
4115         iwm_init_hw(sc);
4116         if (in)
4117                 in->in_assoc = 0;
4118         return 0;
4119
4120 #if 0
4121         int error;
4122
4123         iwm_mvm_power_mac_disable(sc, in);
4124
4125         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4126                 device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
4127                 return error;
4128         }
4129
4130         if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
4131                 device_printf(sc->sc_dev, "sta remove fail %d\n", error);
4132                 return error;
4133         }
4134         error = iwm_mvm_rm_sta(sc, in);
4135         in->in_assoc = 0;
4136         iwm_mvm_update_quotas(sc, NULL);
4137         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4138                 device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
4139                 return error;
4140         }
4141         iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4142
4143         iwm_mvm_mac_ctxt_remove(sc, in);
4144
4145         return error;
4146 #endif
4147 }
4148
4149 static struct ieee80211_node *
4150 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4151 {
4152         return malloc(sizeof (struct iwm_node), M_80211_NODE,
4153             M_NOWAIT | M_ZERO);
4154 }
4155
4156 uint8_t
4157 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4158 {
4159         int i;
4160         uint8_t rval;
4161
4162         for (i = 0; i < rs->rs_nrates; i++) {
4163                 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4164                 if (rval == iwm_rates[ridx].rate)
4165                         return rs->rs_rates[i];
4166         }
4167
4168         return 0;
4169 }
4170
/*
 * Construct the link-quality (rate-selection) command for 'in' from the
 * node's negotiated legacy rate set and store it in in->in_lq.  The
 * command itself is sent to the firmware by the caller (see the
 * IWM_LQ_CMD submission in iwm_newstate()'s RUN case).
 *
 * XXX TODO: this is not 11n aware at all; only legacy CCK/OFDM rates
 * are placed in the table.
 */
static void
iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
//	int txant = 0;

	/* Bail if the rate set can't fit into (or fill) the lq table. */
	if (nrates > nitems(lq->rs_table)) {
		device_printf(sc->sc_dev,
		    "%s: node supports %d rates, driver handles "
		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
		return;
	}
	if (nrates == 0) {
		device_printf(sc->sc_dev,
		    "%s: node supports 0 rates, odd!\n", __func__);
		return;
	}

	/*
	 * XXX .. and most of iwm_node is not initialised explicitly;
	 * it's all just 0x0 passed to the firmware.
	 */

	/* first figure out which rates we should support */
	/* XXX TODO: this isn't 11n aware /at all/ */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
	    "%s: nrates=%d\n", __func__, nrates);

	/*
	 * Loop over nrates and populate in_ridx from the highest
	 * rate to the lowest rate.  Remember, in_ridx[] has
	 * IEEE80211_RATE_MAXSIZE entries!
	 */
	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX) {
			/* Entry stays -1 from the memset above. */
			device_printf(sc->sc_dev,
			    "%s: WARNING: device rate for %d not found!\n",
			    __func__, rate);
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
			    __func__,
			    i,
			    rate,
			    ridx);
			in->in_ridx[i] = ridx;
		}
	}

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/* For HT, always enable RTS/CTS to avoid excessive retries. */
	if (ni->ni_flags & IEEE80211_NODE_HT)
		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	/*
	 * XXX TODO: this should be looping over the min of nrates
	 * and LQ_MAX_RETRY_NUM.  Sigh.
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

#if 0
		if (txant == 0)
			txant = iwm_mvm_get_valid_tx_ant(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;
#else
		nextant = iwm_mvm_get_valid_tx_ant(sc);
#endif
		/*
		 * Map the rate id into a rate index into
		 * our hardware table containing the
		 * configuration to use for this rate.
		 */
		ridx = in->in_ridx[i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "station rate i=%d, rate=%d, hw=%x\n",
		    i, iwm_rates[ridx].rate, tab);
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	for (i = nrates; i < nitems(lq->rs_table); i++) {
		KASSERT(tab != 0, ("invalid tab"));
		lq->rs_table[i] = htole32(tab);
	}
}
4289
4290 static int
4291 iwm_media_change(struct ifnet *ifp)
4292 {
4293         struct ieee80211vap *vap = ifp->if_softc;
4294         struct ieee80211com *ic = vap->iv_ic;
4295         struct iwm_softc *sc = ic->ic_softc;
4296         int error;
4297
4298         error = ieee80211_media_change(ifp);
4299         if (error != ENETRESET)
4300                 return error;
4301
4302         IWM_LOCK(sc);
4303         if (ic->ic_nrunning > 0) {
4304                 iwm_stop(sc);
4305                 iwm_init(sc);
4306         }
4307         IWM_UNLOCK(sc);
4308         return error;
4309 }
4310
4311
/*
 * net80211 state-change handler.  Performs the driver/firmware work for
 * each 802.11 FSM transition, swapping the net80211 comlock for the IWM
 * lock around firmware commands, and finally chains to the saved
 * net80211 handler (ivp->iv_newstate).
 */
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_node *in;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
	    "switching state %s -> %s\n",
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate]);
	IEEE80211_UNLOCK(ic);
	IWM_LOCK(sc);

	/* Stop the scan LED blink when leaving the SCAN state. */
	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
		iwm_led_blink_stop(sc);

	/* disable beacon filtering if we're hopping out of RUN */
	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
		iwm_mvm_disable_beacon_filter(sc);

		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
			in->in_assoc = 0;

		if (nstate == IEEE80211_S_INIT) {
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			error = ivp->iv_newstate(vap, nstate, arg);
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
			/* Full device reset; no node to mark unassociated. */
			iwm_release(sc, NULL);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return error;
		}

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 *
		 * XXX TODO: fix this for FreeBSD!
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Force transition to INIT; MGT=%d\n", arg);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			/* Always pass arg as -1 since we can't Tx right now. */
			/*
			 * XXX arg is just ignored anyway when transitioning
			 *     to IEEE80211_S_INIT.
			 */
			vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Going INIT->SCAN\n");
			nstate = IEEE80211_S_SCAN;
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
	case IEEE80211_S_SCAN:
		if (vap->iv_state == IEEE80211_S_AUTH ||
		    vap->iv_state == IEEE80211_S_ASSOC) {
			int myerr;
			/*
			 * Let net80211 process the transition first, then
			 * tear down the firmware station/mac/binding state
			 * built up by iwm_auth().
			 */
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			myerr = ivp->iv_newstate(vap, nstate, arg);
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
			error = iwm_mvm_rm_sta(sc, vap, FALSE);
			if (error) {
				device_printf(sc->sc_dev,
				    "%s: Failed to remove station: %d\n",
				    __func__, error);
			}
			error = iwm_mvm_mac_ctxt_changed(sc, vap);
			if (error) {
				device_printf(sc->sc_dev,
				    "%s: Failed to change mac context: %d\n",
				    __func__, error);
			}
			error = iwm_mvm_binding_remove_vif(sc, ivp);
			if (error) {
				device_printf(sc->sc_dev,
				    "%s: Failed to remove channel ctx: %d\n",
				    __func__, error);
			}
			ivp->phy_ctxt = NULL;
			error = iwm_mvm_power_update_mac(sc);
			if (error != 0) {
				device_printf(sc->sc_dev,
				    "%s: failed to update power management\n",
				    __func__);
			}
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			/* net80211 handler already ran; return its result. */
			return myerr;
		}
		break;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state: %d\n",
			    __func__, error);
		}
		break;

	case IEEE80211_S_ASSOC:
		/*
		 * EBS may be disabled due to previous failures reported by FW.
		 * Reset EBS status here assuming environment has been changed.
		 */
		sc->last_ebs_successful = TRUE;
		break;

	case IEEE80211_S_RUN:
	{
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		in = IWM_NODE(vap->iv_bss);
		/* Update the association state, now we have it all */
		/* (eg associd comes in at this point */
		error = iwm_mvm_update_sta(sc, in);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update STA\n", __func__);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return error;
		}
		in->in_assoc = 1;
		error = iwm_mvm_mac_ctxt_changed(sc, vap);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update MAC: %d\n", __func__, error);
		}

		iwm_mvm_sf_update(sc, vap, FALSE);
		iwm_mvm_enable_beacon_filter(sc, ivp);
		iwm_mvm_power_update_mac(sc);
		iwm_mvm_update_quotas(sc, ivp);
		/* Build and send the rate-selection (LQ) command. */
		iwm_setrates(sc, in);

		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: IWM_LQ_CMD failed\n", __func__);
		}

		iwm_mvm_led_enable(sc);
		break;
	}

	default:
		break;
	}
	IWM_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	return (ivp->iv_newstate(vap, nstate, arg));
}
4492
4493 void
4494 iwm_endscan_cb(void *arg, int pending)
4495 {
4496         struct iwm_softc *sc = arg;
4497         struct ieee80211com *ic = &sc->sc_ic;
4498
4499         IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4500             "%s: scan ended\n",
4501             __func__);
4502
4503         ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4504 }
4505
4506 static int
4507 iwm_send_bt_init_conf(struct iwm_softc *sc)
4508 {
4509         struct iwm_bt_coex_cmd bt_cmd;
4510
4511         bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4512         bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4513
4514         return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4515             &bt_cmd);
4516 }
4517
4518 static boolean_t
4519 iwm_mvm_is_lar_supported(struct iwm_softc *sc)
4520 {
4521         boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4522         boolean_t tlv_lar = fw_has_capa(&sc->ucode_capa,
4523                                         IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4524
4525         if (iwm_lar_disable)
4526                 return FALSE;
4527
4528         /*
4529          * Enable LAR only if it is supported by the FW (TLV) &&
4530          * enabled in the NVM
4531          */
4532         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4533                 return nvm_lar && tlv_lar;
4534         else
4535                 return tlv_lar;
4536 }
4537
4538 static boolean_t
4539 iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *sc)
4540 {
4541         return fw_has_api(&sc->ucode_capa,
4542                           IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4543                fw_has_capa(&sc->ucode_capa,
4544                            IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4545 }
4546
/*
 * Send an MCC (mobile country code) update command to the firmware and,
 * when compiled with IWM_DEBUG, log the regulatory domain the firmware
 * selected.  Returns 0 when LAR is unsupported (nothing to do) or on
 * success; otherwise an errno from the command path.
 */
static int
iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
{
	struct iwm_mcc_update_cmd mcc_cmd;
	struct iwm_host_cmd hcmd = {
		.id = IWM_MCC_UPDATE_CMD,
		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
		.data = { &mcc_cmd },
	};
	int ret;
#ifdef IWM_DEBUG
	struct iwm_rx_packet *pkt;
	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
	struct iwm_mcc_update_resp *mcc_resp;
	int n_channels;
	uint16_t mcc;
#endif
	/* Newer firmware replies with the v2 response layout. */
	int resp_v2 = fw_has_capa(&sc->ucode_capa,
	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);

	if (!iwm_mvm_is_lar_supported(sc)) {
		IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
		    __func__);
		return 0;
	}

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	/* Pack the two-letter country code into a big-endian-style u16. */
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if (iwm_mvm_is_wifi_mcc_supported(sc))
		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;

	/* Command length depends on which response version the FW speaks. */
	if (resp_v2)
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
	else
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);

	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
	    "send MCC update to FW with '%c%c' src = %d\n",
	    alpha2[0], alpha2[1], mcc_cmd.source_id);

	ret = iwm_send_cmd(sc, &hcmd);
	if (ret)
		return ret;

#ifdef IWM_DEBUG
	pkt = hcmd.resp_pkt;

	/* Extract MCC response */
	if (resp_v2) {
		mcc_resp = (void *)pkt->data;
		/*
		 * NOTE(review): mcc is read without le16toh() while
		 * n_channels goes through le32toh(); on a big-endian host
		 * the logged domain would appear byte-swapped — confirm
		 * against the response struct definition.
		 */
		mcc = mcc_resp->mcc;
		n_channels =  le32toh(mcc_resp->n_channels);
	} else {
		mcc_resp_v1 = (void *)pkt->data;
		mcc = mcc_resp_v1->mcc;
		n_channels =  le32toh(mcc_resp_v1->n_channels);
	}

	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
	if (mcc == 0)
		mcc = 0x3030;  /* "00" - world */

	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
	    "regulatory domain '%c%c' (%d channels available)\n",
	    mcc >> 8, mcc & 0xff, n_channels);
#endif
	iwm_free_resp(sc, &hcmd);

	return 0;
}
4619
4620 static void
4621 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4622 {
4623         struct iwm_host_cmd cmd = {
4624                 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4625                 .len = { sizeof(uint32_t), },
4626                 .data = { &backoff, },
4627         };
4628
4629         if (iwm_send_cmd(sc, &cmd) != 0) {
4630                 device_printf(sc->sc_dev,
4631                     "failed to change thermal tx backoff\n");
4632         }
4633 }
4634
/*
 * Full hardware bring-up.  Starts the NIC, runs the INIT firmware image,
 * restarts the hardware, loads the regular runtime firmware and then pushes
 * the initial configuration: Smart Fifo, Bluetooth coex, Tx antennas, PHY
 * calibration data, the auxiliary (scan) station, PHY contexts, thermal Tx
 * backoff (7000 family only), device power, regulatory (MCC), UMAC scan
 * config, the per-AC Tx queues, and finally disables beacon filtering.
 *
 * Returns 0 on success.  Failures before the runtime firmware is alive are
 * returned directly; any later failure stops the device first (error label).
 */
static int
iwm_init_hw(struct iwm_softc *sc)
{
        struct ieee80211com *ic = &sc->sc_ic;
        int error, i, ac;

        sc->sf_state = IWM_SF_UNINIT;

        if ((error = iwm_start_hw(sc)) != 0) {
                printf("iwm_start_hw: failed %d\n", error);
                return error;
        }

        /* Load and run the INIT firmware image first. */
        if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
                printf("iwm_run_init_mvm_ucode: failed %d\n", error);
                return error;
        }

        /*
         * should stop and start HW since that INIT
         * image just loaded
         */
        iwm_stop_device(sc);
        sc->sc_ps_disabled = FALSE;
        if ((error = iwm_start_hw(sc)) != 0) {
                device_printf(sc->sc_dev, "could not initialize hardware\n");
                return error;
        }

        /* Restart, this time with the regular firmware */
        error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
        if (error) {
                device_printf(sc->sc_dev, "could not load firmware\n");
                goto error;
        }

        /* A Smart Fifo failure is reported but deliberately not fatal. */
        error = iwm_mvm_sf_update(sc, NULL, FALSE);
        if (error)
                device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n");

        if ((error = iwm_send_bt_init_conf(sc)) != 0) {
                device_printf(sc->sc_dev, "bt init conf failed\n");
                goto error;
        }

        error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
        if (error != 0) {
                device_printf(sc->sc_dev, "antenna config failed\n");
                goto error;
        }

        /* Send phy db control command and then phy db calibration */
        if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
                goto error;

        if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
                device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
                goto error;
        }

        /* Add auxiliary station for scanning */
        if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
                device_printf(sc->sc_dev, "add_aux_sta failed\n");
                goto error;
        }

        for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
                /*
                 * The channel used here isn't relevant as it's
                 * going to be overwritten in the other flows.
                 * For now use the first channel we have.
                 */
                if ((error = iwm_mvm_phy_ctxt_add(sc,
                    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
                        goto error;
        }

        /* Initialize tx backoffs to the minimum. */
        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
                iwm_mvm_tt_tx_backoff(sc, 0);

        error = iwm_mvm_power_update_device(sc);
        if (error)
                goto error;

        /* "ZZ" presumably selects the default/world regulatory domain. */
        if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
                goto error;

        if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
                if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
                        goto error;
        }

        /* Enable Tx queues. */
        for (ac = 0; ac < WME_NUM_AC; ac++) {
                error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
                    iwm_mvm_ac_to_tx_fifo[ac]);
                if (error)
                        goto error;
        }

        if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
                device_printf(sc->sc_dev, "failed to disable beacon filter\n");
                goto error;
        }

        return 0;

 error:
        iwm_stop_device(sc);
        return error;
}
4747
4748 /* Allow multicast from our BSSID. */
4749 static int
4750 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4751 {
4752         struct ieee80211_node *ni = vap->iv_bss;
4753         struct iwm_mcast_filter_cmd *cmd;
4754         size_t size;
4755         int error;
4756
4757         size = roundup(sizeof(*cmd), 4);
4758         cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4759         if (cmd == NULL)
4760                 return ENOMEM;
4761         cmd->filter_own = 1;
4762         cmd->port_id = 0;
4763         cmd->count = 0;
4764         cmd->pass_all = 1;
4765         IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4766
4767         error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4768             IWM_CMD_SYNC, size, cmd);
4769         free(cmd, M_DEVBUF);
4770
4771         return (error);
4772 }
4773
4774 /*
4775  * ifnet interfaces
4776  */
4777
4778 static void
4779 iwm_init(struct iwm_softc *sc)
4780 {
4781         int error;
4782
4783         if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4784                 return;
4785         }
4786         sc->sc_generation++;
4787         sc->sc_flags &= ~IWM_FLAG_STOPPED;
4788
4789         if ((error = iwm_init_hw(sc)) != 0) {
4790                 printf("iwm_init_hw failed %d\n", error);
4791                 iwm_stop(sc);
4792                 return;
4793         }
4794
4795         /*
4796          * Ok, firmware loaded and we are jogging
4797          */
4798         sc->sc_flags |= IWM_FLAG_HW_INITED;
4799         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4800 }
4801
4802 static int
4803 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4804 {
4805         struct iwm_softc *sc;
4806         int error;
4807
4808         sc = ic->ic_softc;
4809
4810         IWM_LOCK(sc);
4811         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4812                 IWM_UNLOCK(sc);
4813                 return (ENXIO);
4814         }
4815         error = mbufq_enqueue(&sc->sc_snd, m);
4816         if (error) {
4817                 IWM_UNLOCK(sc);
4818                 return (error);
4819         }
4820         iwm_start(sc);
4821         IWM_UNLOCK(sc);
4822         return (0);
4823 }
4824
4825 /*
4826  * Dequeue packets from sendq and call send.
4827  */
4828 static void
4829 iwm_start(struct iwm_softc *sc)
4830 {
4831         struct ieee80211_node *ni;
4832         struct mbuf *m;
4833         int ac = 0;
4834
4835         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4836         while (sc->qfullmsk == 0 &&
4837                 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4838                 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4839                 if (iwm_tx(sc, m, ni, ac) != 0) {
4840                         if_inc_counter(ni->ni_vap->iv_ifp,
4841                             IFCOUNTER_OERRORS, 1);
4842                         ieee80211_free_node(ni);
4843                         continue;
4844                 }
4845                 sc->sc_tx_timer = 15;
4846         }
4847         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4848 }
4849
/*
 * Bring the interface down.  Flags are updated before the hardware is
 * touched: clearing IWM_FLAG_HW_INITED makes iwm_transmit() reject new
 * frames, and bumping sc_generation invalidates in-flight firmware
 * interactions.  Called with the IWM lock held (see iwm_parent()).
 */
static void
iwm_stop(struct iwm_softc *sc)
{

        sc->sc_flags &= ~IWM_FLAG_HW_INITED;
        sc->sc_flags |= IWM_FLAG_STOPPED;
        sc->sc_generation++;
        iwm_led_blink_stop(sc);
        sc->sc_tx_timer = 0;                    /* disarm the Tx watchdog */
        iwm_stop_device(sc);
        sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
}
4862
4863 static void
4864 iwm_watchdog(void *arg)
4865 {
4866         struct iwm_softc *sc = arg;
4867         struct ieee80211com *ic = &sc->sc_ic;
4868
4869         if (sc->sc_tx_timer > 0) {
4870                 if (--sc->sc_tx_timer == 0) {
4871                         device_printf(sc->sc_dev, "device timeout\n");
4872 #ifdef IWM_DEBUG
4873                         iwm_nic_error(sc);
4874 #endif
4875                         ieee80211_restart_all(ic);
4876                         counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4877                         return;
4878                 }
4879         }
4880         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4881 }
4882
4883 static void
4884 iwm_parent(struct ieee80211com *ic)
4885 {
4886         struct iwm_softc *sc = ic->ic_softc;
4887         int startall = 0;
4888
4889         IWM_LOCK(sc);
4890         if (ic->ic_nrunning > 0) {
4891                 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4892                         iwm_init(sc);
4893                         startall = 1;
4894                 }
4895         } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4896                 iwm_stop(sc);
4897         IWM_UNLOCK(sc);
4898         if (startall)
4899                 ieee80211_start_all(ic);
4900 }
4901
4902 /*
4903  * The interrupt side of things
4904  */
4905
4906 /*
4907  * error dumping routines are from iwlwifi/mvm/utils.c
4908  */
4909
4910 /*
4911  * Note: This structure is read from the device with IO accesses,
4912  * and the reading already does the endian conversion. As it is
4913  * read with uint32_t-sized accesses, any members with a different size
4914  * need to be ordered correctly though!
4915  */
struct iwm_error_event_table {
        uint32_t valid;         /* (nonzero) valid, (0) log is empty */
        uint32_t error_id;              /* type of error */
        uint32_t trm_hw_status0;        /* TRM HW status */
        uint32_t trm_hw_status1;        /* TRM HW status */
        uint32_t blink2;                /* branch link */
        uint32_t ilink1;                /* interrupt link */
        uint32_t ilink2;                /* interrupt link */
        uint32_t data1;         /* error-specific data */
        uint32_t data2;         /* error-specific data */
        uint32_t data3;         /* error-specific data */
        uint32_t bcon_time;             /* beacon timer */
        uint32_t tsf_low;               /* network timestamp function timer */
        uint32_t tsf_hi;                /* network timestamp function timer */
        uint32_t gp1;           /* GP1 timer register */
        uint32_t gp2;           /* GP2 timer register */
        uint32_t fw_rev_type;   /* firmware revision type */
        uint32_t major;         /* uCode version major */
        uint32_t minor;         /* uCode version minor */
        uint32_t hw_ver;                /* HW Silicon version */
        uint32_t brd_ver;               /* HW board version */
        uint32_t log_pc;                /* log program counter */
        uint32_t frame_ptr;             /* frame pointer */
        uint32_t stack_ptr;             /* stack pointer */
        uint32_t hcmd;          /* last host command header */
        uint32_t isr0;          /* isr status register LMPM_NIC_ISR0:
                                 * rxtx_flag */
        uint32_t isr1;          /* isr status register LMPM_NIC_ISR1:
                                 * host_flag */
        uint32_t isr2;          /* isr status register LMPM_NIC_ISR2:
                                 * enc_flag */
        uint32_t isr3;          /* isr status register LMPM_NIC_ISR3:
                                 * time_flag */
        uint32_t isr4;          /* isr status register LMPM_NIC_ISR4:
                                 * wico interrupt */
        uint32_t last_cmd_id;   /* last HCMD id handled by the firmware */
        uint32_t wait_event;            /* wait event() caller address */
        uint32_t l2p_control;   /* L2pControlField */
        uint32_t l2p_duration;  /* L2pDurationField */
        uint32_t l2p_mhvalid;   /* L2pMhValidBits */
        uint32_t l2p_addr_match;        /* L2pAddrMatchStat */
        uint32_t lmpm_pmg_sel;  /* indicate which clocks are turned on
                                 * (LMPM_PMG_SEL) */
        uint32_t u_timestamp;   /* date and time of the firmware
                                 * compilation */
        uint32_t flow_handler;  /* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
4963
4964 /*
4965  * UMAC error struct - relevant starting from family 8000 chip.
4966  * Note: This structure is read from the device with IO accesses,
4967  * and the reading already does the endian conversion. As it is
4968  * read with u32-sized accesses, any members with a different size
4969  * need to be ordered correctly though!
4970  */
struct iwm_umac_error_event_table {
        uint32_t valid;         /* (nonzero) valid, (0) log is empty */
        uint32_t error_id;      /* type of error */
        uint32_t blink1;        /* branch link */
        uint32_t blink2;        /* branch link */
        uint32_t ilink1;        /* interrupt link */
        uint32_t ilink2;        /* interrupt link */
        uint32_t data1;         /* error-specific data */
        uint32_t data2;         /* error-specific data */
        uint32_t data3;         /* error-specific data */
        uint32_t umac_major;    /* UMAC firmware version, major */
        uint32_t umac_minor;    /* UMAC firmware version, minor */
        uint32_t frame_pointer; /* core register 27 */
        uint32_t stack_pointer; /* core register 28 */
        uint32_t cmd_header;    /* latest host cmd sent to UMAC */
        uint32_t nic_isr_pref;  /* ISR status register */
} __packed;
4988
/*
 * Error-log dump constants (sizes in bytes).  Used by iwm_nic_error()
 * and iwm_nic_umac_error() in the validity check on table.valid before
 * the log dump is printed.
 */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
4991
4992 #ifdef IWM_DEBUG
/*
 * Firmware error-id to human-readable-name table.  The final entry,
 * "ADVANCED_SYSASSERT" with num == 0, is the catch-all returned by
 * iwm_desc_lookup() for ids not listed here.
 */
struct {
        const char *name;
        uint8_t num;
} advanced_lookup[] = {
        { "NMI_INTERRUPT_WDG", 0x34 },
        { "SYSASSERT", 0x35 },
        { "UCODE_VERSION_MISMATCH", 0x37 },
        { "BAD_COMMAND", 0x38 },
        { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
        { "FATAL_ERROR", 0x3D },
        { "NMI_TRM_HW_ERR", 0x46 },
        { "NMI_INTERRUPT_TRM", 0x4C },
        { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
        { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
        { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
        { "NMI_INTERRUPT_HOST", 0x66 },
        { "NMI_INTERRUPT_ACTION_PT", 0x7C },
        { "NMI_INTERRUPT_UNKNOWN", 0x84 },
        { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
        { "ADVANCED_SYSASSERT", 0 },
};
5014
5015 static const char *
5016 iwm_desc_lookup(uint32_t num)
5017 {
5018         int i;
5019
5020         for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5021                 if (advanced_lookup[i].num == num)
5022                         return advanced_lookup[i].name;
5023
5024         /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5025         return advanced_lookup[i].name;
5026 }
5027
/*
 * Dump the UMAC error event table to the console.  The table is read
 * from device memory at the address the firmware placed in
 * sc->umac_error_event_table (relevant for family 8000+ parts; see
 * the struct definition above).
 */
static void
iwm_nic_umac_error(struct iwm_softc *sc)
{
        struct iwm_umac_error_event_table table;
        uint32_t base;

        base = sc->umac_error_event_table;

        /* Sanity-check the firmware-provided log address. */
        if (base < 0x800000) {
                device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
                    base);
                return;
        }

        /* NB: length appears to be in 32-bit words, hence the division --
         * matches the iwm_read_mem() call in iwm_nic_error(). */
        if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
                device_printf(sc->sc_dev, "reading errlog failed\n");
                return;
        }

        /* Effectively "table.valid != 0": any valid entry count passes. */
        if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
                device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
                device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
                    sc->sc_flags, table.valid);
        }

        device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
                iwm_desc_lookup(table.error_id));
        device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
        device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
        device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
            table.ilink1);
        device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
            table.ilink2);
        device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
        device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
        device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
        device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
        device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
        device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
            table.frame_pointer);
        device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
            table.stack_pointer);
        device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
        device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
            table.nic_isr_pref);
}
5074
5075 /*
5076  * Support for dumping the error log seemed like a good idea ...
5077  * but it's mostly hex junk and the only sensible thing is the
5078  * hw/ucode revision (which we know anyway).  Since it's here,
5079  * I'll just leave it in, just in case e.g. the Intel guys want to
5080  * help us decipher some "ADVANCED_SYSASSERT" later.
5081  */
/*
 * Dump the LMAC error event table to the console, then chain to the
 * UMAC dump if the firmware advertised a UMAC table.  The table is
 * read from device memory at sc->error_event_table.
 */
static void
iwm_nic_error(struct iwm_softc *sc)
{
        struct iwm_error_event_table table;
        uint32_t base;

        device_printf(sc->sc_dev, "dumping device error log\n");
        base = sc->error_event_table;
        /* Sanity-check the firmware-provided log address. */
        if (base < 0x800000) {
                device_printf(sc->sc_dev,
                    "Invalid error log pointer 0x%08x\n", base);
                return;
        }

        /* NB: length appears to be in 32-bit words, hence the division. */
        if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
                device_printf(sc->sc_dev, "reading errlog failed\n");
                return;
        }

        if (!table.valid) {
                device_printf(sc->sc_dev, "errlog not found, skipping\n");
                return;
        }

        /* Effectively "table.valid != 0" (already guaranteed above). */
        if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
                device_printf(sc->sc_dev, "Start Error Log Dump:\n");
                device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
                    sc->sc_flags, table.valid);
        }

        device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
            iwm_desc_lookup(table.error_id));
        device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
            table.trm_hw_status0);
        device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
            table.trm_hw_status1);
        device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
        device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
        device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
        device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
        device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
        device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
        device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
        device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
        device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
        device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
        device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
        device_printf(sc->sc_dev, "%08X | uCode revision type\n",
            table.fw_rev_type);
        device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
        device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
        device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
        device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
        device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
        device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
        device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
        device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
        device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
        device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
        device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
        device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
        device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
        device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
        device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
        device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
        device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
        device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
        device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);

        if (sc->umac_error_event_table)
                iwm_nic_umac_error(sc);
}
5154 #endif
5155
5156 static void
5157 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5158 {
5159         struct ieee80211com *ic = &sc->sc_ic;
5160         struct iwm_cmd_response *cresp;
5161         struct mbuf *m1;
5162         uint32_t offset = 0;
5163         uint32_t maxoff = IWM_RBUF_SIZE;
5164         uint32_t nextoff;
5165         boolean_t stolen = FALSE;
5166
5167 #define HAVEROOM(a)     \
5168     ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
5169
5170         while (HAVEROOM(offset)) {
5171                 struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5172                     offset);
5173                 int qid, idx, code, len;
5174
5175                 qid = pkt->hdr.qid;
5176                 idx = pkt->hdr.idx;
5177
5178                 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5179
5180                 /*
5181                  * randomly get these from the firmware, no idea why.
5182                  * they at least seem harmless, so just ignore them for now
5183                  */
5184                 if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5185                     pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5186                         break;
5187                 }
5188
5189                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5190                     "rx packet qid=%d idx=%d type=%x\n",
5191                     qid & ~0x80, pkt->hdr.idx, code);
5192
5193                 len = le32toh(pkt->len_n_flags) & IWM_FH_RSCSR_FRAME_SIZE_MSK;
5194                 len += sizeof(uint32_t); /* account for status word */
5195                 nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
5196
5197                 iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5198
5199                 switch (code) {
5200                 case IWM_REPLY_RX_PHY_CMD:
5201                         iwm_mvm_rx_rx_phy_cmd(sc, pkt);
5202                         break;
5203
5204                 case IWM_REPLY_RX_MPDU_CMD: {
5205                         /*
5206                          * If this is the last frame in the RX buffer, we
5207                          * can directly feed the mbuf to the sharks here.
5208                          */
5209                         struct iwm_rx_packet *nextpkt = mtodoff(m,
5210                             struct iwm_rx_packet *, nextoff);
5211                         if (!HAVEROOM(nextoff) ||
5212                             (nextpkt->hdr.code == 0 &&
5213                              (nextpkt->hdr.qid & ~0x80) == 0 &&
5214                              nextpkt->hdr.idx == 0) ||
5215                             (nextpkt->len_n_flags ==
5216                              htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5217                                 if (iwm_mvm_rx_rx_mpdu(sc, m, offset, stolen)) {
5218                                         stolen = FALSE;
5219                                         /* Make sure we abort the loop */
5220                                         nextoff = maxoff;
5221                                 }
5222                                 break;
5223                         }
5224
5225                         /*
5226                          * Use m_copym instead of m_split, because that
5227                          * makes it easier to keep a valid rx buffer in
5228                          * the ring, when iwm_mvm_rx_rx_mpdu() fails.
5229                          *
5230                          * We need to start m_copym() at offset 0, to get the
5231                          * M_PKTHDR flag preserved.
5232                          */
5233                         m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
5234                         if (m1) {
5235                                 if (iwm_mvm_rx_rx_mpdu(sc, m1, offset, stolen))
5236                                         stolen = TRUE;
5237                                 else
5238                                         m_freem(m1);
5239                         }
5240                         break;
5241                 }
5242
5243                 case IWM_TX_CMD:
5244                         iwm_mvm_rx_tx_cmd(sc, pkt);
5245                         break;
5246
5247                 case IWM_MISSED_BEACONS_NOTIFICATION: {
5248                         struct iwm_missed_beacons_notif *resp;
5249                         int missed;
5250
5251                         /* XXX look at mac_id to determine interface ID */
5252                         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5253
5254                         resp = (void *)pkt->data;
5255                         missed = le32toh(resp->consec_missed_beacons);
5256
5257                         IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5258                             "%s: MISSED_BEACON: mac_id=%d, "
5259                             "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5260                             "num_rx=%d\n",
5261                             __func__,
5262                             le32toh(resp->mac_id),
5263                             le32toh(resp->consec_missed_beacons_since_last_rx),
5264                             le32toh(resp->consec_missed_beacons),
5265                             le32toh(resp->num_expected_beacons),
5266                             le32toh(resp->num_recvd_beacons));
5267
5268                         /* Be paranoid */
5269                         if (vap == NULL)
5270                                 break;
5271
5272                         /* XXX no net80211 locking? */
5273                         if (vap->iv_state == IEEE80211_S_RUN &&
5274                             (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5275                                 if (missed > vap->iv_bmissthreshold) {
5276                                         /* XXX bad locking; turn into task */
5277                                         IWM_UNLOCK(sc);
5278                                         ieee80211_beacon_miss(ic);
5279                                         IWM_LOCK(sc);
5280                                 }
5281                         }
5282
5283                         break;
5284                 }
5285
5286                 case IWM_MFUART_LOAD_NOTIFICATION:
5287                         break;
5288
5289                 case IWM_MVM_ALIVE:
5290                         break;
5291
5292                 case IWM_CALIB_RES_NOTIF_PHY_DB:
5293                         break;
5294
5295                 case IWM_STATISTICS_NOTIFICATION: {
5296                         struct iwm_notif_statistics *stats;
5297                         stats = (void *)pkt->data;
5298                         memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5299                         sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
5300                         break;
5301                 }
5302
5303                 case IWM_NVM_ACCESS_CMD:
5304                 case IWM_MCC_UPDATE_CMD:
5305                         if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5306                                 memcpy(sc->sc_cmd_resp,
5307                                     pkt, sizeof(sc->sc_cmd_resp));
5308                         }
5309                         break;
5310
5311                 case IWM_MCC_CHUB_UPDATE_CMD: {
5312                         struct iwm_mcc_chub_notif *notif;
5313                         notif = (void *)pkt->data;
5314
5315                         sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5316                         sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5317                         sc->sc_fw_mcc[2] = '\0';
5318                         IWM_DPRINTF(sc, IWM_DEBUG_LAR,
5319                             "fw source %d sent CC '%s'\n",
5320                             notif->source_id, sc->sc_fw_mcc);
5321                         break;
5322                 }
5323
5324                 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5325                 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5326                                  IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5327                         struct iwm_dts_measurement_notif_v1 *notif;
5328
5329                         if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5330                                 device_printf(sc->sc_dev,
5331                                     "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5332                                 break;
5333                         }
5334                         notif = (void *)pkt->data;
5335                         IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5336                             "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5337                             notif->temp);
5338                         break;
5339                 }
5340
5341                 case IWM_PHY_CONFIGURATION_CMD:
5342                 case IWM_TX_ANT_CONFIGURATION_CMD:
5343                 case IWM_ADD_STA:
5344                 case IWM_MAC_CONTEXT_CMD:
5345                 case IWM_REPLY_SF_CFG_CMD:
5346                 case IWM_POWER_TABLE_CMD:
5347                 case IWM_PHY_CONTEXT_CMD:
5348                 case IWM_BINDING_CONTEXT_CMD:
5349                 case IWM_TIME_EVENT_CMD:
5350                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5351                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5352                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5353                 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5354                 case IWM_SCAN_OFFLOAD_ABORT_CMD:
5355                 case IWM_REPLY_BEACON_FILTERING_CMD:
5356                 case IWM_MAC_PM_POWER_TABLE:
5357                 case IWM_TIME_QUOTA_CMD:
5358                 case IWM_REMOVE_STA:
5359                 case IWM_TXPATH_FLUSH:
5360                 case IWM_LQ_CMD:
5361                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
5362                                  IWM_FW_PAGING_BLOCK_CMD):
5363                 case IWM_BT_CONFIG:
5364                 case IWM_REPLY_THERMAL_MNG_BACKOFF:
5365                         cresp = (void *)pkt->data;
5366                         if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5367                                 memcpy(sc->sc_cmd_resp,
5368                                     pkt, sizeof(*pkt)+sizeof(*cresp));
5369                         }
5370                         break;
5371
5372                 /* ignore */
5373                 case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
5374                         break;
5375
5376                 case IWM_INIT_COMPLETE_NOTIF:
5377                         break;
5378
5379                 case IWM_SCAN_OFFLOAD_COMPLETE:
5380                         iwm_mvm_rx_lmac_scan_complete_notif(sc, pkt);
5381                         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5382                                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5383                                 ieee80211_runtask(ic, &sc->sc_es_task);
5384                         }
5385                         break;
5386
5387                 case IWM_SCAN_ITERATION_COMPLETE: {
5388                         struct iwm_lmac_scan_complete_notif *notif;
5389                         notif = (void *)pkt->data;
5390                         break;
5391                 }
5392
5393                 case IWM_SCAN_COMPLETE_UMAC:
5394                         iwm_mvm_rx_umac_scan_complete_notif(sc, pkt);
5395                         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5396                                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5397                                 ieee80211_runtask(ic, &sc->sc_es_task);
5398                         }
5399                         break;
5400
5401                 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5402                         struct iwm_umac_scan_iter_complete_notif *notif;
5403                         notif = (void *)pkt->data;
5404
5405                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5406                             "complete, status=0x%x, %d channels scanned\n",
5407                             notif->status, notif->scanned_channels);
5408                         break;
5409                 }
5410
5411                 case IWM_REPLY_ERROR: {
5412                         struct iwm_error_resp *resp;
5413                         resp = (void *)pkt->data;
5414
5415                         device_printf(sc->sc_dev,
5416                             "firmware error 0x%x, cmd 0x%x\n",
5417                             le32toh(resp->error_type),
5418                             resp->cmd_id);
5419                         break;
5420                 }
5421
5422                 case IWM_TIME_EVENT_NOTIFICATION: {
5423                         struct iwm_time_event_notif *notif;
5424                         notif = (void *)pkt->data;
5425
5426                         IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5427                             "TE notif status = 0x%x action = 0x%x\n",
5428                             notif->status, notif->action);
5429                         break;
5430                 }
5431
5432                 case IWM_MCAST_FILTER_CMD:
5433                         break;
5434
5435                 case IWM_SCD_QUEUE_CFG: {
5436                         struct iwm_scd_txq_cfg_rsp *rsp;
5437                         rsp = (void *)pkt->data;
5438
5439                         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5440                             "queue cfg token=0x%x sta_id=%d "
5441                             "tid=%d scd_queue=%d\n",
5442                             rsp->token, rsp->sta_id, rsp->tid,
5443                             rsp->scd_queue);
5444                         break;
5445                 }
5446
5447                 default:
5448                         device_printf(sc->sc_dev,
5449                             "frame %d/%d %x UNHANDLED (this should "
5450                             "not happen)\n", qid & ~0x80, idx,
5451                             pkt->len_n_flags);
5452                         break;
5453                 }
5454
5455                 /*
5456                  * Why test bit 0x80?  The Linux driver:
5457                  *
5458                  * There is one exception:  uCode sets bit 15 when it
5459                  * originates the response/notification, i.e. when the
5460                  * response/notification is not a direct response to a
5461                  * command sent by the driver.  For example, uCode issues
5462                  * IWM_REPLY_RX when it sends a received frame to the driver;
5463                  * it is not a direct response to any driver command.
5464                  *
5465                  * Ok, so since when is 7 == 15?  Well, the Linux driver
5466                  * uses a slightly different format for pkt->hdr, and "qid"
5467                  * is actually the upper byte of a two-byte field.
5468                  */
5469                 if (!(qid & (1 << 7)))
5470                         iwm_cmd_done(sc, pkt);
5471
5472                 offset = nextoff;
5473         }
5474         if (stolen)
5475                 m_freem(m);
5476 #undef HAVEROOM
5477 }
5478
5479 /*
5480  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5481  * Basic structure from if_iwn
5482  */
static void
iwm_notif_intr(struct iwm_softc *sc)
{
        uint16_t hw;

        /* Pick up the firmware's latest closed-RB index from the status page. */
        bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
            BUS_DMASYNC_POSTREAD);

        /* Ring indices are 12 bits wide. */
        hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;

        /*
         * Process responses
         */
        while (sc->rxq.cur != hw) {
                struct iwm_rx_ring *ring = &sc->rxq;
                struct iwm_rx_data *data = &ring->data[ring->cur];

                /* Make the DMA'd packet contents visible to the CPU. */
                bus_dmamap_sync(ring->data_dmat, data->map,
                    BUS_DMASYNC_POSTREAD);

                IWM_DPRINTF(sc, IWM_DEBUG_INTR,
                    "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
                iwm_handle_rxb(sc, data->m);

                ring->cur = (ring->cur + 1) % IWM_RX_RING_COUNT;
        }

        /*
         * Tell the firmware that it can reuse the ring entries that
         * we have just processed.
         * Seems like the hardware gets upset unless we align
         * the write by 8??
         */
        hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
        IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, rounddown2(hw, 8));
}
5519
/*
 * Interrupt handler.  Collects the pending interrupt causes, either from
 * the ICT table or directly from the CSR registers, acknowledges them,
 * and dispatches each cause in turn.
 */
static void
iwm_intr(void *arg)
{
        struct iwm_softc *sc = arg;
        int handled = 0;
        int r1, r2, rv = 0;
        int isperiodic = 0;

        IWM_LOCK(sc);
        /* Mask all interrupts while we service this one. */
        IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

        if (sc->sc_flags & IWM_FLAG_USE_ICT) {
                uint32_t *ict = sc->ict_dma.vaddr;
                int tmp;

                tmp = htole32(ict[sc->ict_cur]);
                if (!tmp)
                        goto out_ena;   /* nothing pending in the ICT */

                /*
                 * ok, there was something.  keep plowing until we have all.
                 */
                r1 = r2 = 0;
                while (tmp) {
                        r1 |= tmp;
                        /* Clear the slot so the entry is not seen twice. */
                        ict[sc->ict_cur] = 0;
                        sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
                        tmp = htole32(ict[sc->ict_cur]);
                }

                /* this is where the fun begins.  don't ask */
                if (r1 == 0xffffffff)
                        r1 = 0;

                /* i am not expected to understand this */
                if (r1 & 0xc0000)
                        r1 |= 0x8000;
                /* Expand the compressed ICT bit layout into CSR_INT format. */
                r1 = (0xff & r1) | ((0xff00 & r1) << 16);
        } else {
                r1 = IWM_READ(sc, IWM_CSR_INT);
                /* "hardware gone" (where, fishing?) */
                if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
                        goto out;
                r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
        }
        if (r1 == 0 && r2 == 0) {
                goto out_ena;   /* spurious interrupt */
        }

        /* Acknowledge the causes we are about to service. */
        IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

        /* Safely ignore these bits for debug checks below */
        r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);

        if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
                int i;
                struct ieee80211com *ic = &sc->sc_ic;
                struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

#ifdef IWM_DEBUG
                iwm_nic_error(sc);
#endif
                /* Dump driver status (TX and RX rings) while we're here. */
                device_printf(sc->sc_dev, "driver status:\n");
                for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
                        struct iwm_tx_ring *ring = &sc->txq[i];
                        device_printf(sc->sc_dev,
                            "  tx ring %2d: qid=%-2d cur=%-3d "
                            "queued=%-3d\n",
                            i, ring->qid, ring->cur, ring->queued);
                }
                device_printf(sc->sc_dev,
                    "  rx ring: cur=%d\n", sc->rxq.cur);
                device_printf(sc->sc_dev,
                    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);

                /* Don't stop the device; just do a VAP restart */
                IWM_UNLOCK(sc);

                if (vap == NULL) {
                        printf("%s: null vap\n", __func__);
                        return;
                }

                device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
                    "restarting\n", __func__, vap->iv_state);

                ieee80211_restart_all(ic);
                return;
        }

        if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
                handled |= IWM_CSR_INT_BIT_HW_ERR;
                device_printf(sc->sc_dev, "hardware error, stopping device\n");
                iwm_stop(sc);
                rv = 1;
                goto out;
        }

        /* firmware chunk loaded */
        if (r1 & IWM_CSR_INT_BIT_FH_TX) {
                IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
                handled |= IWM_CSR_INT_BIT_FH_TX;
                /* Signal whoever sleeps on sc_fw waiting for this chunk. */
                sc->sc_fw_chunk_done = 1;
                wakeup(&sc->sc_fw);
        }

        if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
                handled |= IWM_CSR_INT_BIT_RF_KILL;
                if (iwm_check_rfkill(sc)) {
                        device_printf(sc->sc_dev,
                            "%s: rfkill switch, disabling interface\n",
                            __func__);
                        iwm_stop(sc);
                }
        }

        /*
         * The Linux driver uses periodic interrupts to avoid races.
         * We cargo-cult like it's going out of fashion.
         */
        if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
                handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
                IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
                /* Only disable periodic ints when no real RX cause is set. */
                if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
                        IWM_WRITE_1(sc,
                            IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
                isperiodic = 1;
        }

        if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
                handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
                IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

                iwm_notif_intr(sc);

                /* enable periodic interrupt, see above */
                if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
                        IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
                            IWM_CSR_INT_PERIODIC_ENA);
        }

        if (__predict_false(r1 & ~handled))
                IWM_DPRINTF(sc, IWM_DEBUG_INTR,
                    "%s: unhandled interrupts: %x\n", __func__, r1);
        rv = 1;

 out_ena:
        /* Re-enable the interrupt sources we normally listen to. */
        iwm_restore_interrupts(sc);
 out:
        IWM_UNLOCK(sc);
        return;
}
5673
/*
 * Autoconf glue-sniffing
 *
 * PCI vendor/device IDs for the supported adapters; matched against the
 * probe data below.
 */
#define PCI_VENDOR_INTEL                0x8086
#define PCI_PRODUCT_INTEL_WL_3160_1     0x08b3
#define PCI_PRODUCT_INTEL_WL_3160_2     0x08b4
#define PCI_PRODUCT_INTEL_WL_3165_1     0x3165
#define PCI_PRODUCT_INTEL_WL_3165_2     0x3166
#define PCI_PRODUCT_INTEL_WL_7260_1     0x08b1
#define PCI_PRODUCT_INTEL_WL_7260_2     0x08b2
#define PCI_PRODUCT_INTEL_WL_7265_1     0x095a
#define PCI_PRODUCT_INTEL_WL_7265_2     0x095b
#define PCI_PRODUCT_INTEL_WL_8260_1     0x24f3
#define PCI_PRODUCT_INTEL_WL_8260_2     0x24f4
5688
/* Table mapping PCI device IDs to the per-chip configuration. */
static const struct iwm_devices {
        uint16_t                device; /* PCI device ID */
        const struct iwm_cfg    *cfg;   /* chip-specific configuration */
} iwm_devices[] = {
        { PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
        { PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
        { PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
        { PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
        { PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
        { PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
        { PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
        { PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
        { PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
        { PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
};
5704
5705 static int
5706 iwm_probe(device_t dev)
5707 {
5708         int i;
5709
5710         for (i = 0; i < nitems(iwm_devices); i++) {
5711                 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5712                     pci_get_device(dev) == iwm_devices[i].device) {
5713                         device_set_desc(dev, iwm_devices[i].cfg->name);
5714                         return (BUS_PROBE_DEFAULT);
5715                 }
5716         }
5717
5718         return (ENXIO);
5719 }
5720
5721 static int
5722 iwm_dev_check(device_t dev)
5723 {
5724         struct iwm_softc *sc;
5725         uint16_t devid;
5726         int i;
5727
5728         sc = device_get_softc(dev);
5729
5730         devid = pci_get_device(dev);
5731         for (i = 0; i < nitems(iwm_devices); i++) {
5732                 if (iwm_devices[i].device == devid) {
5733                         sc->cfg = iwm_devices[i].cfg;
5734                         return (0);
5735                 }
5736         }
5737         device_printf(dev, "unknown adapter type\n");
5738         return ENXIO;
5739 }
5740
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT   0x041   /* PCI retry timeout config register */
5743
5744 static int
5745 iwm_pci_attach(device_t dev)
5746 {
5747         struct iwm_softc *sc;
5748         int count, error, rid;
5749         uint16_t reg;
5750
5751         sc = device_get_softc(dev);
5752
5753         /* We disable the RETRY_TIMEOUT register (0x41) to keep
5754          * PCI Tx retries from interfering with C3 CPU state */
5755         pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5756
5757         /* Enable bus-mastering and hardware bug workaround. */
5758         pci_enable_busmaster(dev);
5759         reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5760         /* if !MSI */
5761         if (reg & PCIM_STATUS_INTxSTATE) {
5762                 reg &= ~PCIM_STATUS_INTxSTATE;
5763         }
5764         pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5765
5766         rid = PCIR_BAR(0);
5767         sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5768             RF_ACTIVE);
5769         if (sc->sc_mem == NULL) {
5770                 device_printf(sc->sc_dev, "can't map mem space\n");
5771                 return (ENXIO);
5772         }
5773         sc->sc_st = rman_get_bustag(sc->sc_mem);
5774         sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5775
5776         /* Install interrupt handler. */
5777         count = 1;
5778         rid = 0;
5779         if (pci_alloc_msi(dev, &count) == 0)
5780                 rid = 1;
5781         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5782             (rid != 0 ? 0 : RF_SHAREABLE));
5783         if (sc->sc_irq == NULL) {
5784                 device_printf(dev, "can't map interrupt\n");
5785                         return (ENXIO);
5786         }
5787         error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5788             NULL, iwm_intr, sc, &sc->sc_ih);
5789         if (sc->sc_ih == NULL) {
5790                 device_printf(dev, "can't establish interrupt");
5791                         return (ENXIO);
5792         }
5793         sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5794
5795         return (0);
5796 }
5797
5798 static void
5799 iwm_pci_detach(device_t dev)
5800 {
5801         struct iwm_softc *sc = device_get_softc(dev);
5802
5803         if (sc->sc_irq != NULL) {
5804                 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5805                 bus_release_resource(dev, SYS_RES_IRQ,
5806                     rman_get_rid(sc->sc_irq), sc->sc_irq);
5807                 pci_release_msi(dev);
5808         }
5809         if (sc->sc_mem != NULL)
5810                 bus_release_resource(dev, SYS_RES_MEMORY,
5811                     rman_get_rid(sc->sc_mem), sc->sc_mem);
5812 }
5813
5814
5815
/*
 * Device attach: initialize driver state, map the hardware, determine
 * the chip revision, allocate all DMA rings, and defer the remaining
 * (sleep-capable) setup to iwm_preinit() via a config intrhook.
 *
 * Returns 0 on success or ENXIO on failure; the fail path tears down
 * everything allocated so far via iwm_detach_local().
 */
static int
iwm_attach(device_t dev)
{
        struct iwm_softc *sc = device_get_softc(dev);
        struct ieee80211com *ic = &sc->sc_ic;
        int error;
        int txq_i, i;

        sc->sc_dev = dev;
        sc->sc_attached = 1;
        IWM_LOCK_INIT(sc);
        mbufq_init(&sc->sc_snd, ifqmaxlen);
        callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
        callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
        TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);

        sc->sc_notif_wait = iwm_notification_wait_init(sc);
        if (sc->sc_notif_wait == NULL) {
                device_printf(dev, "failed to init notification wait struct\n");
                goto fail;
        }

        sc->sf_state = IWM_SF_UNINIT;

        /* Init phy db */
        sc->sc_phy_db = iwm_phy_db_init(sc);
        if (!sc->sc_phy_db) {
                device_printf(dev, "Cannot init phy_db\n");
                goto fail;
        }

        /* Set EBS as successful as long as not stated otherwise by the FW. */
        sc->last_ebs_successful = TRUE;

        /* PCI attach */
        error = iwm_pci_attach(dev);
        if (error != 0)
                goto fail;

        /* No synchronous command response expected yet. */
        sc->sc_wantresp = -1;

        /* Check device type */
        error = iwm_dev_check(dev);
        if (error != 0)
                goto fail;

        sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
        /*
         * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
         * changed, and now the revision step also includes bit 0-1 (no more
         * "dash" value). To keep hw_rev backwards compatible - we'll store it
         * in the old format.
         */
        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
                sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
                                (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

        if (iwm_prepare_card_hw(sc) != 0) {
                device_printf(dev, "could not initialize hardware\n");
                goto fail;
        }

        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
                int ret;
                uint32_t hw_step;

                /*
                 * In order to recognize C step the driver should read the
                 * chip version id located at the AUX bus MISC address.
                 */
                IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
                            IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
                DELAY(2);

                /* Wait for the MAC clock before touching the AUX bus. */
                ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
                                   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
                                   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
                                   25000);
                if (!ret) {
                        device_printf(sc->sc_dev,
                            "Failed to wake up the nic\n");
                        goto fail;
                }

                if (iwm_nic_lock(sc)) {
                        hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
                        hw_step |= IWM_ENABLE_WFPM;
                        iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
                        hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
                        hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
                        if (hw_step == 0x3)
                                sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
                                                (IWM_SILICON_C_STEP << 2);
                        iwm_nic_unlock(sc);
                } else {
                        device_printf(sc->sc_dev, "Failed to lock the nic\n");
                        goto fail;
                }
        }

        /* special-case 7265D, it has the same PCI IDs. */
        if (sc->cfg == &iwm7265_cfg &&
            (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
                sc->cfg = &iwm7265d_cfg;
        }

        /* Allocate DMA memory for firmware transfers. */
        if ((error = iwm_alloc_fwmem(sc)) != 0) {
                device_printf(dev, "could not allocate memory for firmware\n");
                goto fail;
        }

        /* Allocate "Keep Warm" page. */
        if ((error = iwm_alloc_kw(sc)) != 0) {
                device_printf(dev, "could not allocate keep warm page\n");
                goto fail;
        }

        /* We use ICT interrupts */
        if ((error = iwm_alloc_ict(sc)) != 0) {
                device_printf(dev, "could not allocate ICT table\n");
                goto fail;
        }

        /* Allocate TX scheduler "rings". */
        if ((error = iwm_alloc_sched(sc)) != 0) {
                device_printf(dev, "could not allocate TX scheduler rings\n");
                goto fail;
        }

        /* Allocate TX rings */
        for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
                if ((error = iwm_alloc_tx_ring(sc,
                    &sc->txq[txq_i], txq_i)) != 0) {
                        device_printf(dev,
                            "could not allocate TX ring %d\n",
                            txq_i);
                        goto fail;
                }
        }

        /* Allocate RX ring. */
        if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
                device_printf(dev, "could not allocate RX ring\n");
                goto fail;
        }

        /* Clear pending interrupts. */
        IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

        ic->ic_softc = sc;
        ic->ic_name = device_get_nameunit(sc->sc_dev);
        ic->ic_phytype = IEEE80211_T_OFDM;      /* not only, but not used */
        ic->ic_opmode = IEEE80211_M_STA;        /* default to BSS mode */

        /* Set device capabilities. */
        ic->ic_caps =
            IEEE80211_C_STA |
            IEEE80211_C_WPA |           /* WPA/RSN */
            IEEE80211_C_WME |
            IEEE80211_C_PMGT |
            IEEE80211_C_SHSLOT |        /* short slot time supported */
            IEEE80211_C_SHPREAMBLE      /* short preamble supported */
//          IEEE80211_C_BGSCAN          /* capable of bg scanning */
            ;
        /* Advertise full-offload scanning */
        ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
        for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
                sc->sc_phyctxt[i].id = i;
                sc->sc_phyctxt[i].color = 0;
                sc->sc_phyctxt[i].ref = 0;
                sc->sc_phyctxt[i].channel = NULL;
        }

        /* Default noise floor */
        sc->sc_noise = -96;

        /* Max RSSI */
        sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;

        /*
         * Defer firmware load and net80211 attach to iwm_preinit(),
         * which runs once interrupts are available.
         */
        sc->sc_preinit_hook.ich_func = iwm_preinit;
        sc->sc_preinit_hook.ich_arg = sc;
        if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
                device_printf(dev, "config_intrhook_establish failed\n");
                goto fail;
        }

#ifdef IWM_DEBUG
        SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
            CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

        IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
            "<-%s\n", __func__);

        return 0;

        /* Free allocated memory if something failed during attachment. */
fail:
        iwm_detach_local(sc, 0);

        return ENXIO;
}
6020
6021 static int
6022 iwm_is_valid_ether_addr(uint8_t *addr)
6023 {
6024         char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6025
6026         if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6027                 return (FALSE);
6028
6029         return (TRUE);
6030 }
6031
6032 static int
6033 iwm_wme_update(struct ieee80211com *ic)
6034 {
6035 #define IWM_EXP2(x)     ((1 << (x)) - 1)        /* CWmin = 2^ECWmin - 1 */
6036         struct iwm_softc *sc = ic->ic_softc;
6037         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6038         struct iwm_vap *ivp = IWM_VAP(vap);
6039         struct iwm_node *in;
6040         struct wmeParams tmp[WME_NUM_AC];
6041         int aci, error;
6042
6043         if (vap == NULL)
6044                 return (0);
6045
6046         IEEE80211_LOCK(ic);
6047         for (aci = 0; aci < WME_NUM_AC; aci++)
6048                 tmp[aci] = ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
6049         IEEE80211_UNLOCK(ic);
6050
6051         IWM_LOCK(sc);
6052         for (aci = 0; aci < WME_NUM_AC; aci++) {
6053                 const struct wmeParams *ac = &tmp[aci];
6054                 ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6055                 ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6056                 ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6057                 ivp->queue_params[aci].edca_txop =
6058                     IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6059         }
6060         ivp->have_wme = TRUE;
6061         if (ivp->is_uploaded && vap->iv_bss != NULL) {
6062                 in = IWM_NODE(vap->iv_bss);
6063                 if (in->in_assoc) {
6064                         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
6065                                 device_printf(sc->sc_dev,
6066                                     "%s: failed to update MAC\n", __func__);
6067                         }
6068                 }
6069         }
6070         IWM_UNLOCK(sc);
6071
6072         return (0);
6073 #undef IWM_EXP2
6074 }
6075
/*
 * Deferred attach, run from the config intrhook established in
 * iwm_attach(): start the hardware, run the init firmware once to read
 * the NVM, then attach to net80211 and install the driver callbacks.
 * On failure the driver tears itself down via iwm_detach_local().
 */
static void
iwm_preinit(void *arg)
{
        struct iwm_softc *sc = arg;
        device_t dev = sc->sc_dev;
        struct ieee80211com *ic = &sc->sc_ic;
        int error;

        IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
            "->%s\n", __func__);

        IWM_LOCK(sc);
        if ((error = iwm_start_hw(sc)) != 0) {
                device_printf(dev, "could not initialize hardware\n");
                IWM_UNLOCK(sc);
                goto fail;
        }

        /* Run the init ucode once (to obtain NVM data), then power down. */
        error = iwm_run_init_mvm_ucode(sc, 1);
        iwm_stop_device(sc);
        if (error) {
                IWM_UNLOCK(sc);
                goto fail;
        }
        device_printf(dev,
            "hw rev 0x%x, fw ver %s, address %s\n",
            sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
            sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));

        /* not all hardware can do 5GHz band */
        if (!sc->nvm_data->sku_cap_band_52GHz_enable)
                memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
                    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
        IWM_UNLOCK(sc);

        iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
            ic->ic_channels);

        /*
         * At this point we've committed - if we fail to do setup,
         * we now also have to tear down the net80211 state.
         */
        ieee80211_ifattach(ic);
        /* Install driver methods into net80211. */
        ic->ic_vap_create = iwm_vap_create;
        ic->ic_vap_delete = iwm_vap_delete;
        ic->ic_raw_xmit = iwm_raw_xmit;
        ic->ic_node_alloc = iwm_node_alloc;
        ic->ic_scan_start = iwm_scan_start;
        ic->ic_scan_end = iwm_scan_end;
        ic->ic_update_mcast = iwm_update_mcast;
        ic->ic_getradiocaps = iwm_init_channel_map;
        ic->ic_set_channel = iwm_set_channel;
        ic->ic_scan_curchan = iwm_scan_curchan;
        ic->ic_scan_mindwell = iwm_scan_mindwell;
        ic->ic_wme.wme_update = iwm_wme_update;
        ic->ic_parent = iwm_parent;
        ic->ic_transmit = iwm_transmit;
        iwm_radiotap_attach(sc);
        if (bootverbose)
                ieee80211_announce(ic);

        IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
            "<-%s\n", __func__);
        config_intrhook_disestablish(&sc->sc_preinit_hook);

        return;
fail:
        config_intrhook_disestablish(&sc->sc_preinit_hook);
        iwm_detach_local(sc, 0);
}
6146
/*
 * Attach the interface to 802.11 radiotap.
 * Registers the driver's TX and RX radiotap headers with net80211.
 */
static void
iwm_radiotap_attach(struct iwm_softc *sc)
{
        struct ieee80211com *ic = &sc->sc_ic;

        IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
            "->%s begin\n", __func__);
        ieee80211_radiotap_attach(ic,
            &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
                IWM_TX_RADIOTAP_PRESENT,
            &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
                IWM_RX_RADIOTAP_PRESENT);
        IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
            "->%s end\n", __func__);
}
6165
/*
 * net80211 callback: create a vap.  Only a single vap is supported at
 * a time; the iwm_vap wraps the ieee80211vap and carries the firmware
 * MAC context id/color plus the driver's WME/power-save state.
 */
static struct ieee80211vap *
iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
        struct iwm_vap *ivp;
        struct ieee80211vap *vap;

        if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
                return NULL;
        ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
        vap = &ivp->iv_vap;
        ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
        vap->iv_bmissthreshold = 10;            /* override default */
        /* Override with driver methods. */
        ivp->iv_newstate = vap->iv_newstate;    /* saved for chaining */
        vap->iv_newstate = iwm_newstate;

        ivp->id = IWM_DEFAULT_MACID;
        ivp->color = IWM_DEFAULT_COLOR;

        ivp->have_wme = FALSE;
        ivp->ps_disabled = FALSE;

        ieee80211_ratectl_init(vap);
        /* Complete setup. */
        ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
            mac);
        ic->ic_opmode = opmode;

        return vap;
}
6199
6200 static void
6201 iwm_vap_delete(struct ieee80211vap *vap)
6202 {
6203         struct iwm_vap *ivp = IWM_VAP(vap);
6204
6205         ieee80211_ratectl_deinit(vap);
6206         ieee80211_vap_detach(vap);
6207         free(ivp, M_80211_VAP);
6208 }
6209
/*
 * net80211 callback: start a firmware-offloaded scan, using the UMAC
 * scan API when the ucode advertises that capability and the LMAC API
 * otherwise.  On failure the scan is cancelled back to net80211.
 */
static void
iwm_scan_start(struct ieee80211com *ic)
{
        struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
        struct iwm_softc *sc = ic->ic_softc;
        int error;

        IWM_LOCK(sc);
        if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
                /* This should not be possible */
                device_printf(sc->sc_dev,
                    "%s: Previous scan not completed yet\n", __func__);
        }
        if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
                error = iwm_mvm_umac_scan(sc);
        else
                error = iwm_mvm_lmac_scan(sc);
        if (error != 0) {
                device_printf(sc->sc_dev, "could not initiate scan\n");
                IWM_UNLOCK(sc);
                /* Tell net80211 the scan is over so it does not wait. */
                ieee80211_cancel_scan(vap);
        } else {
                sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
                iwm_led_blink_start(sc);
                IWM_UNLOCK(sc);
        }
}
6237
/*
 * net80211 scan-end callback (runs from the ic->ic_tq taskqueue).
 * Stops LED blinking, waits for the firmware scan to stop if one was
 * running, and cancels any still-queued end-scan task.
 */
static void
iwm_scan_end(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;

	IWM_LOCK(sc);
	iwm_led_blink_stop(sc);
	/* When associated, switch the LED back to steady-on. */
	if (vap->iv_state == IEEE80211_S_RUN)
		iwm_mvm_led_enable(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/*
		 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
		 * taskqueue.
		 */
		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
		iwm_mvm_scan_stop_wait(sc);
	}
	IWM_UNLOCK(sc);

	/*
	 * Make sure we don't race, if sc_es_task is still enqueued here.
	 * This is to make sure that it won't call ieee80211_scan_done
	 * when we have already started the next scan.
	 */
	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
}
6266
/*
 * net80211 multicast-filter update callback; this driver does not
 * program a hardware multicast filter, so it is intentionally empty.
 */
static void
iwm_update_mcast(struct ieee80211com *ic)
{
}
6271
/*
 * net80211 set-channel callback; intentionally empty.  NOTE(review):
 * presumably channel changes are driven through the firmware command
 * path elsewhere in the driver — confirm before relying on this.
 */
static void
iwm_set_channel(struct ieee80211com *ic)
{
}
6276
/*
 * net80211 per-channel scan callback; intentionally empty since the
 * scan (including dwell) is handled by firmware commands, not by the
 * host stepping channels.
 */
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
6281
/*
 * net80211 minimum-dwell scan callback; intentionally empty, like the
 * other scan stubs above.  (Dropped the redundant bare "return;" the
 * void body carried, for consistency with its siblings.)
 */
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
6287
/*
 * (Re)initialization task: restarts the device.  Serializes with any
 * concurrent init/stop via the IWM_FLAG_BUSY flag, sleeping on
 * sc->sc_flags until the flag clears.
 *
 * arg1 is the softc (void * because this is a task callback).
 */
void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;

	IWM_LOCK(sc);
	/* Wait for any other init/stop in progress to finish. */
	while (sc->sc_flags & IWM_FLAG_BUSY)
		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
	sc->sc_flags |= IWM_FLAG_BUSY;
	iwm_stop(sc);
	/* Only bring the device back up if an interface is running. */
	if (sc->sc_ic.ic_nrunning > 0)
		iwm_init(sc);
	sc->sc_flags &= ~IWM_FLAG_BUSY;
	/* Release anyone waiting on the BUSY flag. */
	wakeup(&sc->sc_flags);
	IWM_UNLOCK(sc);
}
6304
6305 static int
6306 iwm_resume(device_t dev)
6307 {
6308         struct iwm_softc *sc = device_get_softc(dev);
6309         int do_reinit = 0;
6310
6311         /*
6312          * We disable the RETRY_TIMEOUT register (0x41) to keep
6313          * PCI Tx retries from interfering with C3 CPU state.
6314          */
6315         pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6316         iwm_init_task(device_get_softc(dev));
6317
6318         IWM_LOCK(sc);
6319         if (sc->sc_flags & IWM_FLAG_SCANNING) {
6320                 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6321                 do_reinit = 1;
6322         }
6323         IWM_UNLOCK(sc);
6324
6325         if (do_reinit)
6326                 ieee80211_resume_all(&sc->sc_ic);
6327
6328         return 0;
6329 }
6330
6331 static int
6332 iwm_suspend(device_t dev)
6333 {
6334         int do_stop = 0;
6335         struct iwm_softc *sc = device_get_softc(dev);
6336
6337         do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6338
6339         ieee80211_suspend_all(&sc->sc_ic);
6340
6341         if (do_stop) {
6342                 IWM_LOCK(sc);
6343                 iwm_stop(sc);
6344                 sc->sc_flags |= IWM_FLAG_SCANNING;
6345                 IWM_UNLOCK(sc);
6346         }
6347
6348         return (0);
6349 }
6350
/*
 * Common teardown for detach and failed attach.  Order matters: drain
 * tasks and callouts first, stop the device, detach from net80211,
 * then free driver resources, and only destroy the lock last.
 *
 * do_net80211 is zero when attach failed before ieee80211_ifattach(),
 * in which case the net80211 teardown steps are skipped.
 * Idempotent via sc_attached; always returns 0.
 */
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	device_t dev = sc->sc_dev;
	int i;

	/* Nothing to do if attach never completed (or already detached). */
	if (!sc->sc_attached)
		return 0;
	sc->sc_attached = 0;

	if (do_net80211)
		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);

	callout_drain(&sc->sc_led_blink_to);
	callout_drain(&sc->sc_watchdog_to);
	iwm_stop_device(sc);
	if (do_net80211) {
		ieee80211_ifdetach(&sc->sc_ic);
	}

	iwm_phy_db_free(sc->sc_phy_db);
	sc->sc_phy_db = NULL;

	iwm_free_nvm_data(sc->nvm_data);

	/* Free descriptor rings */
	iwm_free_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwm_free_tx_ring(sc, &sc->txq[i]);

	/* Free firmware */
	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/* Free scheduler */
	iwm_dma_contig_free(&sc->sched_dma);
	iwm_dma_contig_free(&sc->ict_dma);
	iwm_dma_contig_free(&sc->kw_dma);
	iwm_dma_contig_free(&sc->fw_dma);

	iwm_free_fw_paging(sc);

	/* Finished with the hardware - detach things */
	iwm_pci_detach(dev);

	if (sc->sc_notif_wait != NULL) {
		iwm_notification_wait_free(sc->sc_notif_wait);
		sc->sc_notif_wait = NULL;
	}

	/* Drop any queued-but-unsent frames before killing the lock. */
	mbufq_drain(&sc->sc_snd);
	IWM_LOCK_DESTROY(sc);

	return (0);
}
6407
6408 static int
6409 iwm_detach(device_t dev)
6410 {
6411         struct iwm_softc *sc = device_get_softc(dev);
6412
6413         return (iwm_detach_local(sc, 1));
6414 }
6415
/* newbus device-interface dispatch table for the iwm(4) PCI driver. */
static device_method_t iwm_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		iwm_probe),
	DEVMETHOD(device_attach,	iwm_attach),
	DEVMETHOD(device_detach,	iwm_detach),
	DEVMETHOD(device_suspend,	iwm_suspend),
	DEVMETHOD(device_resume,	iwm_resume),

	DEVMETHOD_END
};
6426
/* Driver descriptor: name, method table, and per-device softc size. */
static driver_t iwm_pci_driver = {
	"iwm",
	iwm_pci_methods,
	sizeof (struct iwm_softc)
};
6432
static devclass_t iwm_devclass;

/* Register the driver on the pci bus and declare module dependencies. */
DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);