/*
 * sys/dev/iwm/if_iwm.c — Intel IWM wireless driver.
 * [iwm] Check for lar_disable tunable, and lar_enabled flag from NVM.
 */
1 /*      $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $     */
2
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 #include <sys/cdefs.h>
106 __FBSDID("$FreeBSD$");
107
108 #include "opt_wlan.h"
109 #include "opt_iwm.h"
110
111 #include <sys/param.h>
112 #include <sys/bus.h>
113 #include <sys/conf.h>
114 #include <sys/endian.h>
115 #include <sys/firmware.h>
116 #include <sys/kernel.h>
117 #include <sys/malloc.h>
118 #include <sys/mbuf.h>
119 #include <sys/mutex.h>
120 #include <sys/module.h>
121 #include <sys/proc.h>
122 #include <sys/rman.h>
123 #include <sys/socket.h>
124 #include <sys/sockio.h>
125 #include <sys/sysctl.h>
126 #include <sys/linker.h>
127
128 #include <machine/bus.h>
129 #include <machine/endian.h>
130 #include <machine/resource.h>
131
132 #include <dev/pci/pcivar.h>
133 #include <dev/pci/pcireg.h>
134
135 #include <net/bpf.h>
136
137 #include <net/if.h>
138 #include <net/if_var.h>
139 #include <net/if_arp.h>
140 #include <net/if_dl.h>
141 #include <net/if_media.h>
142 #include <net/if_types.h>
143
144 #include <netinet/in.h>
145 #include <netinet/in_systm.h>
146 #include <netinet/if_ether.h>
147 #include <netinet/ip.h>
148
149 #include <net80211/ieee80211_var.h>
150 #include <net80211/ieee80211_regdomain.h>
151 #include <net80211/ieee80211_ratectl.h>
152 #include <net80211/ieee80211_radiotap.h>
153
154 #include <dev/iwm/if_iwmreg.h>
155 #include <dev/iwm/if_iwmvar.h>
156 #include <dev/iwm/if_iwm_config.h>
157 #include <dev/iwm/if_iwm_debug.h>
158 #include <dev/iwm/if_iwm_notif_wait.h>
159 #include <dev/iwm/if_iwm_util.h>
160 #include <dev/iwm/if_iwm_binding.h>
161 #include <dev/iwm/if_iwm_phy_db.h>
162 #include <dev/iwm/if_iwm_mac_ctxt.h>
163 #include <dev/iwm/if_iwm_phy_ctxt.h>
164 #include <dev/iwm/if_iwm_time_event.h>
165 #include <dev/iwm/if_iwm_power.h>
166 #include <dev/iwm/if_iwm_scan.h>
167 #include <dev/iwm/if_iwm_sf.h>
168 #include <dev/iwm/if_iwm_sta.h>
169
170 #include <dev/iwm/if_iwm_pcie_trans.h>
171 #include <dev/iwm/if_iwm_led.h>
172 #include <dev/iwm/if_iwm_fw.h>
173
174 /* From DragonflyBSD */
175 #define mtodoff(m, t, off)      ((t)((m)->m_data + (off)))
176
177 const uint8_t iwm_nvm_channels[] = {
178         /* 2.4 GHz */
179         1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
180         /* 5 GHz */
181         36, 40, 44, 48, 52, 56, 60, 64,
182         100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
183         149, 153, 157, 161, 165
184 };
185 _Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
186     "IWM_NUM_CHANNELS is too small");
187
188 const uint8_t iwm_nvm_channels_8000[] = {
189         /* 2.4 GHz */
190         1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
191         /* 5 GHz */
192         36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
193         96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
194         149, 153, 157, 161, 165, 169, 173, 177, 181
195 };
196 _Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
197     "IWM_NUM_CHANNELS_8000 is too small");
198
199 #define IWM_NUM_2GHZ_CHANNELS   14
200 #define IWM_N_HW_ADDR_MASK      0xF
201
202 /*
203  * XXX For now, there's simply a fixed set of rate table entries
204  * that are populated.
205  */
206 const struct iwm_rate {
207         uint8_t rate;
208         uint8_t plcp;
209 } iwm_rates[] = {
210         {   2,  IWM_RATE_1M_PLCP  },
211         {   4,  IWM_RATE_2M_PLCP  },
212         {  11,  IWM_RATE_5M_PLCP  },
213         {  22,  IWM_RATE_11M_PLCP },
214         {  12,  IWM_RATE_6M_PLCP  },
215         {  18,  IWM_RATE_9M_PLCP  },
216         {  24,  IWM_RATE_12M_PLCP },
217         {  36,  IWM_RATE_18M_PLCP },
218         {  48,  IWM_RATE_24M_PLCP },
219         {  72,  IWM_RATE_36M_PLCP },
220         {  96,  IWM_RATE_48M_PLCP },
221         { 108,  IWM_RATE_54M_PLCP },
222 };
223 #define IWM_RIDX_CCK    0
224 #define IWM_RIDX_OFDM   4
225 #define IWM_RIDX_MAX    (nitems(iwm_rates)-1)
226 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
227 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
228
/* One NVM section as read back from the device: length + payload. */
struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

/* How long to wait for the firmware "alive" and calibration events. */
#define IWM_MVM_UCODE_ALIVE_TIMEOUT	hz
#define IWM_MVM_UCODE_CALIB_TIMEOUT	(2*hz)

/* Result of waiting for the ucode "alive" notification. */
struct iwm_mvm_alive_data {
	int valid;			/* nonzero once the alive event arrived */
	uint32_t scd_base_addr;		/* scheduler base address reported by fw */
};
241
242 static int      iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
243 static int      iwm_firmware_store_section(struct iwm_softc *,
244                                            enum iwm_ucode_type,
245                                            const uint8_t *, size_t);
246 static int      iwm_set_default_calib(struct iwm_softc *, const void *);
247 static void     iwm_fw_info_free(struct iwm_fw_info *);
248 static int      iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
249 static int      iwm_alloc_fwmem(struct iwm_softc *);
250 static int      iwm_alloc_sched(struct iwm_softc *);
251 static int      iwm_alloc_kw(struct iwm_softc *);
252 static int      iwm_alloc_ict(struct iwm_softc *);
253 static int      iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
254 static void     iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
255 static void     iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
256 static int      iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
257                                   int);
258 static void     iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
259 static void     iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
260 static void     iwm_enable_interrupts(struct iwm_softc *);
261 static void     iwm_restore_interrupts(struct iwm_softc *);
262 static void     iwm_disable_interrupts(struct iwm_softc *);
263 static void     iwm_ict_reset(struct iwm_softc *);
264 static int      iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
265 static void     iwm_stop_device(struct iwm_softc *);
266 static void     iwm_mvm_nic_config(struct iwm_softc *);
267 static int      iwm_nic_rx_init(struct iwm_softc *);
268 static int      iwm_nic_tx_init(struct iwm_softc *);
269 static int      iwm_nic_init(struct iwm_softc *);
270 static int      iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
271 static int      iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
272                                    uint16_t, uint8_t *, uint16_t *);
273 static int      iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
274                                      uint16_t *, uint32_t);
275 static uint32_t iwm_eeprom_channel_flags(uint16_t);
276 static void     iwm_add_channel_band(struct iwm_softc *,
277                     struct ieee80211_channel[], int, int *, int, size_t,
278                     const uint8_t[]);
279 static void     iwm_init_channel_map(struct ieee80211com *, int, int *,
280                     struct ieee80211_channel[]);
281 static struct iwm_nvm_data *
282         iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
283                            const uint16_t *, const uint16_t *,
284                            const uint16_t *, const uint16_t *,
285                            const uint16_t *);
286 static void     iwm_free_nvm_data(struct iwm_nvm_data *);
287 static void     iwm_set_hw_address_family_8000(struct iwm_softc *,
288                                                struct iwm_nvm_data *,
289                                                const uint16_t *,
290                                                const uint16_t *);
291 static int      iwm_get_sku(const struct iwm_softc *, const uint16_t *,
292                             const uint16_t *);
293 static int      iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
294 static int      iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
295                                   const uint16_t *);
296 static int      iwm_get_n_hw_addrs(const struct iwm_softc *,
297                                    const uint16_t *);
298 static void     iwm_set_radio_cfg(const struct iwm_softc *,
299                                   struct iwm_nvm_data *, uint32_t);
300 static struct iwm_nvm_data *
301         iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
302 static int      iwm_nvm_init(struct iwm_softc *);
303 static int      iwm_pcie_load_section(struct iwm_softc *, uint8_t,
304                                       const struct iwm_fw_desc *);
305 static int      iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
306                                              bus_addr_t, uint32_t);
307 static int      iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
308                                                 const struct iwm_fw_sects *,
309                                                 int, int *);
310 static int      iwm_pcie_load_cpu_sections(struct iwm_softc *,
311                                            const struct iwm_fw_sects *,
312                                            int, int *);
313 static int      iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
314                                                const struct iwm_fw_sects *);
315 static int      iwm_pcie_load_given_ucode(struct iwm_softc *,
316                                           const struct iwm_fw_sects *);
317 static int      iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *);
318 static int      iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
319 static int      iwm_send_phy_cfg_cmd(struct iwm_softc *);
320 static int      iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
321                                               enum iwm_ucode_type);
322 static int      iwm_run_init_mvm_ucode(struct iwm_softc *, int);
323 static int      iwm_rx_addbuf(struct iwm_softc *, int, int);
324 static int      iwm_mvm_get_signal_strength(struct iwm_softc *,
325                                             struct iwm_rx_phy_info *);
326 static void     iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
327                                       struct iwm_rx_packet *);
328 static int      iwm_get_noise(struct iwm_softc *sc,
329                     const struct iwm_mvm_statistics_rx_non_phy *);
330 static boolean_t iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *,
331                                     uint32_t, boolean_t);
332 static int      iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
333                                          struct iwm_rx_packet *,
334                                          struct iwm_node *);
335 static void     iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
336 static void     iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
337 #if 0
338 static void     iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
339                                  uint16_t);
340 #endif
341 static const struct iwm_rate *
342         iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
343                         struct mbuf *, struct iwm_tx_cmd *);
344 static int      iwm_tx(struct iwm_softc *, struct mbuf *,
345                        struct ieee80211_node *, int);
346 static int      iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
347                              const struct ieee80211_bpf_params *);
348 static int      iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_vap *);
349 static int      iwm_auth(struct ieee80211vap *, struct iwm_softc *);
350 static int      iwm_release(struct iwm_softc *, struct iwm_node *);
351 static struct ieee80211_node *
352                 iwm_node_alloc(struct ieee80211vap *,
353                                const uint8_t[IEEE80211_ADDR_LEN]);
354 static void     iwm_setrates(struct iwm_softc *, struct iwm_node *);
355 static int      iwm_media_change(struct ifnet *);
356 static int      iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
357 static void     iwm_endscan_cb(void *, int);
358 static int      iwm_send_bt_init_conf(struct iwm_softc *);
359 static boolean_t iwm_mvm_is_lar_supported(struct iwm_softc *);
360 static boolean_t iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *);
361 static int      iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
362 static void     iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
363 static int      iwm_init_hw(struct iwm_softc *);
364 static void     iwm_init(struct iwm_softc *);
365 static void     iwm_start(struct iwm_softc *);
366 static void     iwm_stop(struct iwm_softc *);
367 static void     iwm_watchdog(void *);
368 static void     iwm_parent(struct ieee80211com *);
369 #ifdef IWM_DEBUG
370 static const char *
371                 iwm_desc_lookup(uint32_t);
372 static void     iwm_nic_error(struct iwm_softc *);
373 static void     iwm_nic_umac_error(struct iwm_softc *);
374 #endif
375 static void     iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
376 static void     iwm_notif_intr(struct iwm_softc *);
377 static void     iwm_intr(void *);
378 static int      iwm_attach(device_t);
379 static int      iwm_is_valid_ether_addr(uint8_t *);
380 static void     iwm_preinit(void *);
381 static int      iwm_detach_local(struct iwm_softc *sc, int);
382 static void     iwm_init_task(void *);
383 static void     iwm_radiotap_attach(struct iwm_softc *);
384 static struct ieee80211vap *
385                 iwm_vap_create(struct ieee80211com *,
386                                const char [IFNAMSIZ], int,
387                                enum ieee80211_opmode, int,
388                                const uint8_t [IEEE80211_ADDR_LEN],
389                                const uint8_t [IEEE80211_ADDR_LEN]);
390 static void     iwm_vap_delete(struct ieee80211vap *);
391 static void     iwm_scan_start(struct ieee80211com *);
392 static void     iwm_scan_end(struct ieee80211com *);
393 static void     iwm_update_mcast(struct ieee80211com *);
394 static void     iwm_set_channel(struct ieee80211com *);
395 static void     iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
396 static void     iwm_scan_mindwell(struct ieee80211_scan_state *);
397 static int      iwm_detach(device_t);
398
399 static int      iwm_lar_disable = 0;
400 TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);
401
402 /*
403  * Firmware parser.
404  */
405
406 static int
407 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
408 {
409         const struct iwm_fw_cscheme_list *l = (const void *)data;
410
411         if (dlen < sizeof(*l) ||
412             dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
413                 return EINVAL;
414
415         /* we don't actually store anything for now, always use s/w crypto */
416
417         return 0;
418 }
419
420 static int
421 iwm_firmware_store_section(struct iwm_softc *sc,
422     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
423 {
424         struct iwm_fw_sects *fws;
425         struct iwm_fw_desc *fwone;
426
427         if (type >= IWM_UCODE_TYPE_MAX)
428                 return EINVAL;
429         if (dlen < sizeof(uint32_t))
430                 return EINVAL;
431
432         fws = &sc->sc_fw.fw_sects[type];
433         if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
434                 return EINVAL;
435
436         fwone = &fws->fw_sect[fws->fw_count];
437
438         /* first 32bit are device load offset */
439         memcpy(&fwone->offset, data, sizeof(uint32_t));
440
441         /* rest is data */
442         fwone->data = data + sizeof(uint32_t);
443         fwone->len = dlen - sizeof(uint32_t);
444
445         fws->fw_count++;
446
447         return 0;
448 }
449
450 #define IWM_DEFAULT_SCAN_CHANNELS 40
451
452 /* iwlwifi: iwl-drv.c */
453 struct iwm_tlv_calib_data {
454         uint32_t ucode_type;
455         struct iwm_tlv_calib_ctrl calib;
456 } __packed;
457
458 static int
459 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
460 {
461         const struct iwm_tlv_calib_data *def_calib = data;
462         uint32_t ucode_type = le32toh(def_calib->ucode_type);
463
464         if (ucode_type >= IWM_UCODE_TYPE_MAX) {
465                 device_printf(sc->sc_dev,
466                     "Wrong ucode_type %u for default "
467                     "calibration.\n", ucode_type);
468                 return EINVAL;
469         }
470
471         sc->sc_default_calib[ucode_type].flow_trigger =
472             def_calib->calib.flow_trigger;
473         sc->sc_default_calib[ucode_type].event_trigger =
474             def_calib->calib.event_trigger;
475
476         return 0;
477 }
478
479 static int
480 iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
481                         struct iwm_ucode_capabilities *capa)
482 {
483         const struct iwm_ucode_api *ucode_api = (const void *)data;
484         uint32_t api_index = le32toh(ucode_api->api_index);
485         uint32_t api_flags = le32toh(ucode_api->api_flags);
486         int i;
487
488         if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
489                 device_printf(sc->sc_dev,
490                     "api flags index %d larger than supported by driver\n",
491                     api_index);
492                 /* don't return an error so we can load FW that has more bits */
493                 return 0;
494         }
495
496         for (i = 0; i < 32; i++) {
497                 if (api_flags & (1U << i))
498                         setbit(capa->enabled_api, i + 32 * api_index);
499         }
500
501         return 0;
502 }
503
504 static int
505 iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
506                            struct iwm_ucode_capabilities *capa)
507 {
508         const struct iwm_ucode_capa *ucode_capa = (const void *)data;
509         uint32_t api_index = le32toh(ucode_capa->api_index);
510         uint32_t api_flags = le32toh(ucode_capa->api_capa);
511         int i;
512
513         if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
514                 device_printf(sc->sc_dev,
515                     "capa flags index %d larger than supported by driver\n",
516                     api_index);
517                 /* don't return an error so we can load FW that has more bits */
518                 return 0;
519         }
520
521         for (i = 0; i < 32; i++) {
522                 if (api_flags & (1U << i))
523                         setbit(capa->enabled_capa, i + 32 * api_index);
524         }
525
526         return 0;
527 }
528
529 static void
530 iwm_fw_info_free(struct iwm_fw_info *fw)
531 {
532         firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
533         fw->fw_fp = NULL;
534         /* don't touch fw->fw_status */
535         memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
536 }
537
538 static int
539 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
540 {
541         struct iwm_fw_info *fw = &sc->sc_fw;
542         const struct iwm_tlv_ucode_header *uhdr;
543         const struct iwm_ucode_tlv *tlv;
544         struct iwm_ucode_capabilities *capa = &sc->ucode_capa;
545         enum iwm_ucode_tlv_type tlv_type;
546         const struct firmware *fwp;
547         const uint8_t *data;
548         uint32_t tlv_len;
549         uint32_t usniffer_img;
550         const uint8_t *tlv_data;
551         uint32_t paging_mem_size;
552         int num_of_cpus;
553         int error = 0;
554         size_t len;
555
556         if (fw->fw_status == IWM_FW_STATUS_DONE &&
557             ucode_type != IWM_UCODE_INIT)
558                 return 0;
559
560         while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
561                 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
562         fw->fw_status = IWM_FW_STATUS_INPROGRESS;
563
564         if (fw->fw_fp != NULL)
565                 iwm_fw_info_free(fw);
566
567         /*
568          * Load firmware into driver memory.
569          * fw_fp will be set.
570          */
571         IWM_UNLOCK(sc);
572         fwp = firmware_get(sc->cfg->fw_name);
573         IWM_LOCK(sc);
574         if (fwp == NULL) {
575                 device_printf(sc->sc_dev,
576                     "could not read firmware %s (error %d)\n",
577                     sc->cfg->fw_name, error);
578                 goto out;
579         }
580         fw->fw_fp = fwp;
581
582         /* (Re-)Initialize default values. */
583         capa->flags = 0;
584         capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
585         capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
586         memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
587         memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
588         memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
589
590         /*
591          * Parse firmware contents
592          */
593
594         uhdr = (const void *)fw->fw_fp->data;
595         if (*(const uint32_t *)fw->fw_fp->data != 0
596             || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
597                 device_printf(sc->sc_dev, "invalid firmware %s\n",
598                     sc->cfg->fw_name);
599                 error = EINVAL;
600                 goto out;
601         }
602
603         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
604             IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
605             IWM_UCODE_MINOR(le32toh(uhdr->ver)),
606             IWM_UCODE_API(le32toh(uhdr->ver)));
607         data = uhdr->data;
608         len = fw->fw_fp->datasize - sizeof(*uhdr);
609
610         while (len >= sizeof(*tlv)) {
611                 len -= sizeof(*tlv);
612                 tlv = (const void *)data;
613
614                 tlv_len = le32toh(tlv->length);
615                 tlv_type = le32toh(tlv->type);
616                 tlv_data = tlv->data;
617
618                 if (len < tlv_len) {
619                         device_printf(sc->sc_dev,
620                             "firmware too short: %zu bytes\n",
621                             len);
622                         error = EINVAL;
623                         goto parse_out;
624                 }
625                 len -= roundup2(tlv_len, 4);
626                 data += sizeof(tlv) + roundup2(tlv_len, 4);
627
628                 switch ((int)tlv_type) {
629                 case IWM_UCODE_TLV_PROBE_MAX_LEN:
630                         if (tlv_len != sizeof(uint32_t)) {
631                                 device_printf(sc->sc_dev,
632                                     "%s: PROBE_MAX_LEN (%d) != sizeof(uint32_t)\n",
633                                     __func__,
634                                     (int) tlv_len);
635                                 error = EINVAL;
636                                 goto parse_out;
637                         }
638                         capa->max_probe_length =
639                             le32_to_cpup((const uint32_t *)tlv_data);
640                         /* limit it to something sensible */
641                         if (capa->max_probe_length >
642                             IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
643                                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
644                                     "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
645                                     "ridiculous\n", __func__);
646                                 error = EINVAL;
647                                 goto parse_out;
648                         }
649                         break;
650                 case IWM_UCODE_TLV_PAN:
651                         if (tlv_len) {
652                                 device_printf(sc->sc_dev,
653                                     "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
654                                     __func__,
655                                     (int) tlv_len);
656                                 error = EINVAL;
657                                 goto parse_out;
658                         }
659                         capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
660                         break;
661                 case IWM_UCODE_TLV_FLAGS:
662                         if (tlv_len < sizeof(uint32_t)) {
663                                 device_printf(sc->sc_dev,
664                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
665                                     __func__,
666                                     (int) tlv_len);
667                                 error = EINVAL;
668                                 goto parse_out;
669                         }
670                         if (tlv_len % sizeof(uint32_t)) {
671                                 device_printf(sc->sc_dev,
672                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) %% sizeof(uint32_t)\n",
673                                     __func__,
674                                     (int) tlv_len);
675                                 error = EINVAL;
676                                 goto parse_out;
677                         }
678                         /*
679                          * Apparently there can be many flags, but Linux driver
680                          * parses only the first one, and so do we.
681                          *
682                          * XXX: why does this override IWM_UCODE_TLV_PAN?
683                          * Intentional or a bug?  Observations from
684                          * current firmware file:
685                          *  1) TLV_PAN is parsed first
686                          *  2) TLV_FLAGS contains TLV_FLAGS_PAN
687                          * ==> this resets TLV_PAN to itself... hnnnk
688                          */
689                         capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
690                         break;
691                 case IWM_UCODE_TLV_CSCHEME:
692                         if ((error = iwm_store_cscheme(sc,
693                             tlv_data, tlv_len)) != 0) {
694                                 device_printf(sc->sc_dev,
695                                     "%s: iwm_store_cscheme(): returned %d\n",
696                                     __func__,
697                                     error);
698                                 goto parse_out;
699                         }
700                         break;
701                 case IWM_UCODE_TLV_NUM_OF_CPU:
702                         if (tlv_len != sizeof(uint32_t)) {
703                                 device_printf(sc->sc_dev,
704                                     "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
705                                     __func__,
706                                     (int) tlv_len);
707                                 error = EINVAL;
708                                 goto parse_out;
709                         }
710                         num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
711                         if (num_of_cpus == 2) {
712                                 fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus =
713                                         TRUE;
714                                 fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus =
715                                         TRUE;
716                                 fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus =
717                                         TRUE;
718                         } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
719                                 device_printf(sc->sc_dev,
720                                     "%s: Driver supports only 1 or 2 CPUs\n",
721                                     __func__);
722                                 error = EINVAL;
723                                 goto parse_out;
724                         }
725                         break;
726                 case IWM_UCODE_TLV_SEC_RT:
727                         if ((error = iwm_firmware_store_section(sc,
728                             IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
729                                 device_printf(sc->sc_dev,
730                                     "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
731                                     __func__,
732                                     error);
733                                 goto parse_out;
734                         }
735                         break;
736                 case IWM_UCODE_TLV_SEC_INIT:
737                         if ((error = iwm_firmware_store_section(sc,
738                             IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
739                                 device_printf(sc->sc_dev,
740                                     "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
741                                     __func__,
742                                     error);
743                                 goto parse_out;
744                         }
745                         break;
746                 case IWM_UCODE_TLV_SEC_WOWLAN:
747                         if ((error = iwm_firmware_store_section(sc,
748                             IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
749                                 device_printf(sc->sc_dev,
750                                     "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
751                                     __func__,
752                                     error);
753                                 goto parse_out;
754                         }
755                         break;
756                 case IWM_UCODE_TLV_DEF_CALIB:
757                         if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
758                                 device_printf(sc->sc_dev,
759                                     "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
760                                     __func__,
761                                     (int) tlv_len,
762                                     (int) sizeof(struct iwm_tlv_calib_data));
763                                 error = EINVAL;
764                                 goto parse_out;
765                         }
766                         if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
767                                 device_printf(sc->sc_dev,
768                                     "%s: iwm_set_default_calib() failed: %d\n",
769                                     __func__,
770                                     error);
771                                 goto parse_out;
772                         }
773                         break;
774                 case IWM_UCODE_TLV_PHY_SKU:
775                         if (tlv_len != sizeof(uint32_t)) {
776                                 error = EINVAL;
777                                 device_printf(sc->sc_dev,
778                                     "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
779                                     __func__,
780                                     (int) tlv_len);
781                                 goto parse_out;
782                         }
783                         sc->sc_fw.phy_config =
784                             le32_to_cpup((const uint32_t *)tlv_data);
785                         sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
786                                                   IWM_FW_PHY_CFG_TX_CHAIN) >>
787                                                   IWM_FW_PHY_CFG_TX_CHAIN_POS;
788                         sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
789                                                   IWM_FW_PHY_CFG_RX_CHAIN) >>
790                                                   IWM_FW_PHY_CFG_RX_CHAIN_POS;
791                         break;
792
793                 case IWM_UCODE_TLV_API_CHANGES_SET: {
794                         if (tlv_len != sizeof(struct iwm_ucode_api)) {
795                                 error = EINVAL;
796                                 goto parse_out;
797                         }
798                         if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
799                                 error = EINVAL;
800                                 goto parse_out;
801                         }
802                         break;
803                 }
804
805                 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
806                         if (tlv_len != sizeof(struct iwm_ucode_capa)) {
807                                 error = EINVAL;
808                                 goto parse_out;
809                         }
810                         if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
811                                 error = EINVAL;
812                                 goto parse_out;
813                         }
814                         break;
815                 }
816
817                 case 48: /* undocumented TLV */
818                 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
819                 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
820                         /* ignore, not used by current driver */
821                         break;
822
823                 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
824                         if ((error = iwm_firmware_store_section(sc,
825                             IWM_UCODE_REGULAR_USNIFFER, tlv_data,
826                             tlv_len)) != 0)
827                                 goto parse_out;
828                         break;
829
830                 case IWM_UCODE_TLV_PAGING:
831                         if (tlv_len != sizeof(uint32_t)) {
832                                 error = EINVAL;
833                                 goto parse_out;
834                         }
835                         paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);
836
837                         IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
838                             "%s: Paging: paging enabled (size = %u bytes)\n",
839                             __func__, paging_mem_size);
840                         if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
841                                 device_printf(sc->sc_dev,
842                                         "%s: Paging: driver supports up to %u bytes for paging image\n",
843                                         __func__, IWM_MAX_PAGING_IMAGE_SIZE);
844                                 error = EINVAL;
845                                 goto out;
846                         }
847                         if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
848                                 device_printf(sc->sc_dev,
849                                     "%s: Paging: image isn't multiple %u\n",
850                                     __func__, IWM_FW_PAGING_SIZE);
851                                 error = EINVAL;
852                                 goto out;
853                         }
854
855                         sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
856                             paging_mem_size;
857                         usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
858                         sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
859                             paging_mem_size;
860                         break;
861
862                 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
863                         if (tlv_len != sizeof(uint32_t)) {
864                                 error = EINVAL;
865                                 goto parse_out;
866                         }
867                         capa->n_scan_channels =
868                             le32_to_cpup((const uint32_t *)tlv_data);
869                         break;
870
871                 case IWM_UCODE_TLV_FW_VERSION:
872                         if (tlv_len != sizeof(uint32_t) * 3) {
873                                 error = EINVAL;
874                                 goto parse_out;
875                         }
876                         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
877                             "%d.%d.%d",
878                             le32toh(((const uint32_t *)tlv_data)[0]),
879                             le32toh(((const uint32_t *)tlv_data)[1]),
880                             le32toh(((const uint32_t *)tlv_data)[2]));
881                         break;
882
883                 case IWM_UCODE_TLV_FW_MEM_SEG:
884                         break;
885
886                 default:
887                         device_printf(sc->sc_dev,
888                             "%s: unknown firmware section %d, abort\n",
889                             __func__, tlv_type);
890                         error = EINVAL;
891                         goto parse_out;
892                 }
893         }
894
895         KASSERT(error == 0, ("unhandled error"));
896
897  parse_out:
898         if (error) {
899                 device_printf(sc->sc_dev, "firmware parse error %d, "
900                     "section type %d\n", error, tlv_type);
901         }
902
903  out:
904         if (error) {
905                 fw->fw_status = IWM_FW_STATUS_NONE;
906                 if (fw->fw_fp != NULL)
907                         iwm_fw_info_free(fw);
908         } else
909                 fw->fw_status = IWM_FW_STATUS_DONE;
910         wakeup(&sc->sc_fw);
911
912         return error;
913 }
914
915 /*
916  * DMA resource routines
917  */
918
/*
 * fwmem is used to load firmware onto the card.
 *
 * Allocates a single contiguous DMA buffer of IWM_FH_MEM_TB_MAX_LENGTH
 * bytes into sc->fw_dma.  Returns 0 on success or an errno from
 * iwm_dma_contig_alloc().
 */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
        /* Must be aligned on a 16-byte boundary. */
        return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
            IWM_FH_MEM_TB_MAX_LENGTH, 16);
}
927
/*
 * TX scheduler rings.  Allocates one iwm_agn_scd_bc_tbl (byte-count
 * table) per TX queue in a single contiguous DMA region (sc->sched_dma).
 * Returns 0 on success or an errno.
 */
static int
iwm_alloc_sched(struct iwm_softc *sc)
{
        /* TX scheduler rings must be aligned on a 1KB boundary. */
        return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
            nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
}
936
/*
 * Keep-warm page is used internally by the card; see iwl-fh.h for more
 * info.  Allocates one 4KB page, 4KB-aligned, into sc->kw_dma.
 * Returns 0 on success or an errno.
 */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
        return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}
943
/*
 * Interrupt cause table (ICT).  Allocates IWM_ICT_SIZE bytes into
 * sc->ict_dma.  The alignment (1 << IWM_ICT_PADDR_SHIFT) guarantees the
 * physical address can be programmed as a shifted value in
 * iwm_ict_reset().  Returns 0 on success or an errno.
 */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
        return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
            IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}
951
/*
 * Allocate all RX ring resources: the DMA descriptor array, the RX
 * status area, the buffer DMA tag, a spare map for buffer rotation,
 * and a DMA map plus an initial receive buffer for every ring slot.
 *
 * On any failure everything allocated so far is released through
 * iwm_free_rx_ring().  Returns 0 on success or an errno.
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
        bus_size_t size;
        int i, error;

        ring->cur = 0;

        /*
         * Allocate RX descriptors (256-byte aligned).  Each ring entry
         * is one 32-bit word.
         */
        size = IWM_RX_RING_COUNT * sizeof(uint32_t);
        error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "could not allocate RX ring DMA memory\n");
                goto fail;
        }
        ring->desc = ring->desc_dma.vaddr;

        /* Allocate RX status area (16-byte aligned). */
        error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
            sizeof(*ring->stat), 16);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "could not allocate RX status DMA memory\n");
                goto fail;
        }
        ring->stat = ring->stat_dma.vaddr;

        /*
         * Create RX buffer DMA tag: single-segment buffers of
         * IWM_RBUF_SIZE bytes, restricted to 32-bit DMA addresses.
         */
        error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
            IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "%s: could not create RX buf DMA tag, error %d\n",
                    __func__, error);
                goto fail;
        }

        /* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
        error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "%s: could not create RX buf DMA map, error %d\n",
                    __func__, error);
                goto fail;
        }
        /*
         * Allocate and map RX buffers.
         */
        for (i = 0; i < IWM_RX_RING_COUNT; i++) {
                struct iwm_rx_data *data = &ring->data[i];
                error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
                if (error != 0) {
                        device_printf(sc->sc_dev,
                            "%s: could not create RX buf DMA map, error %d\n",
                            __func__, error);
                        goto fail;
                }
                data->m = NULL;

                /* Attach an actual receive buffer to this slot. */
                if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
                        goto fail;
                }
        }
        return 0;

fail:   iwm_free_rx_ring(sc, ring);
        return error;
}
1022
1023 static void
1024 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1025 {
1026         /* Reset the ring state */
1027         ring->cur = 0;
1028
1029         /*
1030          * The hw rx ring index in shared memory must also be cleared,
1031          * otherwise the discrepancy can cause reprocessing chaos.
1032          */
1033         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1034 }
1035
1036 static void
1037 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1038 {
1039         int i;
1040
1041         iwm_dma_contig_free(&ring->desc_dma);
1042         iwm_dma_contig_free(&ring->stat_dma);
1043
1044         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1045                 struct iwm_rx_data *data = &ring->data[i];
1046
1047                 if (data->m != NULL) {
1048                         bus_dmamap_sync(ring->data_dmat, data->map,
1049                             BUS_DMASYNC_POSTREAD);
1050                         bus_dmamap_unload(ring->data_dmat, data->map);
1051                         m_freem(data->m);
1052                         data->m = NULL;
1053                 }
1054                 if (data->map != NULL) {
1055                         bus_dmamap_destroy(ring->data_dmat, data->map);
1056                         data->map = NULL;
1057                 }
1058         }
1059         if (ring->spare_map != NULL) {
1060                 bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1061                 ring->spare_map = NULL;
1062         }
1063         if (ring->data_dmat != NULL) {
1064                 bus_dma_tag_destroy(ring->data_dmat);
1065                 ring->data_dmat = NULL;
1066         }
1067 }
1068
/*
 * Allocate resources for one TX ring: the TFD descriptor array and,
 * for rings up to and including the command queue, the device-command
 * array, the buffer DMA tag and a DMA map per slot.  Data-only rings
 * (qid > IWM_MVM_CMD_QUEUE) get descriptors only.
 *
 * Also precomputes each slot's command and scratch physical addresses
 * within the contiguous cmd_dma region.  On failure everything is
 * released via iwm_free_tx_ring().  Returns 0 on success or an errno.
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
        bus_addr_t paddr;
        bus_size_t size;
        size_t maxsize;
        int nsegments;
        int i, error;

        ring->qid = qid;
        ring->queued = 0;
        ring->cur = 0;

        /* Allocate TX descriptors (256-byte aligned). */
        size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
        error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "could not allocate TX ring DMA memory\n");
                goto fail;
        }
        ring->desc = ring->desc_dma.vaddr;

        /*
         * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
         * to allocate commands space for other rings.
         */
        if (qid > IWM_MVM_CMD_QUEUE)
                return 0;

        size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
        error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "could not allocate TX cmd DMA memory\n");
                goto fail;
        }
        ring->cmd = ring->cmd_dma.vaddr;

        /* FW commands may require more mapped space than packets. */
        if (qid == IWM_MVM_CMD_QUEUE) {
                maxsize = IWM_RBUF_SIZE;
                nsegments = 1;
        } else {
                maxsize = MCLBYTES;
                nsegments = IWM_MAX_SCATTER - 2;
        }

        error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
            nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
        if (error != 0) {
                device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
                goto fail;
        }

        /*
         * Walk the contiguous command region, recording each slot's
         * command address and the address of its scratch field.
         */
        paddr = ring->cmd_dma.paddr;
        for (i = 0; i < IWM_TX_RING_COUNT; i++) {
                struct iwm_tx_data *data = &ring->data[i];

                data->cmd_paddr = paddr;
                data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
                    + offsetof(struct iwm_tx_cmd, scratch);
                paddr += sizeof(struct iwm_device_cmd);

                error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
                if (error != 0) {
                        device_printf(sc->sc_dev,
                            "could not create TX buf DMA map\n");
                        goto fail;
                }
        }
        /* The walk must end exactly at the end of the cmd region. */
        KASSERT(paddr == ring->cmd_dma.paddr + size,
            ("invalid physical address"));
        return 0;

fail:   iwm_free_tx_ring(sc, ring);
        return error;
}
1148
/*
 * Reset a TX ring to its empty state without freeing its resources:
 * free any queued mbufs, zero the hardware descriptors, and clear the
 * ring's entry in the queue-full mask.  For the command queue, also
 * drop any outstanding "command in flight" NIC wakeup request.
 */
static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
        int i;

        for (i = 0; i < IWM_TX_RING_COUNT; i++) {
                struct iwm_tx_data *data = &ring->data[i];

                if (data->m != NULL) {
                        bus_dmamap_sync(ring->data_dmat, data->map,
                            BUS_DMASYNC_POSTWRITE);
                        bus_dmamap_unload(ring->data_dmat, data->map);
                        m_freem(data->m);
                        data->m = NULL;
                }
        }
        /* Clear TX descriptors. */
        memset(ring->desc, 0, ring->desc_dma.size);
        /* Push the zeroed descriptors out to the device. */
        bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
            BUS_DMASYNC_PREWRITE);
        sc->qfullmsk &= ~(1 << ring->qid);
        ring->queued = 0;
        ring->cur = 0;

        if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
                iwm_pcie_clear_cmd_in_flight(sc);
}
1176
1177 static void
1178 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1179 {
1180         int i;
1181
1182         iwm_dma_contig_free(&ring->desc_dma);
1183         iwm_dma_contig_free(&ring->cmd_dma);
1184
1185         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1186                 struct iwm_tx_data *data = &ring->data[i];
1187
1188                 if (data->m != NULL) {
1189                         bus_dmamap_sync(ring->data_dmat, data->map,
1190                             BUS_DMASYNC_POSTWRITE);
1191                         bus_dmamap_unload(ring->data_dmat, data->map);
1192                         m_freem(data->m);
1193                         data->m = NULL;
1194                 }
1195                 if (data->map != NULL) {
1196                         bus_dmamap_destroy(ring->data_dmat, data->map);
1197                         data->map = NULL;
1198                 }
1199         }
1200         if (ring->data_dmat != NULL) {
1201                 bus_dma_tag_destroy(ring->data_dmat);
1202                 ring->data_dmat = NULL;
1203         }
1204 }
1205
1206 /*
1207  * High-level hardware frobbing routines
1208  */
1209
/*
 * Enable the default interrupt set and remember the mask in
 * sc_intmask so iwm_restore_interrupts() can re-apply it later.
 */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
        sc->sc_intmask = IWM_CSR_INI_SET_MASK;
        IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1216
/* Re-apply the last interrupt mask saved by iwm_enable_interrupts(). */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
        IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1222
/*
 * Mask all interrupt sources and acknowledge anything already
 * pending so no stale cause bits survive.
 */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
        /* disable interrupts */
        IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

        /* acknowledge all interrupts */
        IWM_WRITE(sc, IWM_CSR_INT, ~0);
        IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}
1233
/*
 * Reset and (re)enable the interrupt cause table: clear the ICT
 * memory, point the device at its physical address, switch the
 * driver into ICT mode and re-enable interrupts.  Interrupts are
 * disabled for the duration of the reprogramming.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
        iwm_disable_interrupts(sc);

        /* Reset ICT table. */
        memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
        sc->ict_cur = 0;

        /* Set physical address of ICT table (4KB aligned). */
        IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
            IWM_CSR_DRAM_INT_TBL_ENABLE
            | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
            | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
            | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

        /* Switch to ICT interrupt mode in driver. */
        sc->sc_flags |= IWM_FLAG_USE_ICT;

        /* Re-enable interrupts. */
        IWM_WRITE(sc, IWM_CSR_INT, ~0);
        iwm_enable_interrupts(sc);
}
1257
1258 /* iwlwifi pcie/trans.c */
1259
1260 /*
1261  * Since this .. hard-resets things, it's time to actually
1262  * mark the first vap (if any) as having no mac context.
1263  * It's annoying, but since the driver is potentially being
1264  * stop/start'ed whilst active (thanks openbsd port!) we
1265  * have to correctly track this.
1266  */
/*
 * Full hardware shutdown: disable interrupts, stop the TX/RX DMA
 * engines, reset all rings, power down busmaster clocks (7000
 * family), stop the APM and soft-reset the on-board processor.
 * The order of operations below mirrors iwlwifi pcie/trans.c and
 * should not be changed casually.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
        struct ieee80211com *ic = &sc->sc_ic;
        struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
        int chnl, qid;
        uint32_t mask = 0;

        /* tell the device to stop sending interrupts */
        iwm_disable_interrupts(sc);

        /*
         * FreeBSD-local: mark the first vap as not-uploaded,
         * so the next transition through auth/assoc
         * will correctly populate the MAC context.
         */
        if (vap) {
                struct iwm_vap *iv = IWM_VAP(vap);
                iv->phy_ctxt = NULL;
                iv->is_uploaded = 0;
        }

        /* device going down, Stop using ICT table */
        sc->sc_flags &= ~IWM_FLAG_USE_ICT;

        /* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

        if (iwm_nic_lock(sc)) {
                /* Deactivate the TX scheduler first. */
                iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

                /* Stop each Tx DMA channel */
                for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
                        IWM_WRITE(sc,
                            IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
                        mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
                }

                /* Wait for DMA channels to be idle */
                if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
                    5000)) {
                        device_printf(sc->sc_dev,
                            "Failing on timeout while stopping DMA channel: [0x%08x]\n",
                            IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
                }
                iwm_nic_unlock(sc);
        }
        iwm_pcie_rx_stop(sc);

        /* Stop RX ring. */
        iwm_reset_rx_ring(sc, &sc->rxq);

        /* Reset all TX rings. */
        for (qid = 0; qid < nitems(sc->txq); qid++)
                iwm_reset_tx_ring(sc, &sc->txq[qid]);

        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
                /* Power-down device's busmaster DMA clocks */
                if (iwm_nic_lock(sc)) {
                        iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
                            IWM_APMG_CLK_VAL_DMA_CLK_RQT);
                        iwm_nic_unlock(sc);
                }
                DELAY(5);
        }

        /* Make sure (redundant) we've released our request to stay awake */
        IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
            IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

        /* Stop the device, and put it in low power state */
        iwm_apm_stop(sc);

        /* Upon stop, the APM issues an interrupt if HW RF kill is set.
         * Clean again the interrupt here
         */
        iwm_disable_interrupts(sc);
        /* stop and reset the on-board processor */
        IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);

        /*
         * Even if we stop the HW, we still want the RF kill
         * interrupt
         */
        iwm_enable_rfkill_int(sc);
        iwm_check_rfkill(sc);
}
1353
1354 /* iwlwifi: mvm/ops.c */
1355 static void
1356 iwm_mvm_nic_config(struct iwm_softc *sc)
1357 {
1358         uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1359         uint32_t reg_val = 0;
1360         uint32_t phy_config = iwm_mvm_get_phy_config(sc);
1361
1362         radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1363             IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1364         radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1365             IWM_FW_PHY_CFG_RADIO_STEP_POS;
1366         radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1367             IWM_FW_PHY_CFG_RADIO_DASH_POS;
1368
1369         /* SKU control */
1370         reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1371             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1372         reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1373             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1374
1375         /* radio configuration */
1376         reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1377         reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1378         reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1379
1380         IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1381
1382         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1383             "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1384             radio_cfg_step, radio_cfg_dash);
1385
1386         /*
1387          * W/A : NIC is stuck in a reset state after Early PCIe power off
1388          * (PCIe power is lost before PERST# is asserted), causing ME FW
1389          * to lose ownership and not being able to obtain it back.
1390          */
1391         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1392                 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1393                     IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1394                     ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1395         }
1396 }
1397
/*
 * Program the RX DMA engine: clear the shared status area, stop RX
 * DMA, reset the channel-0 pointers, hand the device the physical
 * addresses of the descriptor ring and status area, and enable RX
 * with the configured buffer size / timeout / ring size.
 *
 * Returns 0 on success or EBUSY if the NIC lock cannot be taken.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
        /*
         * Initialize RX ring.  This is from the iwn driver.
         */
        memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

        /* Stop Rx DMA */
        iwm_pcie_rx_stop(sc);

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* reset and flush pointers */
        IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
        IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
        IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
        IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

        /* Set physical address of RX ring (256-byte aligned). */
        IWM_WRITE(sc,
            IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

        /* Set physical address of RX status (16-byte aligned). */
        IWM_WRITE(sc,
            IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

        /* Enable Rx DMA
         * XXX 5000 HW isn't supported by the iwm(4) driver.
         * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
         *      the credit mechanism in 5000 HW RX FIFO
         * Direct rx interrupts to hosts
         * Rx buffer size 4 or 8k or 12k
         * RB timeout 0x10
         * 256 RBDs
         */
        IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
            IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
            IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |  /* HW bug */
            IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
            IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
            (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
            IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

        IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

        /* W/A for interrupt coalescing bug in 7260 and 3160 */
        if (sc->cfg->host_interrupt_operation_mode)
                IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

        /*
         * Thus sayeth el jefe (iwlwifi) via a comment:
         *
         * This value should initially be 0 (before preparing any
         * RBs), should be 8 after preparing the first 8 RBs (for example)
         */
        IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

        iwm_nic_unlock(sc);

        return 0;
}
1461
/*
 * Prepare the TX path: park the scheduler, point the hardware at the
 * "keep warm" page and at every TX ring's descriptor array, then let the
 * scheduler manage chain-building itself.
 *
 * Returns 0 on success or EBUSY if the NIC could not be locked.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler before touching ring registers. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}

	/* Let the scheduler activate/deactivate queues on its own. */
	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}
1496
/*
 * One-stop NIC bring-up: APM init, power settings (7000 family only),
 * MAC/radio config, then the RX and TX path initialization above.
 *
 * Returns 0 on success or the error from the RX/TX init step that failed.
 */
static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	/* Power settings are written via APMG registers on 7000 only. */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_pwr(sc);

	iwm_mvm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	/* Enable shadow registers so the driver can avoid waking the NIC. */
	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: shadow registers enabled\n", __func__);
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}
1523
1524 int
1525 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1526 {
1527         if (!iwm_nic_lock(sc)) {
1528                 device_printf(sc->sc_dev,
1529                     "%s: cannot enable txq %d\n",
1530                     __func__,
1531                     qid);
1532                 return EBUSY;
1533         }
1534
1535         IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1536
1537         if (qid == IWM_MVM_CMD_QUEUE) {
1538                 /* unactivate before configuration */
1539                 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1540                     (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1541                     | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1542
1543                 iwm_nic_unlock(sc);
1544
1545                 iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1546
1547                 if (!iwm_nic_lock(sc)) {
1548                         device_printf(sc->sc_dev,
1549                             "%s: cannot enable txq %d\n", __func__, qid);
1550                         return EBUSY;
1551                 }
1552                 iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1553                 iwm_nic_unlock(sc);
1554
1555                 iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1556                 /* Set scheduler window size and frame limit. */
1557                 iwm_write_mem32(sc,
1558                     sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1559                     sizeof(uint32_t),
1560                     ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1561                     IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1562                     ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1563                     IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1564
1565                 if (!iwm_nic_lock(sc)) {
1566                         device_printf(sc->sc_dev,
1567                             "%s: cannot enable txq %d\n", __func__, qid);
1568                         return EBUSY;
1569                 }
1570                 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1571                     (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1572                     (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1573                     (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1574                     IWM_SCD_QUEUE_STTS_REG_MSK);
1575         } else {
1576                 struct iwm_scd_txq_cfg_cmd cmd;
1577                 int error;
1578
1579                 iwm_nic_unlock(sc);
1580
1581                 memset(&cmd, 0, sizeof(cmd));
1582                 cmd.scd_queue = qid;
1583                 cmd.enable = 1;
1584                 cmd.sta_id = sta_id;
1585                 cmd.tx_fifo = fifo;
1586                 cmd.aggregate = 0;
1587                 cmd.window = IWM_FRAME_LIMIT;
1588
1589                 error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1590                     sizeof(cmd), &cmd);
1591                 if (error) {
1592                         device_printf(sc->sc_dev,
1593                             "cannot enable txq %d\n", qid);
1594                         return error;
1595                 }
1596
1597                 if (!iwm_nic_lock(sc))
1598                         return EBUSY;
1599         }
1600
1601         iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1602             iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1603
1604         iwm_nic_unlock(sc);
1605
1606         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1607             __func__, qid, fifo);
1608
1609         return 0;
1610 }
1611
/*
 * Post-"alive" firmware handshake: reset the ICT table, sanity-check the
 * scheduler SRAM base address reported by firmware against the PRPH value,
 * clear the scheduler context/status/translation area, point the scheduler
 * at our DMA'd byte-count tables, enable the command queue and all FH DMA
 * channels, and finally re-enable L1-Active (pre-8000 families only).
 *
 * Returns 0 on success or EBUSY on lock/memory-write failure.
 */
static int
iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
{
	int error, chnl;

	/* Size (in dwords) of the scheduler area we clear below. */
	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	iwm_ict_reset(sc);

	/* Compare fw's reported base against what the hardware says. */
	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
	if (scd_base_addr != 0 &&
	    scd_base_addr != sc->scd_base_addr) {
		device_printf(sc->sc_dev,
		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
		    __func__, sc->scd_base_addr, scd_base_addr);
	}

	iwm_nic_unlock(sc);

	/* reset context data, TX status and translation data */
	error = iwm_write_mem(sc,
	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, clear_dwords);
	if (error)
		return EBUSY;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	/* No chaining extensions. */
	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	iwm_nic_unlock(sc);

	/* enable command channel */
	error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
	if (error)
		return error;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Activate the TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwm_nic_unlock(sc);

	/* Enable L1-Active */
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
	}

	/* error is 0 here: every failure path above returned already. */
	return error;
}
1682
1683 /*
1684  * NVM read access and content parsing.  We do not support
1685  * external NVM or writing NVM.
1686  * iwlwifi/mvm/nvm.c
1687  */
1688
/* Default NVM chunk size to request per NVM_ACCESS command (bytes). */
#define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)

/* op_code values for struct iwm_nvm_access_cmd. */
#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0

/* Status codes returned by firmware in the NVM_ACCESS response. */
enum {
	IWM_READ_NVM_CHUNK_SUCCEED = 0,
	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
};
1700
/*
 * Read one chunk of NVM 'section' via a synchronous NVM_ACCESS firmware
 * command.  Up to 'length' bytes starting at 'offset' within the section
 * are copied into data + offset; the number of bytes actually returned by
 * firmware is stored in *len (may be less than 'length', or 0 at EOF).
 *
 * Returns 0 on success (including the benign end-of-section case below),
 * EIO/EINVAL on firmware errors, or the iwm_send_cmd() error.
 */
static int
iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
{
	struct iwm_nvm_access_cmd nvm_access_cmd = {
		.offset = htole16(offset),
		.length = htole16(length),
		.type = htole16(section),
		.op_code = IWM_NVM_READ_OPCODE,
	};
	struct iwm_nvm_access_resp *nvm_resp;
	struct iwm_rx_packet *pkt;
	struct iwm_host_cmd cmd = {
		.id = IWM_NVM_ACCESS_CMD,
		/* Want the response mbuf; NVM must be readable under rfkill. */
		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
		.data = { &nvm_access_cmd, },
	};
	int ret, bytes_read, offset_read;
	uint8_t *resp_data;

	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);

	ret = iwm_send_cmd(sc, &cmd);
	if (ret) {
		device_printf(sc->sc_dev,
		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
		return ret;
	}

	pkt = cmd.resp_pkt;

	/* Extract NVM response */
	nvm_resp = (void *)pkt->data;
	ret = le16toh(nvm_resp->status);
	bytes_read = le16toh(nvm_resp->length);
	offset_read = le16toh(nvm_resp->offset);
	resp_data = nvm_resp->data;
	if (ret) {
		if ((offset != 0) &&
		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
			/*
			 * meaning of NOT_VALID_ADDRESS:
			 * driver try to read chunk from address that is
			 * multiple of 2K and got an error since addr is empty.
			 * meaning of (offset != 0): driver already
			 * read valid data from another chunk so this case
			 * is not an error.
			 */
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
				    "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
				    offset);
			*len = 0;
			ret = 0;
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
				    "NVM access command failed with status %d\n", ret);
			ret = EIO;
		}
		goto exit;
	}

	/* Firmware must echo back the offset we asked for. */
	if (offset_read != offset) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with invalid offset %d\n",
		    offset_read);
		ret = EINVAL;
		goto exit;
	}

	/* Never copy more than the caller's buffer slot can hold. */
	if (bytes_read > length) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with too much data "
		    "(%d bytes requested, %d bytes received)\n",
		    length, bytes_read);
		ret = EINVAL;
		goto exit;
	}

	/* Write data to NVM */
	memcpy(data + offset, resp_data, bytes_read);
	*len = bytes_read;

 exit:
	iwm_free_resp(sc, &cmd);
	return ret;
}
1787
1788 /*
1789  * Reads an NVM section completely.
1790  * NICs prior to 7000 family don't have a real NVM, but just read
1791  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1792  * by uCode, we need to manually check in this case that we don't
1793  * overflow and try to read more than the EEPROM size.
1794  * For 7000 family NICs, we supply the maximal size we can read, and
1795  * the uCode fills the response with as much data as we can,
1796  * without overflowing, so no check is needed.
1797  */
1798 static int
1799 iwm_nvm_read_section(struct iwm_softc *sc,
1800         uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1801 {
1802         uint16_t seglen, length, offset = 0;
1803         int ret;
1804
1805         /* Set nvm section read length */
1806         length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1807
1808         seglen = length;
1809
1810         /* Read the NVM until exhausted (reading less than requested) */
1811         while (seglen == length) {
1812                 /* Check no memory assumptions fail and cause an overflow */
1813                 if ((size_read + offset + length) >
1814                     sc->cfg->eeprom_size) {
1815                         device_printf(sc->sc_dev,
1816                             "EEPROM size is too small for NVM\n");
1817                         return ENOBUFS;
1818                 }
1819
1820                 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1821                 if (ret) {
1822                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1823                                     "Cannot read NVM from section %d offset %d, length %d\n",
1824                                     section, offset, length);
1825                         return ret;
1826                 }
1827                 offset += seglen;
1828         }
1829
1830         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1831                     "NVM section %d read completed\n", section);
1832         *len = offset;
1833         return 0;
1834 }
1835
1836 /*
1837  * BEGIN IWM_NVM_PARSE
1838  */
1839
1840 /* iwlwifi/iwl-nvm-parse.c */
1841
/*
 * NVM offsets (in 16-bit words).  The SW/calib entries are relative to
 * the start of their respective sections, as used by the accessors below
 * (e.g. nvm_sw + IWM_NVM_VERSION).
 */
enum iwm_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR = 0x15,

	/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION = 0x1C0,
	IWM_NVM_VERSION = 0,
	IWM_RADIO_CFG = 1,
	IWM_SKU = 2,
	IWM_N_HW_ADDRS = 3,
	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

	/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION = 0x2B8,
	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
1859
/*
 * Family-8000 NVM offsets (in 16-bit words), relative to the start of
 * each section.  IWM_NVM_LAR_ENABLED_8000 is a bitmask applied to the
 * LAR config word, not an offset.
 */
enum iwm_8000_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR0_WFPM_8000 = 0x12,
	IWM_HW_ADDR1_WFPM_8000 = 0x16,
	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,

	/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION_8000 = 0x1C0,
	IWM_NVM_VERSION_8000 = 0,
	IWM_RADIO_CFG_8000 = 0,		/* offset within the PHY_SKU section */
	IWM_SKU_8000 = 2,		/* offset within the PHY_SKU section */
	IWM_N_HW_ADDRS_8000 = 3,

	/* NVM REGULATORY -Section offset (in words) definitions */
	IWM_NVM_CHANNELS_8000 = 0,
	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,	/* nvm_version < 0xE39 */
	IWM_NVM_LAR_OFFSET_8000 = 0x507,
	IWM_NVM_LAR_ENABLED_8000 = 0x7,		/* mask, not an offset */

	/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
};
1885
/* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
};

/* radio config bits (actual values from NVM definition) */
#define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
#define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
#define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
#define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */

/* Family-8000 uses a wider 32-bit radio config layout. */
#define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	(x & 0xF)
#define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		((x >> 4) & 0xF)
#define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		((x >> 8) & 0xF)
#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		((x >> 12) & 0xFFF)
#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	((x >> 24) & 0xF)
#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	((x >> 28) & 0xF)

/* Fallback max TX power (dBm) when no per-channel limit applies. */
#define DEFAULT_MAX_TX_POWER 16
1910
1911 /**
1912  * enum iwm_nvm_channel_flags - channel flags in NVM
1913  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1914  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1915  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1916  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1917  * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1918  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1919  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1920  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1921  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1922  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1923  */
1924 enum iwm_nvm_channel_flags {
1925         IWM_NVM_CHANNEL_VALID = (1 << 0),
1926         IWM_NVM_CHANNEL_IBSS = (1 << 1),
1927         IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1928         IWM_NVM_CHANNEL_RADAR = (1 << 4),
1929         IWM_NVM_CHANNEL_DFS = (1 << 7),
1930         IWM_NVM_CHANNEL_WIDE = (1 << 8),
1931         IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1932         IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1933         IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1934 };
1935
1936 /*
1937  * Translate EEPROM flags to net80211.
1938  */
1939 static uint32_t
1940 iwm_eeprom_channel_flags(uint16_t ch_flags)
1941 {
1942         uint32_t nflags;
1943
1944         nflags = 0;
1945         if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1946                 nflags |= IEEE80211_CHAN_PASSIVE;
1947         if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1948                 nflags |= IEEE80211_CHAN_NOADHOC;
1949         if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1950                 nflags |= IEEE80211_CHAN_DFS;
1951                 /* Just in case. */
1952                 nflags |= IEEE80211_CHAN_NOADHOC;
1953         }
1954
1955         return (nflags);
1956 }
1957
1958 static void
1959 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1960     int maxchans, int *nchans, int ch_idx, size_t ch_num,
1961     const uint8_t bands[])
1962 {
1963         const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
1964         uint32_t nflags;
1965         uint16_t ch_flags;
1966         uint8_t ieee;
1967         int error;
1968
1969         for (; ch_idx < ch_num; ch_idx++) {
1970                 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1971                 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1972                         ieee = iwm_nvm_channels[ch_idx];
1973                 else
1974                         ieee = iwm_nvm_channels_8000[ch_idx];
1975
1976                 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1977                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1978                             "Ch. %d Flags %x [%sGHz] - No traffic\n",
1979                             ieee, ch_flags,
1980                             (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1981                             "5.2" : "2.4");
1982                         continue;
1983                 }
1984
1985                 nflags = iwm_eeprom_channel_flags(ch_flags);
1986                 error = ieee80211_add_channel(chans, maxchans, nchans,
1987                     ieee, 0, 0, nflags, bands);
1988                 if (error != 0)
1989                         break;
1990
1991                 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1992                     "Ch. %d Flags %x [%sGHz] - Added\n",
1993                     ieee, ch_flags,
1994                     (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1995                     "5.2" : "2.4");
1996         }
1997 }
1998
1999 static void
2000 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2001     struct ieee80211_channel chans[])
2002 {
2003         struct iwm_softc *sc = ic->ic_softc;
2004         struct iwm_nvm_data *data = sc->nvm_data;
2005         uint8_t bands[IEEE80211_MODE_BYTES];
2006         size_t ch_num;
2007
2008         memset(bands, 0, sizeof(bands));
2009         /* 1-13: 11b/g channels. */
2010         setbit(bands, IEEE80211_MODE_11B);
2011         setbit(bands, IEEE80211_MODE_11G);
2012         iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2013             IWM_NUM_2GHZ_CHANNELS - 1, bands);
2014
2015         /* 14: 11b channel only. */
2016         clrbit(bands, IEEE80211_MODE_11G);
2017         iwm_add_channel_band(sc, chans, maxchans, nchans,
2018             IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2019
2020         if (data->sku_cap_band_52GHz_enable) {
2021                 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2022                         ch_num = nitems(iwm_nvm_channels);
2023                 else
2024                         ch_num = nitems(iwm_nvm_channels_8000);
2025                 memset(bands, 0, sizeof(bands));
2026                 setbit(bands, IEEE80211_MODE_11A);
2027                 iwm_add_channel_band(sc, chans, maxchans, nchans,
2028                     IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2029         }
2030 }
2031
/*
 * Determine the MAC address on family-8000 devices.  Preference order:
 * the NVM MAC-override section (if it holds a usable address), then the
 * WFMP PRPH registers.  If neither yields an address, data->hw_addr is
 * zeroed and an error is printed; the caller validates the result.
 */
static void
iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
	const uint16_t *mac_override, const uint16_t *nvm_hw)
{
	const uint8_t *hw_addr;

	if (mac_override) {
		/* Sentinel value Intel uses to mark "no real override". */
		static const uint8_t reserved_mac[] = {
			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
		};

		hw_addr = (const uint8_t *)(mac_override +
				 IWM_MAC_ADDRESS_OVERRIDE_8000);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 */
		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
		    iwm_is_valid_ether_addr(data->hw_addr) &&
		    !IEEE80211_IS_MULTICAST(data->hw_addr))
			return;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: mac address from nvm override section invalid\n",
		    __func__);
	}

	if (nvm_hw) {
		/* read the mac address from WFMP registers */
		uint32_t mac_addr0 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
		uint32_t mac_addr1 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

		/* Register bytes are reversed relative to wire order. */
		hw_addr = (const uint8_t *)&mac_addr0;
		data->hw_addr[0] = hw_addr[3];
		data->hw_addr[1] = hw_addr[2];
		data->hw_addr[2] = hw_addr[1];
		data->hw_addr[3] = hw_addr[0];

		hw_addr = (const uint8_t *)&mac_addr1;
		data->hw_addr[4] = hw_addr[1];
		data->hw_addr[5] = hw_addr[0];

		return;
	}

	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
	memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
2090
2091 static int
2092 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2093             const uint16_t *phy_sku)
2094 {
2095         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2096                 return le16_to_cpup(nvm_sw + IWM_SKU);
2097
2098         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2099 }
2100
2101 static int
2102 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2103 {
2104         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2105                 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2106         else
2107                 return le32_to_cpup((const uint32_t *)(nvm_sw +
2108                                                 IWM_NVM_VERSION_8000));
2109 }
2110
2111 static int
2112 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2113                   const uint16_t *phy_sku)
2114 {
2115         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2116                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2117
2118         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2119 }
2120
2121 static int
2122 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2123 {
2124         int n_hw_addr;
2125
2126         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2127                 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2128
2129         n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2130
2131         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2132 }
2133
2134 static void
2135 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2136                   uint32_t radio_cfg)
2137 {
2138         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2139                 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2140                 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2141                 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2142                 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2143                 return;
2144         }
2145
2146         /* set the radio configuration for family 8000 */
2147         data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2148         data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2149         data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2150         data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2151         data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2152         data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2153 }
2154
/*
 * Fill data->hw_addr with the device MAC address.  Pre-8000 parts store
 * it in the NVM HW section in little-endian 16-bit words; family 8000
 * uses the override/WFMP logic above.  Returns EINVAL if no valid
 * address was found, 0 otherwise.
 */
static int
iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
		   const uint16_t *nvm_hw, const uint16_t *mac_override)
{
#ifdef notyet /* for FAMILY 9000 */
	if (cfg->mac_addr_from_csr) {
		iwm_set_hw_address_from_csr(sc, data);
	} else
#endif
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);

		/* The byte order is little endian 16 bit, meaning 214365 */
		data->hw_addr[0] = hw_addr[1];
		data->hw_addr[1] = hw_addr[0];
		data->hw_addr[2] = hw_addr[3];
		data->hw_addr[3] = hw_addr[2];
		data->hw_addr[4] = hw_addr[5];
		data->hw_addr[5] = hw_addr[4];
	} else {
		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
	}

	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
		device_printf(sc->sc_dev, "no valid mac address was found\n");
		return EINVAL;
	}

	return 0;
}
2185
/*
 * Build an iwm_nvm_data structure from the raw NVM section images.
 * The structure is allocated here (with the per-family channel flag
 * array appended) and owned by the caller; free with iwm_free_nvm_data().
 * Returns NULL on allocation failure or if no valid MAC address exists.
 */
static struct iwm_nvm_data *
iwm_parse_nvm_data(struct iwm_softc *sc,
		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
		   const uint16_t *nvm_calib, const uint16_t *mac_override,
		   const uint16_t *phy_sku, const uint16_t *regulatory)
{
	struct iwm_nvm_data *data;
	uint32_t sku, radio_cfg;
	uint16_t lar_config;

	/* Trailing nvm_ch_flags array size depends on the device family. */
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		data = malloc(sizeof(*data) +
		    IWM_NUM_CHANNELS * sizeof(uint16_t),
		    M_DEVBUF, M_NOWAIT | M_ZERO);
	} else {
		data = malloc(sizeof(*data) +
		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
		    M_DEVBUF, M_NOWAIT | M_ZERO);
	}
	if (!data)
		return NULL;

	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);

	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
	iwm_set_radio_cfg(sc, data, radio_cfg);

	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	/* 11n is forced off here regardless of the SKU bit. */
	data->sku_cap_11n_enable = 0;

	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		/* The LAR config word moved in NVM version 0xE39. */
		uint16_t lar_offset = data->nvm_version < 0xE39 ?
				       IWM_NVM_LAR_OFFSET_8000_OLD :
				       IWM_NVM_LAR_OFFSET_8000;

		lar_config = le16_to_cpup(regulatory + lar_offset);
		data->lar_enabled = !!(lar_config &
				       IWM_NVM_LAR_ENABLED_8000);
	}

	/* If no valid mac address was found - bail out */
	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
		free(data, M_DEVBUF);
		return NULL;
	}

	/* Copy the channel flag table from the family-specific section. */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
		    IWM_NUM_CHANNELS * sizeof(uint16_t));
	} else {
		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
	}

	return data;
}
2246
2247 static void
2248 iwm_free_nvm_data(struct iwm_nvm_data *data)
2249 {
2250         if (data != NULL)
2251                 free(data, M_DEVBUF);
2252 }
2253
2254 static struct iwm_nvm_data *
2255 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2256 {
2257         const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2258
2259         /* Checking for required sections */
2260         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2261                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2262                     !sections[sc->cfg->nvm_hw_section_num].data) {
2263                         device_printf(sc->sc_dev,
2264                             "Can't parse empty OTP/NVM sections\n");
2265                         return NULL;
2266                 }
2267         } else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2268                 /* SW and REGULATORY sections are mandatory */
2269                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2270                     !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2271                         device_printf(sc->sc_dev,
2272                             "Can't parse empty OTP/NVM sections\n");
2273                         return NULL;
2274                 }
2275                 /* MAC_OVERRIDE or at least HW section must exist */
2276                 if (!sections[sc->cfg->nvm_hw_section_num].data &&
2277                     !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2278                         device_printf(sc->sc_dev,
2279                             "Can't parse mac_address, empty sections\n");
2280                         return NULL;
2281                 }
2282
2283                 /* PHY_SKU section is mandatory in B0 */
2284                 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2285                         device_printf(sc->sc_dev,
2286                             "Can't parse phy_sku in B0, empty sections\n");
2287                         return NULL;
2288                 }
2289         } else {
2290                 panic("unknown device family %d\n", sc->cfg->device_family);
2291         }
2292
2293         hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2294         sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2295         calib = (const uint16_t *)
2296             sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2297         regulatory = (const uint16_t *)
2298             sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2299         mac_override = (const uint16_t *)
2300             sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2301         phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2302
2303         return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2304             phy_sku, regulatory);
2305 }
2306
2307 static int
2308 iwm_nvm_init(struct iwm_softc *sc)
2309 {
2310         struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2311         int i, ret, section;
2312         uint32_t size_read = 0;
2313         uint8_t *nvm_buffer, *temp;
2314         uint16_t len;
2315
2316         memset(nvm_sections, 0, sizeof(nvm_sections));
2317
2318         if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2319                 return EINVAL;
2320
2321         /* load NVM values from nic */
2322         /* Read From FW NVM */
2323         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2324
2325         nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2326         if (!nvm_buffer)
2327                 return ENOMEM;
2328         for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2329                 /* we override the constness for initial read */
2330                 ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2331                                            &len, size_read);
2332                 if (ret)
2333                         continue;
2334                 size_read += len;
2335                 temp = malloc(len, M_DEVBUF, M_NOWAIT);
2336                 if (!temp) {
2337                         ret = ENOMEM;
2338                         break;
2339                 }
2340                 memcpy(temp, nvm_buffer, len);
2341
2342                 nvm_sections[section].data = temp;
2343                 nvm_sections[section].length = len;
2344         }
2345         if (!size_read)
2346                 device_printf(sc->sc_dev, "OTP is blank\n");
2347         free(nvm_buffer, M_DEVBUF);
2348
2349         sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2350         if (!sc->nvm_data)
2351                 return EINVAL;
2352         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2353                     "nvm version = %x\n", sc->nvm_data->nvm_version);
2354
2355         for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2356                 if (nvm_sections[i].data != NULL)
2357                         free(nvm_sections[i].data, M_DEVBUF);
2358         }
2359
2360         return 0;
2361 }
2362
/*
 * Upload one firmware section into device SRAM.
 *
 * The section is copied through the pre-allocated sc->fw_dma bounce
 * buffer in chunks of at most IWM_FH_MEM_TB_MAX_LENGTH bytes, each
 * pushed by iwm_pcie_load_firmware_chunk().  Destinations inside the
 * extended SRAM window require the IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE
 * bit to be set around the transfer and cleared afterwards.
 *
 * Returns 0 on success or the error from the failing chunk transfer.
 */
static int
iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
        const struct iwm_fw_desc *section)
{
        struct iwm_dma_info *dma = &sc->fw_dma;
        uint8_t *v_addr;
        bus_addr_t p_addr;
        uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
        int ret = 0;

        IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                    "%s: [%d] uCode section being loaded...\n",
                    __func__, section_num);

        v_addr = dma->vaddr;
        p_addr = dma->paddr;

        for (offset = 0; offset < section->len; offset += chunk_sz) {
                uint32_t copy_size, dst_addr;
                int extended_addr = FALSE;

                /* The final chunk may be shorter than chunk_sz. */
                copy_size = MIN(chunk_sz, section->len - offset);
                dst_addr = section->offset + offset;

                if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
                    dst_addr <= IWM_FW_MEM_EXTENDED_END)
                        extended_addr = TRUE;

                if (extended_addr)
                        iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
                                          IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

                /* Stage the chunk in the DMA-safe bounce buffer. */
                memcpy(v_addr, (const uint8_t *)section->data + offset,
                    copy_size);
                bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
                ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
                                                   copy_size);

                /* Restore normal addressing before acting on the result. */
                if (extended_addr)
                        iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
                                            IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

                if (ret) {
                        device_printf(sc->sc_dev,
                            "%s: Could not load the [%d] uCode section\n",
                            __func__, section_num);
                        break;
                }
        }

        return ret;
}
2415
2416 /*
2417  * ucode
2418  */
/*
 * DMA one firmware chunk into device SRAM via the FH service channel
 * and sleep until the interrupt handler flags completion.
 *
 * The register sequence (pause the channel, program the SRAM
 * destination, source address/length, buffer status, then re-enable
 * the channel) is performed under the NIC lock and must stay in this
 * order.
 *
 * Returns 0 on success, EBUSY if the NIC lock could not be taken, or
 * ETIMEDOUT if the completion interrupt never arrived.
 */
static int
iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
                             bus_addr_t phy_addr, uint32_t byte_cnt)
{
        int ret;

        sc->sc_fw_chunk_done = 0;

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* Pause the TX DMA channel while it is reprogrammed. */
        IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
            IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

        /* Destination address in device SRAM. */
        IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
            dst_addr);

        /* Low 32 bits of the host DMA buffer address. */
        IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
            phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

        /* High address bits plus the transfer byte count. */
        IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
            (iwm_get_dma_hi_addr(phy_addr)
             << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

        /* One TFD containing one valid TB. */
        IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
            1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
            1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
            IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

        /* Kick the channel to start the transfer. */
        IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
            IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
            IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
            IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

        iwm_nic_unlock(sc);

        /*
         * Wait for the interrupt handler to set sc_fw_chunk_done.
         * Each msleep() times out after hz ticks (about one second),
         * and a single timeout aborts the wait — despite the historical
         * "up to 5s" wording in earlier revisions of this comment.
         */
        ret = 0;
        while (!sc->sc_fw_chunk_done) {
                ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
                if (ret)
                        break;
        }

        if (ret != 0) {
                device_printf(sc->sc_dev,
                    "fw chunk addr 0x%x len %d failed to load\n",
                    dst_addr, byte_cnt);
                return ETIMEDOUT;
        }

        return 0;
}
2472
/*
 * Load the firmware sections belonging to one CPU (1 or 2) on
 * family-8000 devices using the secure-boot protocol: after each
 * section the growing section bitmap is reported through
 * IWM_FH_UCODE_LOAD_STATUS (shifted into the high half for CPU2), and
 * once this CPU's sections are exhausted the status register is set to
 * all-ones (low half only for CPU1).  *first_ucode_section is advanced
 * to the stopping index so the next call resumes past the separator.
 */
static int
iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
        const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
{
        int shift_param;
        int i, ret = 0, sec_num = 0x1;
        uint32_t val, last_read_idx = 0;

        /* CPU1 starts from section 0; CPU2 resumes past the separator. */
        if (cpu == 1) {
                shift_param = 0;
                *first_ucode_section = 0;
        } else {
                shift_param = 16;
                (*first_ucode_section)++;
        }

        for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
                last_read_idx = i;

                /*
                 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
                 * CPU1 to CPU2.
                 * PAGING_SEPARATOR_SECTION delimiter - separate between
                 * CPU2 non paged to CPU2 paging sec.
                 */
                if (!image->fw_sect[i].data ||
                    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
                    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
                        IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                                    "Break since Data not valid or Empty section, sec = %d\n",
                                    i);
                        break;
                }
                ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
                if (ret)
                        return ret;

                /* Notify the ucode of the loaded section number and status */
                if (iwm_nic_lock(sc)) {
                        val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
                        val = val | (sec_num << shift_param);
                        IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
                        /* Each loaded section sets one more low bit. */
                        sec_num = (sec_num << 1) | 0x1;
                        iwm_nic_unlock(sc);
                }
        }

        *first_ucode_section = last_read_idx;

        /*
         * NOTE(review): interrupts are enabled here, before the final
         * status write — presumably so the firmware's ALIVE response
         * can be received; confirm against the iwlwifi reference.
         */
        iwm_enable_interrupts(sc);

        if (iwm_nic_lock(sc)) {
                if (cpu == 1)
                        IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
                else
                        IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
                iwm_nic_unlock(sc);
        }

        return 0;
}
2534
2535 static int
2536 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2537         const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2538 {
2539         int shift_param;
2540         int i, ret = 0;
2541         uint32_t last_read_idx = 0;
2542
2543         if (cpu == 1) {
2544                 shift_param = 0;
2545                 *first_ucode_section = 0;
2546         } else {
2547                 shift_param = 16;
2548                 (*first_ucode_section)++;
2549         }
2550
2551         for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2552                 last_read_idx = i;
2553
2554                 /*
2555                  * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2556                  * CPU1 to CPU2.
2557                  * PAGING_SEPARATOR_SECTION delimiter - separate between
2558                  * CPU2 non paged to CPU2 paging sec.
2559                  */
2560                 if (!image->fw_sect[i].data ||
2561                     image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2562                     image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2563                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2564                                     "Break since Data not valid or Empty section, sec = %d\n",
2565                                      i);
2566                         break;
2567                 }
2568
2569                 ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2570                 if (ret)
2571                         return ret;
2572         }
2573
2574         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2575                 iwm_set_bits_prph(sc,
2576                                   IWM_CSR_UCODE_LOAD_STATUS_ADDR,
2577                                   (IWM_LMPM_CPU_UCODE_LOADING_COMPLETED |
2578                                    IWM_LMPM_CPU_HDRS_LOADING_COMPLETED |
2579                                    IWM_LMPM_CPU_UCODE_LOADING_STARTED) <<
2580                                         shift_param);
2581
2582         *first_ucode_section = last_read_idx;
2583
2584         return 0;
2585
2586 }
2587
2588 static int
2589 iwm_pcie_load_given_ucode(struct iwm_softc *sc,
2590         const struct iwm_fw_sects *image)
2591 {
2592         int ret = 0;
2593         int first_ucode_section;
2594
2595         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2596                      image->is_dual_cpus ? "Dual" : "Single");
2597
2598         /* load to FW the binary non secured sections of CPU1 */
2599         ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2600         if (ret)
2601                 return ret;
2602
2603         if (image->is_dual_cpus) {
2604                 /* set CPU2 header address */
2605                 if (iwm_nic_lock(sc)) {
2606                         iwm_write_prph(sc,
2607                                        IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2608                                        IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2609                         iwm_nic_unlock(sc);
2610                 }
2611
2612                 /* load to FW the binary sections of CPU2 */
2613                 ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2614                                                  &first_ucode_section);
2615                 if (ret)
2616                         return ret;
2617         }
2618
2619         iwm_enable_interrupts(sc);
2620
2621         /* release CPU reset */
2622         IWM_WRITE(sc, IWM_CSR_RESET, 0);
2623
2624         return 0;
2625 }
2626
2627 int
2628 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2629         const struct iwm_fw_sects *image)
2630 {
2631         int ret = 0;
2632         int first_ucode_section;
2633
2634         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2635                     image->is_dual_cpus ? "Dual" : "Single");
2636
2637         /* configure the ucode to be ready to get the secured image */
2638         /* release CPU reset */
2639         if (iwm_nic_lock(sc)) {
2640                 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2641                     IWM_RELEASE_CPU_RESET_BIT);
2642                 iwm_nic_unlock(sc);
2643         }
2644
2645         /* load to FW the binary Secured sections of CPU1 */
2646         ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2647             &first_ucode_section);
2648         if (ret)
2649                 return ret;
2650
2651         /* load to FW the binary sections of CPU2 */
2652         return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2653             &first_ucode_section);
2654 }
2655
/* XXX Get rid of this definition */
/*
 * Mask all interrupts except FH_TX, which must remain enabled to
 * observe the firmware-load DMA completions while an image is being
 * uploaded (see iwm_start_fw()).
 */
static inline void
iwm_enable_fw_load_int(struct iwm_softc *sc)
{
        IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
        sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
        IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
2664
/* XXX Add proper rfkill support code */
/*
 * Prepare the hardware and upload a firmware image.
 *
 * Sequence: take ownership of the device (may be held by AMT), ack and
 * disable interrupts, clear the rfkill handshake bits, init the NIC,
 * then mask everything except FH_TX (needed for load completions)
 * before pushing the image with the family-appropriate loader.
 *
 * Returns 0 on success, EIO if the hardware never became ready, or the
 * error from NIC init / image load.
 */
static int
iwm_start_fw(struct iwm_softc *sc,
        const struct iwm_fw_sects *fw)
{
        int ret;

        /* This may fail if AMT took ownership of the device */
        if (iwm_prepare_card_hw(sc)) {
                device_printf(sc->sc_dev,
                    "%s: Exit HW not ready\n", __func__);
                ret = EIO;
                goto out;
        }

        /* Ack any pending interrupts before masking them. */
        IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

        iwm_disable_interrupts(sc);

        /* make sure rfkill handshake bits are cleared */
        IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
        IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
            IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

        /* clear (again), then enable host interrupts */
        IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

        ret = iwm_nic_init(sc);
        if (ret) {
                device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
                goto out;
        }

        /*
         * Now, we load the firmware and don't want to be interrupted, even
         * by the RF-Kill interrupt (hence mask all the interrupt besides the
         * FH_TX interrupt which is needed to load the firmware). If the
         * RF-Kill switch is toggled, we will find out after having loaded
         * the firmware and return the proper value to the caller.
         */
        iwm_enable_fw_load_int(sc);

        /* really make sure rfkill handshake bits are cleared */
        /* maybe we should write a few times more?  just to make sure */
        IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
        IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

        /* Load the given image to the HW */
        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
                ret = iwm_pcie_load_given_ucode_8000(sc, fw);
        else
                ret = iwm_pcie_load_given_ucode(sc, fw);

        /* XXX re-check RF-Kill state */

out:
        return ret;
}
2723
2724 static int
2725 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2726 {
2727         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2728                 .valid = htole32(valid_tx_ant),
2729         };
2730
2731         return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2732             IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2733 }
2734
2735 /* iwlwifi: mvm/fw.c */
2736 static int
2737 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2738 {
2739         struct iwm_phy_cfg_cmd phy_cfg_cmd;
2740         enum iwm_ucode_type ucode_type = sc->cur_ucode;
2741
2742         /* Set parameters */
2743         phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
2744         phy_cfg_cmd.calib_control.event_trigger =
2745             sc->sc_default_calib[ucode_type].event_trigger;
2746         phy_cfg_cmd.calib_control.flow_trigger =
2747             sc->sc_default_calib[ucode_type].flow_trigger;
2748
2749         IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2750             "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2751         return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2752             sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2753 }
2754
/*
 * Notification-wait callback for the IWM_MVM_ALIVE message.
 *
 * The three ALIVE layouts (v1/v2/v3) are distinguished purely by
 * payload length.  On a match, the error/log event table pointers and
 * the SCD base address are recorded in the softc, and
 * alive_data->valid is set from the firmware status word.  A nonzero
 * UMAC error-info address (v2/v3) enables UMAC log support.  A payload
 * matching none of the sizes updates nothing.  Always returns TRUE so
 * the waiter stops after the first ALIVE packet.
 */
static int
iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
{
        struct iwm_mvm_alive_data *alive_data = data;
        struct iwm_mvm_alive_resp_ver1 *palive1;
        struct iwm_mvm_alive_resp_ver2 *palive2;
        struct iwm_mvm_alive_resp *palive;

        if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
                palive1 = (void *)pkt->data;

                /* v1 has no UMAC error-info address. */
                sc->support_umac_log = FALSE;
                sc->error_event_table =
                        le32toh(palive1->error_event_table_ptr);
                sc->log_event_table =
                        le32toh(palive1->log_event_table_ptr);
                alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);

                alive_data->valid = le16toh(palive1->status) ==
                                    IWM_ALIVE_STATUS_OK;
                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                            "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
                             le16toh(palive1->status), palive1->ver_type,
                             palive1->ver_subtype, palive1->flags);
        } else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
                palive2 = (void *)pkt->data;
                sc->error_event_table =
                        le32toh(palive2->error_event_table_ptr);
                sc->log_event_table =
                        le32toh(palive2->log_event_table_ptr);
                alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
                sc->umac_error_event_table =
                        le32toh(palive2->error_info_addr);

                alive_data->valid = le16toh(palive2->status) ==
                                    IWM_ALIVE_STATUS_OK;
                /* A nonzero UMAC table address means UMAC logs exist. */
                if (sc->umac_error_event_table)
                        sc->support_umac_log = TRUE;

                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                            "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
                            le16toh(palive2->status), palive2->ver_type,
                            palive2->ver_subtype, palive2->flags);

                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                            "UMAC version: Major - 0x%x, Minor - 0x%x\n",
                            palive2->umac_major, palive2->umac_minor);
        } else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
                palive = (void *)pkt->data;

                sc->error_event_table =
                        le32toh(palive->error_event_table_ptr);
                sc->log_event_table =
                        le32toh(palive->log_event_table_ptr);
                alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
                sc->umac_error_event_table =
                        le32toh(palive->error_info_addr);

                alive_data->valid = le16toh(palive->status) ==
                                    IWM_ALIVE_STATUS_OK;
                if (sc->umac_error_event_table)
                        sc->support_umac_log = TRUE;

                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                            "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
                            le16toh(palive->status), palive->ver_type,
                            palive->ver_subtype, palive->flags);

                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                            "UMAC version: Major - 0x%x, Minor - 0x%x\n",
                            le32toh(palive->umac_major),
                            le32toh(palive->umac_minor));
        }

        return TRUE;
}
2831
2832 static int
2833 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2834         struct iwm_rx_packet *pkt, void *data)
2835 {
2836         struct iwm_phy_db *phy_db = data;
2837
2838         if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2839                 if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2840                         device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2841                             __func__, pkt->hdr.code);
2842                 }
2843                 return TRUE;
2844         }
2845
2846         if (iwm_phy_db_set_section(phy_db, pkt)) {
2847                 device_printf(sc->sc_dev,
2848                     "%s: iwm_phy_db_set_section failed\n", __func__);
2849         }
2850
2851         return FALSE;
2852 }
2853
/*
 * Load the firmware image of the given type and block until the ALIVE
 * notification arrives (the softc lock is dropped while sleeping).
 *
 * On any failure sc->cur_ucode is rolled back to the previously
 * running type.  On success the SCD base address reported in the ALIVE
 * data is programmed, FW paging is configured if the image carries a
 * paging section, and sc->ucode_loaded is set.
 */
static int
iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
        enum iwm_ucode_type ucode_type)
{
        struct iwm_notification_wait alive_wait;
        struct iwm_mvm_alive_data alive_data;
        const struct iwm_fw_sects *fw;
        enum iwm_ucode_type old_type = sc->cur_ucode;
        int error;
        static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };

        if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
                device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
                        error);
                return error;
        }
        fw = &sc->sc_fw.fw_sects[ucode_type];
        sc->cur_ucode = ucode_type;
        sc->ucode_loaded = FALSE;

        /* Register the ALIVE waiter before starting the firmware. */
        memset(&alive_data, 0, sizeof(alive_data));
        iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
                                   alive_cmd, nitems(alive_cmd),
                                   iwm_alive_fn, &alive_data);

        error = iwm_start_fw(sc, fw);
        if (error) {
                device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
                sc->cur_ucode = old_type;
                iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
                return error;
        }

        /*
         * Some things may run in the background now, but we
         * just wait for the ALIVE notification here.
         */
        IWM_UNLOCK(sc);
        error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
                                      IWM_MVM_UCODE_ALIVE_TIMEOUT);
        IWM_LOCK(sc);
        if (error) {
                /* On 8000-family parts dump the secure-boot CPU status
                 * registers to help diagnose the failed load. */
                if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
                        uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
                        if (iwm_nic_lock(sc)) {
                                a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
                                b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
                                iwm_nic_unlock(sc);
                        }
                        device_printf(sc->sc_dev,
                            "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
                            a, b);
                }
                sc->cur_ucode = old_type;
                return error;
        }

        if (!alive_data.valid) {
                device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
                    __func__);
                sc->cur_ucode = old_type;
                return EIO;
        }

        iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);

        /*
         * configure and operate fw paging mechanism.
         * driver configures the paging flow only once, CPU2 paging image
         * included in the IWM_UCODE_INIT image.
         */
        if (fw->paging_mem_size) {
                error = iwm_save_fw_paging(sc, fw);
                if (error) {
                        device_printf(sc->sc_dev,
                            "%s: failed to save the FW paging image\n",
                            __func__);
                        return error;
                }

                error = iwm_send_paging_cmd(sc, fw);
                if (error) {
                        device_printf(sc->sc_dev,
                            "%s: failed to send the paging cmd\n", __func__);
                        iwm_free_fw_paging(sc);
                        return error;
                }
        }

        if (!error)
                sc->ucode_loaded = TRUE;
        return error;
}
2947
2948 /*
2949  * mvm misc bits
2950  */
2951
2952 /*
2953  * follows iwlwifi/fw.c
2954  */
/*
 * Boot the INIT firmware image and run its calibration flow.
 *
 * With justnvm set, only the NVM is read and the MAC address copied
 * out; note that the justnvm path deliberately jumps to the "error"
 * label even on success, so the notification waiter is removed before
 * returning.  Otherwise BT coex config, the valid TX antennas and the
 * PHY configuration are sent, and the function sleeps (softc lock
 * dropped) until the calibration-complete notification arrives.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
        struct iwm_notification_wait calib_wait;
        static const uint16_t init_complete[] = {
                IWM_INIT_COMPLETE_NOTIF,
                IWM_CALIB_RES_NOTIF_PHY_DB
        };
        int ret;

        /* do not operate with rfkill switch turned on */
        if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
                device_printf(sc->sc_dev,
                    "radio is disabled by hardware switch\n");
                return EPERM;
        }

        /* Register the calibration waiter before starting the firmware. */
        iwm_init_notification_wait(sc->sc_notif_wait,
                                   &calib_wait,
                                   init_complete,
                                   nitems(init_complete),
                                   iwm_wait_phy_db_entry,
                                   sc->sc_phy_db);

        /* Will also start the device */
        ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
        if (ret) {
                device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
                    ret);
                goto error;
        }

        if (justnvm) {
                /* Read nvm */
                ret = iwm_nvm_init(sc);
                if (ret) {
                        device_printf(sc->sc_dev, "failed to read nvm\n");
                        goto error;
                }
                IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
                /* Success path: still remove the waiter via "error". */
                goto error;
        }

        ret = iwm_send_bt_init_conf(sc);
        if (ret) {
                device_printf(sc->sc_dev,
                    "failed to send bt coex configuration: %d\n", ret);
                goto error;
        }

        /* Send TX valid antennas before triggering calibrations */
        ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
        if (ret) {
                device_printf(sc->sc_dev,
                    "failed to send antennas before calibration: %d\n", ret);
                goto error;
        }

        /*
         * Send phy configurations command to init uCode
         * to start the 16.0 uCode init image internal calibrations.
         */
        ret = iwm_send_phy_cfg_cmd(sc);
        if (ret) {
                device_printf(sc->sc_dev,
                    "%s: Failed to run INIT calibrations: %d\n",
                    __func__, ret);
                goto error;
        }

        /*
         * Nothing to do but wait for the init complete notification
         * from the firmware.
         */
        IWM_UNLOCK(sc);
        ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
            IWM_MVM_UCODE_CALIB_TIMEOUT);
        IWM_LOCK(sc);


        goto out;

error:
        iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
out:
        return ret;
}
3042
3043 /*
3044  * receive side
3045  */
3046
3047 /* (re)stock rx ring, called at init-time and at runtime */
/*
 * Attach a fresh jumbo mbuf to RX ring slot idx.
 *
 * The mbuf is DMA-loaded into ring->spare_map first, so the slot's old
 * mapping is only torn down once the new buffer is known good; the two
 * maps are then swapped.  The hardware RX descriptor holds the DMA
 * address shifted right by 8, hence the 256-byte alignment assertion.
 * Note the size argument is not used here: buffers are always
 * IWM_RBUF_SIZE.
 *
 * Returns 0 on success, ENOBUFS if no mbuf was available, or the
 * bus_dma error from loading the map.
 */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
        struct iwm_rx_ring *ring = &sc->rxq;
        struct iwm_rx_data *data = &ring->data[idx];
        struct mbuf *m;
        bus_dmamap_t dmamap;
        bus_dma_segment_t seg;
        int nsegs, error;

        m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
        if (m == NULL)
                return ENOBUFS;

        m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
        error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
            &seg, &nsegs, BUS_DMA_NOWAIT);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "%s: can't map mbuf, error %d\n", __func__, error);
                m_freem(m);
                return error;
        }

        if (data->m != NULL)
                bus_dmamap_unload(ring->data_dmat, data->map);

        /* Swap ring->spare_map with data->map */
        dmamap = data->map;
        data->map = ring->spare_map;
        ring->spare_map = dmamap;

        bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
        data->m = m;

        /* Update RX descriptor. */
        KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
        ring->desc[idx] = htole32(seg.ds_addr >> 8);
        bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
            BUS_DMASYNC_PREWRITE);

        return 0;
}
3091
3092 /* iwlwifi: mvm/rx.c */
3093 /*
3094  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3095  * values are reported by the fw as positive values - need to negate
3096  * to obtain their dBM.  Account for missing antennas by replacing 0
3097  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3098  */
3099 static int
3100 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3101 {
3102         int energy_a, energy_b, energy_c, max_energy;
3103         uint32_t val;
3104
3105         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3106         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3107             IWM_RX_INFO_ENERGY_ANT_A_POS;
3108         energy_a = energy_a ? -energy_a : -256;
3109         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3110             IWM_RX_INFO_ENERGY_ANT_B_POS;
3111         energy_b = energy_b ? -energy_b : -256;
3112         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3113             IWM_RX_INFO_ENERGY_ANT_C_POS;
3114         energy_c = energy_c ? -energy_c : -256;
3115         max_energy = MAX(energy_a, energy_b);
3116         max_energy = MAX(max_energy, energy_c);
3117
3118         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3119             "energy In A %d B %d C %d , and max %d\n",
3120             energy_a, energy_b, energy_c, max_energy);
3121
3122         return max_energy;
3123 }
3124
3125 static void
3126 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3127 {
3128         struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3129
3130         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3131
3132         memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3133 }
3134
3135 /*
3136  * Retrieve the average noise (in dBm) among receivers.
3137  */
3138 static int
3139 iwm_get_noise(struct iwm_softc *sc,
3140     const struct iwm_mvm_statistics_rx_non_phy *stats)
3141 {
3142         int i, total, nbant, noise;
3143
3144         total = nbant = noise = 0;
3145         for (i = 0; i < 3; i++) {
3146                 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3147                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3148                     __func__,
3149                     i,
3150                     noise);
3151
3152                 if (noise) {
3153                         total += noise;
3154                         nbant++;
3155                 }
3156         }
3157
3158         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3159             __func__, nbant, total);
3160 #if 0
3161         /* There should be at least one antenna but check anyway. */
3162         return (nbant == 0) ? -127 : (total / nbant) - 107;
3163 #else
3164         /* For now, just hard-code it to -96 to be safe */
3165         return (-96);
3166 #endif
3167 }
3168
3169 /*
3170  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3171  *
3172  * Handles the actual data of the Rx packet from the fw
3173  */
3174 static boolean_t
3175 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3176         boolean_t stolen)
3177 {
3178         struct ieee80211com *ic = &sc->sc_ic;
3179         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3180         struct ieee80211_frame *wh;
3181         struct ieee80211_node *ni;
3182         struct ieee80211_rx_stats rxs;
3183         struct iwm_rx_phy_info *phy_info;
3184         struct iwm_rx_mpdu_res_start *rx_res;
3185         struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3186         uint32_t len;
3187         uint32_t rx_pkt_status;
3188         int rssi;
3189
3190         phy_info = &sc->sc_last_phy_info;
3191         rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3192         wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3193         len = le16toh(rx_res->byte_count);
3194         rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3195
3196         if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3197                 device_printf(sc->sc_dev,
3198                     "dsp size out of range [0,20]: %d\n",
3199                     phy_info->cfg_phy_cnt);
3200                 goto fail;
3201         }
3202
3203         if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3204             !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3205                 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3206                     "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3207                 goto fail;
3208         }
3209
3210         rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3211
3212         /* Map it to relative value */
3213         rssi = rssi - sc->sc_noise;
3214
3215         /* replenish ring for the buffer we're going to feed to the sharks */
3216         if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3217                 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3218                     __func__);
3219                 goto fail;
3220         }
3221
3222         m->m_data = pkt->data + sizeof(*rx_res);
3223         m->m_pkthdr.len = m->m_len = len;
3224
3225         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3226             "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3227
3228         ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3229
3230         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3231             "%s: phy_info: channel=%d, flags=0x%08x\n",
3232             __func__,
3233             le16toh(phy_info->channel),
3234             le16toh(phy_info->phy_flags));
3235
3236         /*
3237          * Populate an RX state struct with the provided information.
3238          */
3239         bzero(&rxs, sizeof(rxs));
3240         rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3241         rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3242         rxs.c_ieee = le16toh(phy_info->channel);
3243         if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3244                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3245         } else {
3246                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3247         }
3248
3249         /* rssi is in 1/2db units */
3250         rxs.c_rssi = rssi * 2;
3251         rxs.c_nf = sc->sc_noise;
3252         if (ieee80211_add_rx_params(m, &rxs) == 0) {
3253                 if (ni)
3254                         ieee80211_free_node(ni);
3255                 goto fail;
3256         }
3257
3258         if (ieee80211_radiotap_active_vap(vap)) {
3259                 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3260
3261                 tap->wr_flags = 0;
3262                 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3263                         tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3264                 tap->wr_chan_freq = htole16(rxs.c_freq);
3265                 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3266                 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3267                 tap->wr_dbm_antsignal = (int8_t)rssi;
3268                 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3269                 tap->wr_tsft = phy_info->system_timestamp;
3270                 switch (phy_info->rate) {
3271                 /* CCK rates. */
3272                 case  10: tap->wr_rate =   2; break;
3273                 case  20: tap->wr_rate =   4; break;
3274                 case  55: tap->wr_rate =  11; break;
3275                 case 110: tap->wr_rate =  22; break;
3276                 /* OFDM rates. */
3277                 case 0xd: tap->wr_rate =  12; break;
3278                 case 0xf: tap->wr_rate =  18; break;
3279                 case 0x5: tap->wr_rate =  24; break;
3280                 case 0x7: tap->wr_rate =  36; break;
3281                 case 0x9: tap->wr_rate =  48; break;
3282                 case 0xb: tap->wr_rate =  72; break;
3283                 case 0x1: tap->wr_rate =  96; break;
3284                 case 0x3: tap->wr_rate = 108; break;
3285                 /* Unknown rate: should not happen. */
3286                 default:  tap->wr_rate =   0;
3287                 }
3288         }
3289
3290         IWM_UNLOCK(sc);
3291         if (ni != NULL) {
3292                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3293                 ieee80211_input_mimo(ni, m);
3294                 ieee80211_free_node(ni);
3295         } else {
3296                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3297                 ieee80211_input_mimo_all(ic, m);
3298         }
3299         IWM_LOCK(sc);
3300
3301         return TRUE;
3302
3303 fail:
3304         counter_u64_add(ic->ic_ierrors, 1);
3305         return FALSE;
3306 }
3307
/*
 * Process the TX response for a single-frame TX command: translate the
 * firmware status into a net80211 ratectl status, feed it to the rate
 * control module, and report whether the transmission failed.
 *
 * Returns non-zero when the frame was NOT successfully transmitted.
 */
static int
iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
	struct iwm_node *in)
{
	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
	struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
	struct ieee80211_node *ni = &in->in_ni;
	/* Low bits of the status word carry the completion code. */
	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;

	/* This path only handles non-aggregated, single-frame responses. */
	KASSERT(tx_resp->frame_count == 1, ("too many frames"));

	/* Update rate control statistics. */
	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
	    __func__,
	    (int) le16toh(tx_resp->status.status),
	    (int) le16toh(tx_resp->status.sequence),
	    tx_resp->frame_count,
	    tx_resp->bt_kill_count,
	    tx_resp->failure_rts,
	    tx_resp->failure_frame,
	    le32toh(tx_resp->initial_rate),
	    (int) le16toh(tx_resp->wireless_media_time));

	/* Retry counts are always valid; advertise both to ratectl. */
	txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
		     IEEE80211_RATECTL_STATUS_LONG_RETRY;
	txs->short_retries = tx_resp->failure_rts;
	txs->long_retries = tx_resp->failure_frame;
	if (status != IWM_TX_STATUS_SUCCESS &&
	    status != IWM_TX_STATUS_DIRECT_DONE) {
		/* Map the firmware failure reason to the closest net80211 one. */
		switch (status) {
		case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
			txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
			break;
		case IWM_TX_STATUS_FAIL_LONG_LIMIT:
			txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
			break;
		case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
			txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
			break;
		default:
			txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
			break;
		}
	} else {
		txs->status = IEEE80211_RATECTL_TX_SUCCESS;
	}
	ieee80211_ratectl_tx_complete(ni, txs);

	return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
}
3358
/*
 * Handle a TX command response: complete the matching TX ring slot,
 * release its DMA resources, hand completion status to net80211, and
 * restart transmission if the ring drains below the low watermark.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	/* qid/idx in the response identify the originating ring slot. */
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;
	struct mbuf *m = txd->m;
	int status;

	KASSERT(txd->done == 0, ("txd not done"));
	KASSERT(txd->in != NULL, ("txd without node"));
	KASSERT(txd->m != NULL, ("txd without mbuf"));

	/* Progress was made; disarm the watchdog. */
	sc->sc_tx_timer = 0;

	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "free txd %p, in %p\n", txd, txd->in);
	/* Clear the slot before completing; tx_complete may recurse into TX. */
	txd->done = 1;
	txd->m = NULL;
	txd->in = NULL;

	/* Frees the mbuf and drops the node reference. */
	ieee80211_tx_complete(&in->in_ni, m, status);

	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		/* Restart TX only once all rings have room again. */
		if (sc->qfullmsk == 0) {
			iwm_start(sc);
		}
	}
}
3398
3399 /*
3400  * transmit side
3401  */
3402
/*
 * Process a "command done" firmware notification.  This is where we wakeup
 * processes waiting for a synchronous command completion.
 * from if_iwn
 */
static void
iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
	struct iwm_tx_data *data;

	/* Only responses on the command queue are command acks. */
	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
		return; /* Not a command ack. */
	}

	/* XXX wide commands? */
	IWM_DPRINTF(sc, IWM_DEBUG_CMD,
	    "cmd notification type 0x%x qid %d idx %d\n",
	    pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);

	data = &ring->data[pkt->hdr.idx];

	/* If the command was mapped in an mbuf, free it. */
	if (data->m != NULL) {
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	/* Wake any thread sleeping in iwm_send_cmd() on this slot. */
	wakeup(&ring->desc[pkt->hdr.idx]);

	/*
	 * Sanity check: with 'queued' outstanding commands, the ack for
	 * the oldest one should land exactly 'queued' slots behind cur.
	 */
	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
		device_printf(sc->sc_dev,
		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
		/* XXX call iwm_force_nmi() */
	}

	KASSERT(ring->queued > 0, ("ring->queued is empty?"));
	ring->queued--;
	/* Last outstanding command: let the NIC drop the cmd-in-flight state. */
	if (ring->queued == 0)
		iwm_pcie_clear_cmd_in_flight(sc);
}
3447
#if 0
/*
 * necessary only for block ack mode
 *
 * NOTE(review): currently compiled out.  Writes the frame's byte count
 * into the TX scheduler's byte-count table so the firmware scheduler
 * can account for queued data; kept for when A-MPDU support lands.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	/* Some firmware expects the length in dwords rather than bytes. */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	/*
	 * presumably the hardware reads a duplicated window past the end
	 * of the table for low indices — TODO confirm against iwlwifi.
	 */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
}
#endif
3480
3481 /*
3482  * Take an 802.11 (non-n) rate, find the relevant rate
3483  * table entry.  return the index into in_ridx[].
3484  *
3485  * The caller then uses that index back into in_ridx
3486  * to figure out the rate index programmed /into/
3487  * the firmware for this given node.
3488  */
3489 static int
3490 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3491     uint8_t rate)
3492 {
3493         int i;
3494         uint8_t r;
3495
3496         for (i = 0; i < nitems(in->in_ridx); i++) {
3497                 r = iwm_rates[in->in_ridx[i]].rate;
3498                 if (rate == r)
3499                         return (i);
3500         }
3501
3502         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3503             "%s: couldn't find an entry for rate=%d\n",
3504             __func__,
3505             rate);
3506
3507         /* XXX Return the first */
3508         /* XXX TODO: have it return the /lowest/ */
3509         return (0);
3510 }
3511
3512 static int
3513 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3514 {
3515         int i;
3516
3517         for (i = 0; i < nitems(iwm_rates); i++) {
3518                 if (iwm_rates[i].rate == rate)
3519                         return (i);
3520         }
3521         /* XXX error? */
3522         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3523             "%s: couldn't find an entry for rate=%d\n",
3524             __func__,
3525             rate);
3526         return (0);
3527 }
3528
/*
 * Fill in the rate related information for a transmit command.
 *
 * Chooses the TX rate for the frame (fixed rates for management,
 * control, EAPOL and multicast frames; the rate-control table for
 * normal data frames), programs it into the TX command, and returns
 * the selected iwm_rates[] entry so the caller can fill radiotap.
 */
static const struct iwm_rate *
iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
	struct mbuf *m, struct iwm_tx_cmd *tx)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_frame *wh;
	const struct ieee80211_txparam *tp = ni->ni_txparms;
	const struct iwm_rate *rinfo;
	int type;
	int ridx, rate_flags;

	wh = mtod(m, struct ieee80211_frame *);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;

	/* Management/control/EAPOL frames go out at the fixed mgmt rate. */
	if (type == IEEE80211_FC0_TYPE_MGT ||
	    type == IEEE80211_FC0_TYPE_CTL ||
	    (m->m_flags & M_EAPOL) != 0) {
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
	} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
		/* User pinned a fixed unicast rate. */
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
	} else {
		int i;

		/* for data frames, use RS table */
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
		/* XXX pass pktlen */
		(void) ieee80211_ratectl_rate(ni, NULL, 0);
		i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
		ridx = in->in_ridx[i];

		/* This is the index into the programmed table */
		tx->initial_rate_index = i;
		/* Let the firmware walk the node's rate table on retries. */
		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);

		IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
		    "%s: start with i=%d, txrate %d\n",
		    __func__, i, iwm_rates[ridx].rate);
	}

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
	    "%s: frame type=%d txrate %d\n",
		__func__, type, iwm_rates[ridx].rate);

	rinfo = &iwm_rates[ridx];

	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
	    __func__, ridx,
	    rinfo->rate,
	    !! (IWM_RIDX_IS_CCK(ridx))
	    );

	/* XXX TODO: hard-coded TX antenna? */
	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
	if (IWM_RIDX_IS_CCK(ridx))
		rate_flags |= IWM_RATE_MCS_CCK_MSK;
	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);

	return rinfo;
}
3602
/* Size of the first transfer buffer: the leading chunk of the TX command. */
#define TB0_SIZE 16
/*
 * Queue a frame for transmission on TX ring 'ac'.
 *
 * Builds the firmware TX command (rate, flags, station id, 802.11
 * header copy), DMA-maps the payload, fills the TFD descriptor with
 * the command and payload segments, and kicks the ring's write
 * pointer.  Called with the driver lock held.  Consumes the mbuf on
 * error; on success ownership passes to the completion path.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_node *in = IWM_NODE(ni);
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
	int nsegs;
	uint8_t tid, type;
	int i, totlen, error, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	tid = 0;
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	/* Pick the TX rate and program it into the command. */
	rinfo = iwm_tx_fill_cmd(sc, in, m, tx);

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX && do software encryption. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rinfo->rate;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		ieee80211_radiotap_tx(vap, m);
	}


	totlen = m->m_pkthdr.len;

	flags = 0;
	/* Unicast frames expect an ACK from the peer. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* RTS/CTS protection for large unicast data frames. */
	if (type == IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	/* Non-data and multicast traffic is addressed to the aux station. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	/* Pick a PM frame timeout suited to the management subtype. */
	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
		} else {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
		}
	} else {
		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	/* The header travels inside the TX command; only payload is mapped. */
	m_adj(m, hdrlen);
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		if (error != EFBIG) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			m_freem(m);
			return (ENOBUFS);
		}
		m = m1;

		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending txd %p, in %p\n", data, data->in);
	KASSERT(data->in != NULL, ("node is NULL"));

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
	    ring->qid, ring->cur, totlen, nsegs,
	    le32toh(tx->tx_flags),
	    le32toh(tx->rate_n_flags),
	    tx->initial_rate_index
	    );

	/* Fill TX descriptor. */
	/* TBs 0 and 1 cover the command+header; payload follows. */
	desc->num_tbs = 2 + nsegs;

	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	for (i = 0; i < nsegs; i++) {
		seg = &segs[i];
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	bus_dmamap_sync(ring->data_dmat, data->map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
3820
3821 static int
3822 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3823     const struct ieee80211_bpf_params *params)
3824 {
3825         struct ieee80211com *ic = ni->ni_ic;
3826         struct iwm_softc *sc = ic->ic_softc;
3827         int error = 0;
3828
3829         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3830             "->%s begin\n", __func__);
3831
3832         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3833                 m_freem(m);
3834                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3835                     "<-%s not RUNNING\n", __func__);
3836                 return (ENETDOWN);
3837         }
3838
3839         IWM_LOCK(sc);
3840         /* XXX fix this */
3841         if (params == NULL) {
3842                 error = iwm_tx(sc, m, ni, 0);
3843         } else {
3844                 error = iwm_tx(sc, m, ni, 0);
3845         }
3846         sc->sc_tx_timer = 5;
3847         IWM_UNLOCK(sc);
3848
3849         return (error);
3850 }
3851
3852 /*
3853  * mvm/tx.c
3854  */
3855
3856 /*
3857  * Note that there are transports that buffer frames before they reach
3858  * the firmware. This means that after flush_tx_path is called, the
3859  * queue might not be empty. The race-free way to handle this is to:
3860  * 1) set the station as draining
3861  * 2) flush the Tx path
3862  * 3) wait for the transport queues to be empty
3863  */
3864 int
3865 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3866 {
3867         int ret;
3868         struct iwm_tx_path_flush_cmd flush_cmd = {
3869                 .queues_ctl = htole32(tfd_msk),
3870                 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3871         };
3872
3873         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3874             sizeof(flush_cmd), &flush_cmd);
3875         if (ret)
3876                 device_printf(sc->sc_dev,
3877                     "Flushing tx queue failed: %d\n", ret);
3878         return ret;
3879 }
3880
3881 /*
3882  * BEGIN mvm/quota.c
3883  */
3884
3885 static int
3886 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
3887 {
3888         struct iwm_time_quota_cmd cmd;
3889         int i, idx, ret, num_active_macs, quota, quota_rem;
3890         int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3891         int n_ifs[IWM_MAX_BINDINGS] = {0, };
3892         uint16_t id;
3893
3894         memset(&cmd, 0, sizeof(cmd));
3895
3896         /* currently, PHY ID == binding ID */
3897         if (ivp) {
3898                 id = ivp->phy_ctxt->id;
3899                 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3900                 colors[id] = ivp->phy_ctxt->color;
3901
3902                 if (1)
3903                         n_ifs[id] = 1;
3904         }
3905
3906         /*
3907          * The FW's scheduling session consists of
3908          * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3909          * equally between all the bindings that require quota
3910          */
3911         num_active_macs = 0;
3912         for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3913                 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3914                 num_active_macs += n_ifs[i];
3915         }
3916
3917         quota = 0;
3918         quota_rem = 0;
3919         if (num_active_macs) {
3920                 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3921                 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3922         }
3923
3924         for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3925                 if (colors[i] < 0)
3926                         continue;
3927
3928                 cmd.quotas[idx].id_and_color =
3929                         htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3930
3931                 if (n_ifs[i] <= 0) {
3932                         cmd.quotas[idx].quota = htole32(0);
3933                         cmd.quotas[idx].max_duration = htole32(0);
3934                 } else {
3935                         cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3936                         cmd.quotas[idx].max_duration = htole32(0);
3937                 }
3938                 idx++;
3939         }
3940
3941         /* Give the remainder of the session to the first binding */
3942         cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3943
3944         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3945             sizeof(cmd), &cmd);
3946         if (ret)
3947                 device_printf(sc->sc_dev,
3948                     "%s: Failed to send quota: %d\n", __func__, ret);
3949         return ret;
3950 }
3951
3952 /*
3953  * END mvm/quota.c
3954  */
3955
3956 /*
3957  * ieee80211 routines
3958  */
3959
3960 /*
3961  * Change to AUTH state in 80211 state machine.  Roughly matches what
3962  * Linux does in bss_info_changed().
3963  */
static int
iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
{
	struct ieee80211_node *ni;
	struct iwm_node *in;
	struct iwm_vap *iv = IWM_VAP(vap);
	uint32_t duration;
	int error;

	/*
	 * XXX i have a feeling that the vap node is being
	 * freed from underneath us. Grr.
	 */
	/* Hold a reference on the bss node for the duration of this call. */
	ni = ieee80211_ref_node(vap->iv_bss);
	in = IWM_NODE(ni);
	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
	    "%s: called; vap=%p, bss ni=%p\n",
	    __func__,
	    vap,
	    ni);

	/* Not associated yet; set again on successful RUN transition. */
	in->in_assoc = 0;

	/*
	 * Firmware bug - it'll crash if the beacon interval is less
	 * than 16. We can't avoid connecting at all, so refuse the
	 * station state change, this will cause net80211 to abandon
	 * attempts to connect to this AP, and eventually wpa_s will
	 * blacklist the AP...
	 */
	if (ni->ni_intval < 16) {
		device_printf(sc->sc_dev,
		    "AP %s beacon interval is %d, refusing due to firmware bug!\n",
		    ether_sprintf(ni->ni_bssid), ni->ni_intval);
		error = EINVAL;
		goto out;
	}

	/* Program the multicast filter before joining. */
	error = iwm_allow_mcast(vap, sc);
	if (error) {
		device_printf(sc->sc_dev,
		    "%s: failed to set multicast\n", __func__);
		goto out;
	}

	/*
	 * This is where it deviates from what Linux does.
	 *
	 * Linux iwlwifi doesn't reset the nic each time, nor does it
	 * call ctxt_add() here.  Instead, it adds it during vap creation,
	 * and always does a mac_ctx_changed().
	 *
	 * The openbsd port doesn't attempt to do that - it reset things
	 * at odd states and does the add here.
	 *
	 * So, until the state handling is fixed (ie, we never reset
	 * the NIC except for a firmware failure, which should drag
	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
	 * contexts that are required), let's do a dirty hack here.
	 */
	if (iv->is_uploaded) {
		/* MAC context already known to firmware: just update it. */
		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update MAC\n", __func__);
			goto out;
		}
	} else {
		/* First time through: create the MAC context in firmware. */
		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to add MAC\n", __func__);
			goto out;
		}
	}

	/* Point PHY context 0 at the bss channel (1x1 chains). */
	if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
	    in->in_ni.ni_chan, 1, 1)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: failed update phy ctxt\n", __func__);
		goto out;
	}
	iv->phy_ctxt = &sc->sc_phyctxt[0];

	/* Bind the MAC context to the PHY context. */
	if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: binding update cmd\n", __func__);
		goto out;
	}
	/*
	 * Authentication becomes unreliable when powersaving is left enabled
	 * here. Powersaving will be activated again when association has
	 * finished or is aborted.
	 */
	iv->ps_disabled = TRUE;
	error = iwm_mvm_power_update_mac(sc);
	iv->ps_disabled = FALSE;
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: failed to update power management\n",
		    __func__);
		goto out;
	}
	/* Tell the firmware about the AP station we're authenticating to. */
	if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: failed to add sta\n", __func__);
		goto out;
	}

	/*
	 * Prevent the FW from wandering off channel during association
	 * by "protecting" the session with a time event.
	 */
	/* XXX duration is in units of TU, not MS */
	duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
	iwm_mvm_protect_session(sc, iv, duration, 500 /* XXX magic number */);
	DELAY(100);

	error = 0;
out:
	/* Drop the bss node reference taken at entry. */
	ieee80211_free_node(ni);
	return (error);
}
4085
4086 static int
4087 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
4088 {
4089         uint32_t tfd_msk;
4090
4091         /*
4092          * Ok, so *technically* the proper set of calls for going
4093          * from RUN back to SCAN is:
4094          *
4095          * iwm_mvm_power_mac_disable(sc, in);
4096          * iwm_mvm_mac_ctxt_changed(sc, vap);
4097          * iwm_mvm_rm_sta(sc, in);
4098          * iwm_mvm_update_quotas(sc, NULL);
4099          * iwm_mvm_mac_ctxt_changed(sc, in);
4100          * iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4101          * iwm_mvm_mac_ctxt_remove(sc, in);
4102          *
4103          * However, that freezes the device not matter which permutations
4104          * and modifications are attempted.  Obviously, this driver is missing
4105          * something since it works in the Linux driver, but figuring out what
4106          * is missing is a little more complicated.  Now, since we're going
4107          * back to nothing anyway, we'll just do a complete device reset.
4108          * Up your's, device!
4109          */
4110         /*
4111          * Just using 0xf for the queues mask is fine as long as we only
4112          * get here from RUN state.
4113          */
4114         tfd_msk = 0xf;
4115         mbufq_drain(&sc->sc_snd);
4116         iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
4117         /*
4118          * We seem to get away with just synchronously sending the
4119          * IWM_TXPATH_FLUSH command.
4120          */
4121 //      iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
4122         iwm_stop_device(sc);
4123         iwm_init_hw(sc);
4124         if (in)
4125                 in->in_assoc = 0;
4126         return 0;
4127
4128 #if 0
4129         int error;
4130
4131         iwm_mvm_power_mac_disable(sc, in);
4132
4133         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4134                 device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
4135                 return error;
4136         }
4137
4138         if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
4139                 device_printf(sc->sc_dev, "sta remove fail %d\n", error);
4140                 return error;
4141         }
4142         error = iwm_mvm_rm_sta(sc, in);
4143         in->in_assoc = 0;
4144         iwm_mvm_update_quotas(sc, NULL);
4145         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4146                 device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
4147                 return error;
4148         }
4149         iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4150
4151         iwm_mvm_mac_ctxt_remove(sc, in);
4152
4153         return error;
4154 #endif
4155 }
4156
4157 static struct ieee80211_node *
4158 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4159 {
4160         return malloc(sizeof (struct iwm_node), M_80211_NODE,
4161             M_NOWAIT | M_ZERO);
4162 }
4163
4164 uint8_t
4165 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4166 {
4167         int i;
4168         uint8_t rval;
4169
4170         for (i = 0; i < rs->rs_nrates; i++) {
4171                 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4172                 if (rval == iwm_rates[ridx].rate)
4173                         return rs->rs_rates[i];
4174         }
4175
4176         return 0;
4177 }
4178
/*
 * Build the firmware link-quality (rate selection) command for 'in'
 * from the node's negotiated legacy rate set.  The result is stored in
 * in->in_lq; the caller is responsible for sending IWM_LQ_CMD.
 */
static void
iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
//	int txant = 0;

	/* The firmware table can hold only nitems(lq->rs_table) entries. */
	if (nrates > nitems(lq->rs_table)) {
		device_printf(sc->sc_dev,
		    "%s: node supports %d rates, driver handles "
		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
		return;
	}
	if (nrates == 0) {
		device_printf(sc->sc_dev,
		    "%s: node supports 0 rates, odd!\n", __func__);
		return;
	}

	/*
	 * XXX .. and most of iwm_node is not initialised explicitly;
	 * it's all just 0x0 passed to the firmware.
	 */

	/* first figure out which rates we should support */
	/* XXX TODO: this isn't 11n aware /at all/ */
	/* -1 (0xff bytes) marks "no rate" entries in in_ridx. */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
	    "%s: nrates=%d\n", __func__, nrates);

	/*
	 * Loop over nrates and populate in_ridx from the highest
	 * rate to the lowest rate.  Remember, in_ridx[] has
	 * IEEE80211_RATE_MAXSIZE entries!
	 */
	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
		/* ni_rates is lowest-first; walk it from the end. */
		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX) {
			/* No hardware entry for this rate; leave slot as -1. */
			device_printf(sc->sc_dev,
			    "%s: WARNING: device rate for %d not found!\n",
			    __func__, rate);
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
			    __func__,
			    i,
			    rate,
			    ridx);
			in->in_ridx[i] = ridx;
		}
	}

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/* For HT, always enable RTS/CTS to avoid excessive retries. */
	if (ni->ni_flags & IEEE80211_NODE_HT)
		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	/*
	 * XXX TODO: this should be looping over the min of nrates
	 * and LQ_MAX_RETRY_NUM.  Sigh.
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

#if 0
		if (txant == 0)
			txant = iwm_mvm_get_valid_tx_ant(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;
#else
		/* Use the full valid-antenna mask for every entry. */
		nextant = iwm_mvm_get_valid_tx_ant(sc);
#endif
		/*
		 * Map the rate id into a rate index into
		 * our hardware table containing the
		 * configuration to use for this rate.
		 */
		ridx = in->in_ridx[i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "station rate i=%d, rate=%d, hw=%x\n",
		    i, iwm_rates[ridx].rate, tab);
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	for (i = nrates; i < nitems(lq->rs_table); i++) {
		KASSERT(tab != 0, ("invalid tab"));
		lq->rs_table[i] = htole32(tab);
	}
}
4297
4298 static int
4299 iwm_media_change(struct ifnet *ifp)
4300 {
4301         struct ieee80211vap *vap = ifp->if_softc;
4302         struct ieee80211com *ic = vap->iv_ic;
4303         struct iwm_softc *sc = ic->ic_softc;
4304         int error;
4305
4306         error = ieee80211_media_change(ifp);
4307         if (error != ENETRESET)
4308                 return error;
4309
4310         IWM_LOCK(sc);
4311         if (ic->ic_nrunning > 0) {
4312                 iwm_stop(sc);
4313                 iwm_init(sc);
4314         }
4315         IWM_UNLOCK(sc);
4316         return error;
4317 }
4318
4319
/*
 * net80211 state-change hook.  Called with the IEEE80211 lock held;
 * drops it and takes the IWM lock for driver work, re-acquiring the
 * IEEE80211 lock before returning (and whenever chaining to the
 * original iv_newstate).  NOTE(review): the lock juggling here means
 * the vap state may change underneath us while unlocked.
 */
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_node *in;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
	    "switching state %s -> %s\n",
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate]);
	IEEE80211_UNLOCK(ic);
	IWM_LOCK(sc);

	/* Stop the scan LED blink when leaving SCAN. */
	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
		iwm_led_blink_stop(sc);

	/* disable beacon filtering if we're hopping out of RUN */
	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
		iwm_mvm_disable_beacon_filter(sc);

		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
			in->in_assoc = 0;

		if (nstate == IEEE80211_S_INIT) {
			/*
			 * RUN -> INIT: run net80211's transition first,
			 * then fully reset the device via iwm_release().
			 */
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			error = ivp->iv_newstate(vap, nstate, arg);
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
			iwm_release(sc, NULL);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return error;
		}

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 *
		 * XXX TODO: fix this for FreeBSD!
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Force transition to INIT; MGT=%d\n", arg);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			/* Always pass arg as -1 since we can't Tx right now. */
			/*
			 * XXX arg is just ignored anyway when transitioning
			 *     to IEEE80211_S_INIT.
			 */
			vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Going INIT->SCAN\n");
			nstate = IEEE80211_S_SCAN;
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
	case IEEE80211_S_SCAN:
		/*
		 * Dropping back from AUTH/ASSOC: remove the station,
		 * binding and phy context from the firmware, then update
		 * power management.  Errors are logged but not fatal;
		 * net80211's own transition result (myerr) is returned.
		 */
		if (vap->iv_state == IEEE80211_S_AUTH ||
		    vap->iv_state == IEEE80211_S_ASSOC) {
			int myerr;
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			myerr = ivp->iv_newstate(vap, nstate, arg);
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
			error = iwm_mvm_rm_sta(sc, vap, FALSE);
			if (error) {
				device_printf(sc->sc_dev,
				    "%s: Failed to remove station: %d\n",
				    __func__, error);
			}
			error = iwm_mvm_mac_ctxt_changed(sc, vap);
			if (error) {
				device_printf(sc->sc_dev,
				    "%s: Failed to change mac context: %d\n",
				    __func__, error);
			}
			error = iwm_mvm_binding_remove_vif(sc, ivp);
			if (error) {
				device_printf(sc->sc_dev,
				    "%s: Failed to remove channel ctx: %d\n",
				    __func__, error);
			}
			ivp->phy_ctxt = NULL;
			error = iwm_mvm_power_update_mac(sc);
			if (error != 0) {
				device_printf(sc->sc_dev,
				    "%s: failed to update power management\n",
				    __func__);
			}
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return myerr;
		}
		break;

	case IEEE80211_S_AUTH:
		/* Program firmware contexts for authentication. */
		if ((error = iwm_auth(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state: %d\n",
			    __func__, error);
		}
		break;

	case IEEE80211_S_ASSOC:
		/*
		 * EBS may be disabled due to previous failures reported by FW.
		 * Reset EBS status here assuming environment has been changed.
		 */
		sc->last_ebs_successful = TRUE;
		break;

	case IEEE80211_S_RUN:
	{
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		in = IWM_NODE(vap->iv_bss);
		/* Update the association state, now we have it all */
		/* (eg associd comes in at this point */
		error = iwm_mvm_update_sta(sc, in);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update STA\n", __func__);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return error;
		}
		in->in_assoc = 1;
		error = iwm_mvm_mac_ctxt_changed(sc, vap);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update MAC: %d\n", __func__, error);
		}

		/* Re-enable power saving features now that we're associated. */
		iwm_mvm_sf_update(sc, vap, FALSE);
		iwm_mvm_enable_beacon_filter(sc, ivp);
		iwm_mvm_power_update_mac(sc);
		iwm_mvm_update_quotas(sc, ivp);
		/* Build and push the rate-selection (link quality) table. */
		iwm_setrates(sc, in);

		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: IWM_LQ_CMD failed\n", __func__);
		}

		iwm_mvm_led_enable(sc);
		break;
	}

	default:
		break;
	}
	IWM_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	/* Chain to net80211's default state handler. */
	return (ivp->iv_newstate(vap, nstate, arg));
}
4500
4501 void
4502 iwm_endscan_cb(void *arg, int pending)
4503 {
4504         struct iwm_softc *sc = arg;
4505         struct ieee80211com *ic = &sc->sc_ic;
4506
4507         IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4508             "%s: scan ended\n",
4509             __func__);
4510
4511         ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4512 }
4513
4514 static int
4515 iwm_send_bt_init_conf(struct iwm_softc *sc)
4516 {
4517         struct iwm_bt_coex_cmd bt_cmd;
4518
4519         bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4520         bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4521
4522         return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4523             &bt_cmd);
4524 }
4525
4526 static boolean_t
4527 iwm_mvm_is_lar_supported(struct iwm_softc *sc)
4528 {
4529         boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4530         boolean_t tlv_lar = fw_has_capa(&sc->ucode_capa,
4531                                         IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4532
4533         if (iwm_lar_disable)
4534                 return FALSE;
4535
4536         /*
4537          * Enable LAR only if it is supported by the FW (TLV) &&
4538          * enabled in the NVM
4539          */
4540         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4541                 return nvm_lar && tlv_lar;
4542         else
4543                 return tlv_lar;
4544 }
4545
4546 static boolean_t
4547 iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *sc)
4548 {
4549         return fw_has_api(&sc->ucode_capa,
4550                           IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4551                fw_has_capa(&sc->ucode_capa,
4552                            IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4553 }
4554
/*
 * Push a new mobile country code (two-letter alpha2) to the firmware
 * so it can apply LAR regulatory rules.  Silently succeeds (returns 0)
 * when LAR is not supported.  Returns the iwm_send_cmd() error on
 * command failure.
 */
static int
iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
{
	struct iwm_mcc_update_cmd mcc_cmd;
	struct iwm_host_cmd hcmd = {
		.id = IWM_MCC_UPDATE_CMD,
		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
		.data = { &mcc_cmd },
	};
	int ret;
#ifdef IWM_DEBUG
	struct iwm_rx_packet *pkt;
	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
	struct iwm_mcc_update_resp *mcc_resp;
	int n_channels;
	uint16_t mcc;
#endif
	/* Firmware capability selects v1 vs. v2 response layout. */
	int resp_v2 = fw_has_capa(&sc->ucode_capa,
	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);

	if (!iwm_mvm_is_lar_supported(sc)) {
		IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
		    __func__);
		return 0;
	}

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	/* MCC is the two ASCII letters packed big-endian into 16 bits. */
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if (iwm_mvm_is_wifi_mcc_supported(sc))
		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;

	/* v1 firmware expects the shorter command struct. */
	if (resp_v2)
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
	else
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);

	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
	    "send MCC update to FW with '%c%c' src = %d\n",
	    alpha2[0], alpha2[1], mcc_cmd.source_id);

	ret = iwm_send_cmd(sc, &hcmd);
	if (ret)
		return ret;

#ifdef IWM_DEBUG
	pkt = hcmd.resp_pkt;

	/* Extract MCC response */
	if (resp_v2) {
		mcc_resp = (void *)pkt->data;
		mcc = mcc_resp->mcc;
		n_channels =  le32toh(mcc_resp->n_channels);
	} else {
		mcc_resp_v1 = (void *)pkt->data;
		mcc = mcc_resp_v1->mcc;
		n_channels =  le32toh(mcc_resp_v1->n_channels);
	}

	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
	if (mcc == 0)
		mcc = 0x3030;  /* "00" - world */

	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
	    "regulatory domain '%c%c' (%d channels available)\n",
	    mcc >> 8, mcc & 0xff, n_channels);
#endif
	/* Release the response buffer requested via IWM_CMD_WANT_SKB. */
	iwm_free_resp(sc, &hcmd);

	return 0;
}
4627
4628 static void
4629 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4630 {
4631         struct iwm_host_cmd cmd = {
4632                 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4633                 .len = { sizeof(uint32_t), },
4634                 .data = { &backoff, },
4635         };
4636
4637         if (iwm_send_cmd(sc, &cmd) != 0) {
4638                 device_printf(sc->sc_dev,
4639                     "failed to change thermal tx backoff\n");
4640         }
4641 }
4642
/*
 * Bring the NIC all the way up: start the hardware, run the INIT
 * firmware image to calibrate, restart, load the regular runtime
 * firmware and push the initial configuration (BT coexistence,
 * antennas, PHY contexts, power, regulatory, scan and tx queues).
 *
 * Returns 0 on success or an errno; on any post-firmware-load failure
 * the device is stopped again before returning.  The step order below
 * follows the firmware's expected bring-up sequence — do not reorder.
 */
static int
iwm_init_hw(struct iwm_softc *sc)
{
        struct ieee80211com *ic = &sc->sc_ic;
        int error, i, ac;

        sc->sf_state = IWM_SF_UNINIT;

        if ((error = iwm_start_hw(sc)) != 0) {
                printf("iwm_start_hw: failed %d\n", error);
                return error;
        }

        /* Run the INIT image first; it performs NVM/PHY calibration. */
        if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
                printf("iwm_run_init_mvm_ucode: failed %d\n", error);
                return error;
        }

        /*
         * should stop and start HW since that INIT
         * image just loaded
         */
        iwm_stop_device(sc);
        sc->sc_ps_disabled = FALSE;
        if ((error = iwm_start_hw(sc)) != 0) {
                device_printf(sc->sc_dev, "could not initialize hardware\n");
                return error;
        }

        /* Restart, this time with the regular runtime firmware. */
        error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
        if (error) {
                device_printf(sc->sc_dev, "could not load firmware\n");
                goto error;
        }

        /* Smart Fifo failure is non-fatal: log and keep going. */
        error = iwm_mvm_sf_update(sc, NULL, FALSE);
        if (error)
                device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n");

        if ((error = iwm_send_bt_init_conf(sc)) != 0) {
                device_printf(sc->sc_dev, "bt init conf failed\n");
                goto error;
        }

        error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
        if (error != 0) {
                device_printf(sc->sc_dev, "antenna config failed\n");
                goto error;
        }

        /* Send phy db control command and then phy db calibration */
        if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
                goto error;

        if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
                device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
                goto error;
        }

        /* Add auxiliary station for scanning */
        if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
                device_printf(sc->sc_dev, "add_aux_sta failed\n");
                goto error;
        }

        for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
                /*
                 * The channel used here isn't relevant as it's
                 * going to be overwritten in the other flows.
                 * For now use the first channel we have.
                 */
                if ((error = iwm_mvm_phy_ctxt_add(sc,
                    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
                        goto error;
        }

        /* Initialize tx backoffs to the minimum (7000 family only). */
        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
                iwm_mvm_tt_tx_backoff(sc, 0);

        error = iwm_mvm_power_update_device(sc);
        if (error)
                goto error;

        /* "ZZ" is the world-wide/default regulatory domain. */
        if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
                goto error;

        if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
                if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
                        goto error;
        }

        /* Enable Tx queues, one per WME access category. */
        for (ac = 0; ac < WME_NUM_AC; ac++) {
                error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
                    iwm_mvm_ac_to_tx_fifo[ac]);
                if (error)
                        goto error;
        }

        if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
                device_printf(sc->sc_dev, "failed to disable beacon filter\n");
                goto error;
        }

        return 0;

 error:
        iwm_stop_device(sc);
        return error;
}
4755
4756 /* Allow multicast from our BSSID. */
4757 static int
4758 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4759 {
4760         struct ieee80211_node *ni = vap->iv_bss;
4761         struct iwm_mcast_filter_cmd *cmd;
4762         size_t size;
4763         int error;
4764
4765         size = roundup(sizeof(*cmd), 4);
4766         cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4767         if (cmd == NULL)
4768                 return ENOMEM;
4769         cmd->filter_own = 1;
4770         cmd->port_id = 0;
4771         cmd->count = 0;
4772         cmd->pass_all = 1;
4773         IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4774
4775         error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4776             IWM_CMD_SYNC, size, cmd);
4777         free(cmd, M_DEVBUF);
4778
4779         return (error);
4780 }
4781
4782 /*
4783  * ifnet interfaces
4784  */
4785
4786 static void
4787 iwm_init(struct iwm_softc *sc)
4788 {
4789         int error;
4790
4791         if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4792                 return;
4793         }
4794         sc->sc_generation++;
4795         sc->sc_flags &= ~IWM_FLAG_STOPPED;
4796
4797         if ((error = iwm_init_hw(sc)) != 0) {
4798                 printf("iwm_init_hw failed %d\n", error);
4799                 iwm_stop(sc);
4800                 return;
4801         }
4802
4803         /*
4804          * Ok, firmware loaded and we are jogging
4805          */
4806         sc->sc_flags |= IWM_FLAG_HW_INITED;
4807         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4808 }
4809
4810 static int
4811 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4812 {
4813         struct iwm_softc *sc;
4814         int error;
4815
4816         sc = ic->ic_softc;
4817
4818         IWM_LOCK(sc);
4819         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4820                 IWM_UNLOCK(sc);
4821                 return (ENXIO);
4822         }
4823         error = mbufq_enqueue(&sc->sc_snd, m);
4824         if (error) {
4825                 IWM_UNLOCK(sc);
4826                 return (error);
4827         }
4828         iwm_start(sc);
4829         IWM_UNLOCK(sc);
4830         return (0);
4831 }
4832
4833 /*
4834  * Dequeue packets from sendq and call send.
4835  */
4836 static void
4837 iwm_start(struct iwm_softc *sc)
4838 {
4839         struct ieee80211_node *ni;
4840         struct mbuf *m;
4841         int ac = 0;
4842
4843         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4844         while (sc->qfullmsk == 0 &&
4845                 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4846                 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4847                 if (iwm_tx(sc, m, ni, ac) != 0) {
4848                         if_inc_counter(ni->ni_vap->iv_ifp,
4849                             IFCOUNTER_OERRORS, 1);
4850                         ieee80211_free_node(ni);
4851                         continue;
4852                 }
4853                 sc->sc_tx_timer = 15;
4854         }
4855         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4856 }
4857
4858 static void
4859 iwm_stop(struct iwm_softc *sc)
4860 {
4861
4862         sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4863         sc->sc_flags |= IWM_FLAG_STOPPED;
4864         sc->sc_generation++;
4865         iwm_led_blink_stop(sc);
4866         sc->sc_tx_timer = 0;
4867         iwm_stop_device(sc);
4868         sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
4869 }
4870
4871 static void
4872 iwm_watchdog(void *arg)
4873 {
4874         struct iwm_softc *sc = arg;
4875         struct ieee80211com *ic = &sc->sc_ic;
4876
4877         if (sc->sc_tx_timer > 0) {
4878                 if (--sc->sc_tx_timer == 0) {
4879                         device_printf(sc->sc_dev, "device timeout\n");
4880 #ifdef IWM_DEBUG
4881                         iwm_nic_error(sc);
4882 #endif
4883                         ieee80211_restart_all(ic);
4884                         counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4885                         return;
4886                 }
4887         }
4888         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4889 }
4890
4891 static void
4892 iwm_parent(struct ieee80211com *ic)
4893 {
4894         struct iwm_softc *sc = ic->ic_softc;
4895         int startall = 0;
4896
4897         IWM_LOCK(sc);
4898         if (ic->ic_nrunning > 0) {
4899                 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4900                         iwm_init(sc);
4901                         startall = 1;
4902                 }
4903         } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4904                 iwm_stop(sc);
4905         IWM_UNLOCK(sc);
4906         if (startall)
4907                 ieee80211_start_all(ic);
4908 }
4909
4910 /*
4911  * The interrupt side of things
4912  */
4913
4914 /*
4915  * error dumping routines are from iwlwifi/mvm/utils.c
4916  */
4917
/*
 * Firmware (LMAC) error event table, read out of device SRAM after a
 * firmware crash and dumped by iwm_nic_error().
 *
 * Note: This structure is read from the device with IO accesses,
 * and the reading already does the endian conversion. As it is
 * read with uint32_t-sized accesses, any members with a different size
 * need to be ordered correctly though!
 */
struct iwm_error_event_table {
        uint32_t valid;         /* (nonzero) valid, (0) log is empty */
        uint32_t error_id;              /* type of error */
        uint32_t trm_hw_status0;        /* TRM HW status */
        uint32_t trm_hw_status1;        /* TRM HW status */
        uint32_t blink2;                /* branch link */
        uint32_t ilink1;                /* interrupt link */
        uint32_t ilink2;                /* interrupt link */
        uint32_t data1;         /* error-specific data */
        uint32_t data2;         /* error-specific data */
        uint32_t data3;         /* error-specific data */
        uint32_t bcon_time;             /* beacon timer */
        uint32_t tsf_low;               /* network timestamp function timer */
        uint32_t tsf_hi;                /* network timestamp function timer */
        uint32_t gp1;           /* GP1 timer register */
        uint32_t gp2;           /* GP2 timer register */
        uint32_t fw_rev_type;   /* firmware revision type */
        uint32_t major;         /* uCode version major */
        uint32_t minor;         /* uCode version minor */
        uint32_t hw_ver;                /* HW Silicon version */
        uint32_t brd_ver;               /* HW board version */
        uint32_t log_pc;                /* log program counter */
        uint32_t frame_ptr;             /* frame pointer */
        uint32_t stack_ptr;             /* stack pointer */
        uint32_t hcmd;          /* last host command header */
        uint32_t isr0;          /* isr status register LMPM_NIC_ISR0:
                                 * rxtx_flag */
        uint32_t isr1;          /* isr status register LMPM_NIC_ISR1:
                                 * host_flag */
        uint32_t isr2;          /* isr status register LMPM_NIC_ISR2:
                                 * enc_flag */
        uint32_t isr3;          /* isr status register LMPM_NIC_ISR3:
                                 * time_flag */
        uint32_t isr4;          /* isr status register LMPM_NIC_ISR4:
                                 * wico interrupt */
        uint32_t last_cmd_id;   /* last HCMD id handled by the firmware */
        uint32_t wait_event;            /* wait event() caller address */
        uint32_t l2p_control;   /* L2pControlField */
        uint32_t l2p_duration;  /* L2pDurationField */
        uint32_t l2p_mhvalid;   /* L2pMhValidBits */
        uint32_t l2p_addr_match;        /* L2pAddrMatchStat */
        uint32_t lmpm_pmg_sel;  /* indicate which clocks are turned on
                                 * (LMPM_PMG_SEL) */
        uint32_t u_timestamp;   /* date and time of the firmware
                                 * compilation */
        uint32_t flow_handler;  /* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
4971
/*
 * UMAC error struct - relevant starting from family 8000 chip.
 * Read out of device SRAM and dumped by iwm_nic_umac_error().
 *
 * Note: This structure is read from the device with IO accesses,
 * and the reading already does the endian conversion. As it is
 * read with u32-sized accesses, any members with a different size
 * need to be ordered correctly though!
 */
struct iwm_umac_error_event_table {
        uint32_t valid;         /* (nonzero) valid, (0) log is empty */
        uint32_t error_id;      /* type of error */
        uint32_t blink1;        /* branch link */
        uint32_t blink2;        /* branch link */
        uint32_t ilink1;        /* interrupt link */
        uint32_t ilink2;        /* interrupt link */
        uint32_t data1;         /* error-specific data */
        uint32_t data2;         /* error-specific data */
        uint32_t data3;         /* error-specific data */
        uint32_t umac_major;    /* UMAC uCode version major */
        uint32_t umac_minor;    /* UMAC uCode version minor */
        uint32_t frame_pointer; /* core register 27*/
        uint32_t stack_pointer; /* core register 28 */
        uint32_t cmd_header;    /* latest host cmd sent to UMAC */
        uint32_t nic_isr_pref;  /* ISR status register */
} __packed;
4996
/*
 * Firmware event-log layout constants used to sanity-check 'valid'
 * (the entry count) before dumping: presumably the log proper starts
 * one 32-bit word in and each entry is seven 32-bit words — mirrors
 * iwlwifi; confirm against the firmware log format.
 */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
4999
5000 #ifdef IWM_DEBUG
/*
 * Map firmware assert/NMI error ids to symbolic names for the error
 * log dump.  The table is only consulted by iwm_desc_lookup(); the
 * final "ADVANCED_SYSASSERT" entry is the catch-all and must remain
 * last.  Declared static const: it is file-local, never modified, and
 * should not leak into the kernel's global namespace.
 */
static const struct {
        const char *name;
        uint8_t num;
} advanced_lookup[] = {
        { "NMI_INTERRUPT_WDG", 0x34 },
        { "SYSASSERT", 0x35 },
        { "UCODE_VERSION_MISMATCH", 0x37 },
        { "BAD_COMMAND", 0x38 },
        { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
        { "FATAL_ERROR", 0x3D },
        { "NMI_TRM_HW_ERR", 0x46 },
        { "NMI_INTERRUPT_TRM", 0x4C },
        { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
        { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
        { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
        { "NMI_INTERRUPT_HOST", 0x66 },
        { "NMI_INTERRUPT_ACTION_PT", 0x7C },
        { "NMI_INTERRUPT_UNKNOWN", 0x84 },
        { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
        { "ADVANCED_SYSASSERT", 0 },
};
5022
5023 static const char *
5024 iwm_desc_lookup(uint32_t num)
5025 {
5026         int i;
5027
5028         for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5029                 if (advanced_lookup[i].num == num)
5030                         return advanced_lookup[i].name;
5031
5032         /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5033         return advanced_lookup[i].name;
5034 }
5035
/*
 * Read and print the UMAC error event table (family 8000+ parts) from
 * device memory.  Called from iwm_nic_error() when the firmware has
 * published a UMAC table pointer.
 */
static void
iwm_nic_umac_error(struct iwm_softc *sc)
{
        struct iwm_umac_error_event_table table;
        uint32_t base;

        base = sc->umac_error_event_table;

        /*
         * NOTE(review): pointers below 0x800000 are treated as never
         * set by the firmware — presumably device SRAM starts there;
         * confirm against the firmware memory map.
         */
        if (base < 0x800000) {
                device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
                    base);
                return;
        }

        /* Length is in 32-bit words: the table is read u32 at a time. */
        if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
                device_printf(sc->sc_dev, "reading errlog failed\n");
                return;
        }

        if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
                device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
                device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
                    sc->sc_flags, table.valid);
        }

        /* Dump each table field with its decoded error name first. */
        device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
                iwm_desc_lookup(table.error_id));
        device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
        device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
        device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
            table.ilink1);
        device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
            table.ilink2);
        device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
        device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
        device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
        device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
        device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
        device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
            table.frame_pointer);
        device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
            table.stack_pointer);
        device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
        device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
            table.nic_isr_pref);
}
5082
5083 /*
5084  * Support for dumping the error log seemed like a good idea ...
5085  * but it's mostly hex junk and the only sensible thing is the
5086  * hw/ucode revision (which we know anyway).  Since it's here,
5087  * I'll just leave it in, just in case e.g. the Intel guys want to
5088  * help us decipher some "ADVANCED_SYSASSERT" later.
5089  */
/*
 * Read and print the (LMAC) firmware error event table from device
 * memory after a firmware crash, then chain to the UMAC dump when the
 * firmware published a UMAC table pointer as well.
 */
static void
iwm_nic_error(struct iwm_softc *sc)
{
        struct iwm_error_event_table table;
        uint32_t base;

        device_printf(sc->sc_dev, "dumping device error log\n");
        base = sc->error_event_table;
        /*
         * NOTE(review): pointers below 0x800000 are treated as never
         * set by the firmware — presumably device SRAM starts there;
         * confirm against the firmware memory map.
         */
        if (base < 0x800000) {
                device_printf(sc->sc_dev,
                    "Invalid error log pointer 0x%08x\n", base);
                return;
        }

        /* Length is in 32-bit words: the table is read u32 at a time. */
        if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
                device_printf(sc->sc_dev, "reading errlog failed\n");
                return;
        }

        if (!table.valid) {
                device_printf(sc->sc_dev, "errlog not found, skipping\n");
                return;
        }

        if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
                device_printf(sc->sc_dev, "Start Error Log Dump:\n");
                device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
                    sc->sc_flags, table.valid);
        }

        /* Dump each table field with its decoded error name first. */
        device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
            iwm_desc_lookup(table.error_id));
        device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
            table.trm_hw_status0);
        device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
            table.trm_hw_status1);
        device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
        device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
        device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
        device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
        device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
        device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
        device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
        device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
        device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
        device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
        device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
        device_printf(sc->sc_dev, "%08X | uCode revision type\n",
            table.fw_rev_type);
        device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
        device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
        device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
        device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
        device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
        device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
        device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
        device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
        device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
        device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
        device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
        device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
        device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
        device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
        device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
        device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
        device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
        device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
        device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);

        /* Family 8000+ parts also publish a UMAC error table. */
        if (sc->umac_error_event_table)
                iwm_nic_umac_error(sc);
}
5162 #endif
5163
5164 static void
5165 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5166 {
5167         struct ieee80211com *ic = &sc->sc_ic;
5168         struct iwm_cmd_response *cresp;
5169         struct mbuf *m1;
5170         uint32_t offset = 0;
5171         uint32_t maxoff = IWM_RBUF_SIZE;
5172         uint32_t nextoff;
5173         boolean_t stolen = FALSE;
5174
5175 #define HAVEROOM(a)     \
5176     ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
5177
5178         while (HAVEROOM(offset)) {
5179                 struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5180                     offset);
5181                 int qid, idx, code, len;
5182
5183                 qid = pkt->hdr.qid;
5184                 idx = pkt->hdr.idx;
5185
5186                 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5187
5188                 /*
5189                  * randomly get these from the firmware, no idea why.
5190                  * they at least seem harmless, so just ignore them for now
5191                  */
5192                 if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5193                     pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5194                         break;
5195                 }
5196
5197                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5198                     "rx packet qid=%d idx=%d type=%x\n",
5199                     qid & ~0x80, pkt->hdr.idx, code);
5200
5201                 len = le32toh(pkt->len_n_flags) & IWM_FH_RSCSR_FRAME_SIZE_MSK;
5202                 len += sizeof(uint32_t); /* account for status word */
5203                 nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
5204
5205                 iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5206
5207                 switch (code) {
5208                 case IWM_REPLY_RX_PHY_CMD:
5209                         iwm_mvm_rx_rx_phy_cmd(sc, pkt);
5210                         break;
5211
5212                 case IWM_REPLY_RX_MPDU_CMD: {
5213                         /*
5214                          * If this is the last frame in the RX buffer, we
5215                          * can directly feed the mbuf to the sharks here.
5216                          */
5217                         struct iwm_rx_packet *nextpkt = mtodoff(m,
5218                             struct iwm_rx_packet *, nextoff);
5219                         if (!HAVEROOM(nextoff) ||
5220                             (nextpkt->hdr.code == 0 &&
5221                              (nextpkt->hdr.qid & ~0x80) == 0 &&
5222                              nextpkt->hdr.idx == 0) ||
5223                             (nextpkt->len_n_flags ==
5224                              htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5225                                 if (iwm_mvm_rx_rx_mpdu(sc, m, offset, stolen)) {
5226                                         stolen = FALSE;
5227                                         /* Make sure we abort the loop */
5228                                         nextoff = maxoff;
5229                                 }
5230                                 break;
5231                         }
5232
5233                         /*
5234                          * Use m_copym instead of m_split, because that
5235                          * makes it easier to keep a valid rx buffer in
5236                          * the ring, when iwm_mvm_rx_rx_mpdu() fails.
5237                          *
5238                          * We need to start m_copym() at offset 0, to get the
5239                          * M_PKTHDR flag preserved.
5240                          */
5241                         m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
5242                         if (m1) {
5243                                 if (iwm_mvm_rx_rx_mpdu(sc, m1, offset, stolen))
5244                                         stolen = TRUE;
5245                                 else
5246                                         m_freem(m1);
5247                         }
5248                         break;
5249                 }
5250
5251                 case IWM_TX_CMD:
5252                         iwm_mvm_rx_tx_cmd(sc, pkt);
5253                         break;
5254
5255                 case IWM_MISSED_BEACONS_NOTIFICATION: {
5256                         struct iwm_missed_beacons_notif *resp;
5257                         int missed;
5258
5259                         /* XXX look at mac_id to determine interface ID */
5260                         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5261
5262                         resp = (void *)pkt->data;
5263                         missed = le32toh(resp->consec_missed_beacons);
5264
5265                         IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5266                             "%s: MISSED_BEACON: mac_id=%d, "
5267                             "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5268                             "num_rx=%d\n",
5269                             __func__,
5270                             le32toh(resp->mac_id),
5271                             le32toh(resp->consec_missed_beacons_since_last_rx),
5272                             le32toh(resp->consec_missed_beacons),
5273                             le32toh(resp->num_expected_beacons),
5274                             le32toh(resp->num_recvd_beacons));
5275
5276                         /* Be paranoid */
5277                         if (vap == NULL)
5278                                 break;
5279
5280                         /* XXX no net80211 locking? */
5281                         if (vap->iv_state == IEEE80211_S_RUN &&
5282                             (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5283                                 if (missed > vap->iv_bmissthreshold) {
5284                                         /* XXX bad locking; turn into task */
5285                                         IWM_UNLOCK(sc);
5286                                         ieee80211_beacon_miss(ic);
5287                                         IWM_LOCK(sc);
5288                                 }
5289                         }
5290
5291                         break;
5292                 }
5293
5294                 case IWM_MFUART_LOAD_NOTIFICATION:
5295                         break;
5296
5297                 case IWM_MVM_ALIVE:
5298                         break;
5299
5300                 case IWM_CALIB_RES_NOTIF_PHY_DB:
5301                         break;
5302
5303                 case IWM_STATISTICS_NOTIFICATION: {
5304                         struct iwm_notif_statistics *stats;
5305                         stats = (void *)pkt->data;
5306                         memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5307                         sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
5308                         break;
5309                 }
5310
5311                 case IWM_NVM_ACCESS_CMD:
5312                 case IWM_MCC_UPDATE_CMD:
5313                         if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5314                                 memcpy(sc->sc_cmd_resp,
5315                                     pkt, sizeof(sc->sc_cmd_resp));
5316                         }
5317                         break;
5318
5319                 case IWM_MCC_CHUB_UPDATE_CMD: {
5320                         struct iwm_mcc_chub_notif *notif;
5321                         notif = (void *)pkt->data;
5322
5323                         sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5324                         sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5325                         sc->sc_fw_mcc[2] = '\0';
5326                         IWM_DPRINTF(sc, IWM_DEBUG_LAR,
5327                             "fw source %d sent CC '%s'\n",
5328                             notif->source_id, sc->sc_fw_mcc);
5329                         break;
5330                 }
5331
5332                 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5333                 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5334                                  IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5335                         struct iwm_dts_measurement_notif_v1 *notif;
5336
5337                         if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5338                                 device_printf(sc->sc_dev,
5339                                     "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5340                                 break;
5341                         }
5342                         notif = (void *)pkt->data;
5343                         IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5344                             "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5345                             notif->temp);
5346                         break;
5347                 }
5348
5349                 case IWM_PHY_CONFIGURATION_CMD:
5350                 case IWM_TX_ANT_CONFIGURATION_CMD:
5351                 case IWM_ADD_STA:
5352                 case IWM_MAC_CONTEXT_CMD:
5353                 case IWM_REPLY_SF_CFG_CMD:
5354                 case IWM_POWER_TABLE_CMD:
5355                 case IWM_PHY_CONTEXT_CMD:
5356                 case IWM_BINDING_CONTEXT_CMD:
5357                 case IWM_TIME_EVENT_CMD:
5358                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5359                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5360                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5361                 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5362                 case IWM_SCAN_OFFLOAD_ABORT_CMD:
5363                 case IWM_REPLY_BEACON_FILTERING_CMD:
5364                 case IWM_MAC_PM_POWER_TABLE:
5365                 case IWM_TIME_QUOTA_CMD:
5366                 case IWM_REMOVE_STA:
5367                 case IWM_TXPATH_FLUSH:
5368                 case IWM_LQ_CMD:
5369                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
5370                                  IWM_FW_PAGING_BLOCK_CMD):
5371                 case IWM_BT_CONFIG:
5372                 case IWM_REPLY_THERMAL_MNG_BACKOFF:
5373                         cresp = (void *)pkt->data;
5374                         if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5375                                 memcpy(sc->sc_cmd_resp,
5376                                     pkt, sizeof(*pkt)+sizeof(*cresp));
5377                         }
5378                         break;
5379
5380                 /* ignore */
5381                 case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
5382                         break;
5383
5384                 case IWM_INIT_COMPLETE_NOTIF:
5385                         break;
5386
5387                 case IWM_SCAN_OFFLOAD_COMPLETE:
5388                         iwm_mvm_rx_lmac_scan_complete_notif(sc, pkt);
5389                         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5390                                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5391                                 ieee80211_runtask(ic, &sc->sc_es_task);
5392                         }
5393                         break;
5394
5395                 case IWM_SCAN_ITERATION_COMPLETE: {
5396                         struct iwm_lmac_scan_complete_notif *notif;
5397                         notif = (void *)pkt->data;
5398                         break;
5399                 }
5400
5401                 case IWM_SCAN_COMPLETE_UMAC:
5402                         iwm_mvm_rx_umac_scan_complete_notif(sc, pkt);
5403                         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5404                                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5405                                 ieee80211_runtask(ic, &sc->sc_es_task);
5406                         }
5407                         break;
5408
5409                 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5410                         struct iwm_umac_scan_iter_complete_notif *notif;
5411                         notif = (void *)pkt->data;
5412
5413                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5414                             "complete, status=0x%x, %d channels scanned\n",
5415                             notif->status, notif->scanned_channels);
5416                         break;
5417                 }
5418
5419                 case IWM_REPLY_ERROR: {
5420                         struct iwm_error_resp *resp;
5421                         resp = (void *)pkt->data;
5422
5423                         device_printf(sc->sc_dev,
5424                             "firmware error 0x%x, cmd 0x%x\n",
5425                             le32toh(resp->error_type),
5426                             resp->cmd_id);
5427                         break;
5428                 }
5429
5430                 case IWM_TIME_EVENT_NOTIFICATION: {
5431                         struct iwm_time_event_notif *notif;
5432                         notif = (void *)pkt->data;
5433
5434                         IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5435                             "TE notif status = 0x%x action = 0x%x\n",
5436                             notif->status, notif->action);
5437                         break;
5438                 }
5439
5440                 case IWM_MCAST_FILTER_CMD:
5441                         break;
5442
5443                 case IWM_SCD_QUEUE_CFG: {
5444                         struct iwm_scd_txq_cfg_rsp *rsp;
5445                         rsp = (void *)pkt->data;
5446
5447                         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5448                             "queue cfg token=0x%x sta_id=%d "
5449                             "tid=%d scd_queue=%d\n",
5450                             rsp->token, rsp->sta_id, rsp->tid,
5451                             rsp->scd_queue);
5452                         break;
5453                 }
5454
5455                 default:
5456                         device_printf(sc->sc_dev,
5457                             "frame %d/%d %x UNHANDLED (this should "
5458                             "not happen)\n", qid & ~0x80, idx,
5459                             pkt->len_n_flags);
5460                         break;
5461                 }
5462
5463                 /*
5464                  * Why test bit 0x80?  The Linux driver:
5465                  *
5466                  * There is one exception:  uCode sets bit 15 when it
5467                  * originates the response/notification, i.e. when the
5468                  * response/notification is not a direct response to a
5469                  * command sent by the driver.  For example, uCode issues
5470                  * IWM_REPLY_RX when it sends a received frame to the driver;
5471                  * it is not a direct response to any driver command.
5472                  *
5473                  * Ok, so since when is 7 == 15?  Well, the Linux driver
5474                  * uses a slightly different format for pkt->hdr, and "qid"
5475                  * is actually the upper byte of a two-byte field.
5476                  */
5477                 if (!(qid & (1 << 7)))
5478                         iwm_cmd_done(sc, pkt);
5479
5480                 offset = nextoff;
5481         }
5482         if (stolen)
5483                 m_freem(m);
5484 #undef HAVEROOM
5485 }
5486
5487 /*
5488  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5489  * Basic structure from if_iwn
5490  */
5491 static void
5492 iwm_notif_intr(struct iwm_softc *sc)
5493 {
5494         uint16_t hw;
5495
5496         bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5497             BUS_DMASYNC_POSTREAD);
5498
5499         hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5500
5501         /*
5502          * Process responses
5503          */
5504         while (sc->rxq.cur != hw) {
5505                 struct iwm_rx_ring *ring = &sc->rxq;
5506                 struct iwm_rx_data *data = &ring->data[ring->cur];
5507
5508                 bus_dmamap_sync(ring->data_dmat, data->map,
5509                     BUS_DMASYNC_POSTREAD);
5510
5511                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5512                     "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5513                 iwm_handle_rxb(sc, data->m);
5514
5515                 ring->cur = (ring->cur + 1) % IWM_RX_RING_COUNT;
5516         }
5517
5518         /*
5519          * Tell the firmware that it can reuse the ring entries that
5520          * we have just processed.
5521          * Seems like the hardware gets upset unless we align
5522          * the write by 8??
5523          */
5524         hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5525         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, rounddown2(hw, 8));
5526 }
5527
/*
 * PCI interrupt handler (registered in iwm_pci_attach()).
 *
 * Gathers the pending interrupt cause bits either from the ICT table
 * (when IWM_FLAG_USE_ICT is set) or directly from the CSR registers,
 * then dispatches: SW/HW errors, firmware-chunk-load completion,
 * rfkill, and RX notifications.  Interrupts are masked on entry and
 * restored via iwm_restore_interrupts() on the way out.
 */
static void
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	int handled = 0;
	int r1, r2, rv = 0;
	int isperiodic = 0;

	IWM_LOCK(sc);
	/* Mask all interrupts while we service this one. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		/* ICT mode: causes are read from a DMA'ed table, not CSRs. */
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;	/* nothing pending; not ours */

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			/* Zero each consumed slot so it reads empty next time. */
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		/* Expand the compressed ICT format back into CSR bit layout. */
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;	/* shared-interrupt line; not ours */
	}

	/* Acknowledge the causes we are about to handle. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* Safely ignore these bits for debug checks below */
	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
		/* Firmware reported a fatal error: dump state, restart vap. */
		int i;
		struct ieee80211com *ic = &sc->sc_ic;
		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

#ifdef IWM_DEBUG
		iwm_nic_error(sc);
#endif
		/* Dump driver status (TX and RX rings) while we're here. */
		device_printf(sc->sc_dev, "driver status:\n");
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			device_printf(sc->sc_dev,
			    "  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued);
		}
		device_printf(sc->sc_dev,
		    "  rx ring: cur=%d\n", sc->rxq.cur);
		device_printf(sc->sc_dev,
		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);

		/* Don't stop the device; just do a VAP restart */
		IWM_UNLOCK(sc);

		if (vap == NULL) {
			printf("%s: null vap\n", __func__);
			return;
		}

		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
		    "restarting\n", __func__, vap->iv_state);

		/* NOTE: returns without restoring the interrupt mask. */
		ieee80211_restart_all(ic);
		return;
	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		device_printf(sc->sc_dev, "hardware error, stopping device\n");
		iwm_stop(sc);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;
		sc->sc_fw_chunk_done = 1;
		/* Wake the firmware-load path sleeping on sc_fw. */
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		if (iwm_check_rfkill(sc)) {
			device_printf(sc->sc_dev,
			    "%s: rfkill switch, disabling interface\n",
			    __func__);
			iwm_stop(sc);
		}
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		/* Drain the RX ring and dispatch notifications. */
		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "%s: unhandled interrupts: %x\n", __func__, r1);
	rv = 1;

 out_ena:
	/* Unmask interrupts again before leaving. */
	iwm_restore_interrupts(sc);
 out:
	IWM_UNLOCK(sc);
	return;
}
5681
5682 /*
5683  * Autoconf glue-sniffing
5684  */
5685 #define PCI_VENDOR_INTEL                0x8086
5686 #define PCI_PRODUCT_INTEL_WL_3160_1     0x08b3
5687 #define PCI_PRODUCT_INTEL_WL_3160_2     0x08b4
5688 #define PCI_PRODUCT_INTEL_WL_3165_1     0x3165
5689 #define PCI_PRODUCT_INTEL_WL_3165_2     0x3166
5690 #define PCI_PRODUCT_INTEL_WL_7260_1     0x08b1
5691 #define PCI_PRODUCT_INTEL_WL_7260_2     0x08b2
5692 #define PCI_PRODUCT_INTEL_WL_7265_1     0x095a
5693 #define PCI_PRODUCT_INTEL_WL_7265_2     0x095b
5694 #define PCI_PRODUCT_INTEL_WL_8260_1     0x24f3
5695 #define PCI_PRODUCT_INTEL_WL_8260_2     0x24f4
5696
/*
 * Table mapping supported Intel PCI device IDs to their per-chip
 * configuration.  Used by iwm_probe() and iwm_dev_check().
 * Note: 7265D shares the 7265 PCI IDs and is special-cased by hw_rev
 * in iwm_attach().
 */
static const struct iwm_devices {
        uint16_t                device;         /* PCI device ID */
        const struct iwm_cfg    *cfg;           /* chip-specific configuration */
} iwm_devices[] = {
        { PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
        { PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
        { PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
        { PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
        { PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
        { PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
        { PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
        { PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
        { PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
        { PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
};
5712
5713 static int
5714 iwm_probe(device_t dev)
5715 {
5716         int i;
5717
5718         for (i = 0; i < nitems(iwm_devices); i++) {
5719                 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5720                     pci_get_device(dev) == iwm_devices[i].device) {
5721                         device_set_desc(dev, iwm_devices[i].cfg->name);
5722                         return (BUS_PROBE_DEFAULT);
5723                 }
5724         }
5725
5726         return (ENXIO);
5727 }
5728
5729 static int
5730 iwm_dev_check(device_t dev)
5731 {
5732         struct iwm_softc *sc;
5733         uint16_t devid;
5734         int i;
5735
5736         sc = device_get_softc(dev);
5737
5738         devid = pci_get_device(dev);
5739         for (i = 0; i < nitems(iwm_devices); i++) {
5740                 if (iwm_devices[i].device == devid) {
5741                         sc->cfg = iwm_devices[i].cfg;
5742                         return (0);
5743                 }
5744         }
5745         device_printf(dev, "unknown adapter type\n");
5746         return ENXIO;
5747 }
5748
5749 /* PCI registers */
5750 #define PCI_CFG_RETRY_TIMEOUT   0x041
5751
5752 static int
5753 iwm_pci_attach(device_t dev)
5754 {
5755         struct iwm_softc *sc;
5756         int count, error, rid;
5757         uint16_t reg;
5758
5759         sc = device_get_softc(dev);
5760
5761         /* We disable the RETRY_TIMEOUT register (0x41) to keep
5762          * PCI Tx retries from interfering with C3 CPU state */
5763         pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5764
5765         /* Enable bus-mastering and hardware bug workaround. */
5766         pci_enable_busmaster(dev);
5767         reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5768         /* if !MSI */
5769         if (reg & PCIM_STATUS_INTxSTATE) {
5770                 reg &= ~PCIM_STATUS_INTxSTATE;
5771         }
5772         pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5773
5774         rid = PCIR_BAR(0);
5775         sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5776             RF_ACTIVE);
5777         if (sc->sc_mem == NULL) {
5778                 device_printf(sc->sc_dev, "can't map mem space\n");
5779                 return (ENXIO);
5780         }
5781         sc->sc_st = rman_get_bustag(sc->sc_mem);
5782         sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5783
5784         /* Install interrupt handler. */
5785         count = 1;
5786         rid = 0;
5787         if (pci_alloc_msi(dev, &count) == 0)
5788                 rid = 1;
5789         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5790             (rid != 0 ? 0 : RF_SHAREABLE));
5791         if (sc->sc_irq == NULL) {
5792                 device_printf(dev, "can't map interrupt\n");
5793                         return (ENXIO);
5794         }
5795         error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5796             NULL, iwm_intr, sc, &sc->sc_ih);
5797         if (sc->sc_ih == NULL) {
5798                 device_printf(dev, "can't establish interrupt");
5799                         return (ENXIO);
5800         }
5801         sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5802
5803         return (0);
5804 }
5805
5806 static void
5807 iwm_pci_detach(device_t dev)
5808 {
5809         struct iwm_softc *sc = device_get_softc(dev);
5810
5811         if (sc->sc_irq != NULL) {
5812                 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5813                 bus_release_resource(dev, SYS_RES_IRQ,
5814                     rman_get_rid(sc->sc_irq), sc->sc_irq);
5815                 pci_release_msi(dev);
5816         }
5817         if (sc->sc_mem != NULL)
5818                 bus_release_resource(dev, SYS_RES_MEMORY,
5819                     rman_get_rid(sc->sc_mem), sc->sc_mem);
5820 }
5821
5822
5823
/*
 * Newbus attach: allocate all software and DMA state, identify the
 * exact chip revision, and initialize the net80211 com structure.
 * Firmware load and net80211 ifattach are deferred to iwm_preinit()
 * via a config_intrhook, since they need working interrupts.
 *
 * Returns 0 on success; on any failure everything allocated so far is
 * torn down through iwm_detach_local() and ENXIO is returned.
 */
static int
iwm_attach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	int error;
	int txq_i, i;

	sc->sc_dev = dev;
	sc->sc_attached = 1;
	IWM_LOCK_INIT(sc);
	mbufq_init(&sc->sc_snd, ifqmaxlen);
	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);

	sc->sc_notif_wait = iwm_notification_wait_init(sc);
	if (sc->sc_notif_wait == NULL) {
		device_printf(dev, "failed to init notification wait struct\n");
		goto fail;
	}

	sc->sf_state = IWM_SF_UNINIT;

	/* Init phy db */
	sc->sc_phy_db = iwm_phy_db_init(sc);
	if (!sc->sc_phy_db) {
		device_printf(dev, "Cannot init phy_db\n");
		goto fail;
	}

	/* Set EBS as successful as long as not stated otherwise by the FW. */
	sc->last_ebs_successful = TRUE;

	/* PCI attach */
	error = iwm_pci_attach(dev);
	if (error != 0)
		goto fail;

	/* -1 means no command response is currently awaited. */
	sc->sc_wantresp = -1;

	/* Check device type */
	error = iwm_dev_check(dev);
	if (error != 0)
		goto fail;

	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

	if (iwm_prepare_card_hw(sc) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		goto fail;
	}

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		int ret;
		uint32_t hw_step;

		/*
		 * In order to recognize C step the driver should read the
		 * chip version id located at the AUX bus MISC address.
		 */
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(2);

		/* Wait for the MAC clock before touching the AUX bus. */
		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   25000);
		if (!ret) {
			device_printf(sc->sc_dev,
			    "Failed to wake up the nic\n");
			goto fail;
		}

		if (iwm_nic_lock(sc)) {
			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
			hw_step |= IWM_ENABLE_WFPM;
			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
			/* 0x3 identifies C-step silicon; patch hw_rev. */
			if (hw_step == 0x3)
				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
						(IWM_SILICON_C_STEP << 2);
			iwm_nic_unlock(sc);
		} else {
			device_printf(sc->sc_dev, "Failed to lock the nic\n");
			goto fail;
		}
	}

	/* special-case 7265D, it has the same PCI IDs. */
	if (sc->cfg == &iwm7265_cfg &&
	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
		sc->cfg = &iwm7265d_cfg;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		device_printf(dev, "could not allocate memory for firmware\n");
		goto fail;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		device_printf(dev, "could not allocate keep warm page\n");
		goto fail;
	}

	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		device_printf(dev, "could not allocate ICT table\n");
		goto fail;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		device_printf(dev, "could not allocate TX scheduler rings\n");
		goto fail;
	}

	/* Allocate TX rings */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			device_printf(dev,
			    "could not allocate TX ring %d\n",
			    txq_i);
			goto fail;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		device_printf(dev, "could not allocate RX ring\n");
		goto fail;
	}

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_STA |
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_WME |
	    IEEE80211_C_PMGT |
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
	    ;
	/* Advertise full-offload scanning */
	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
		sc->sc_phyctxt[i].color = 0;
		sc->sc_phyctxt[i].ref = 0;
		sc->sc_phyctxt[i].channel = NULL;
	}

	/* Default noise floor */
	sc->sc_noise = -96;

	/* Max RSSI */
	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;

	/* Defer firmware/net80211 setup until interrupts work. */
	sc->sc_preinit_hook.ich_func = iwm_preinit;
	sc->sc_preinit_hook.ich_arg = sc;
	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
		device_printf(dev, "config_intrhook_establish failed\n");
		goto fail;
	}

#ifdef IWM_DEBUG
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);

	return 0;

	/* Free allocated memory if something failed during attachment. */
fail:
	iwm_detach_local(sc, 0);

	return ENXIO;
}
6028
6029 static int
6030 iwm_is_valid_ether_addr(uint8_t *addr)
6031 {
6032         char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6033
6034         if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6035                 return (FALSE);
6036
6037         return (TRUE);
6038 }
6039
6040 static int
6041 iwm_wme_update(struct ieee80211com *ic)
6042 {
6043 #define IWM_EXP2(x)     ((1 << (x)) - 1)        /* CWmin = 2^ECWmin - 1 */
6044         struct iwm_softc *sc = ic->ic_softc;
6045         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6046         struct iwm_vap *ivp = IWM_VAP(vap);
6047         struct iwm_node *in;
6048         struct wmeParams tmp[WME_NUM_AC];
6049         int aci, error;
6050
6051         if (vap == NULL)
6052                 return (0);
6053
6054         IEEE80211_LOCK(ic);
6055         for (aci = 0; aci < WME_NUM_AC; aci++)
6056                 tmp[aci] = ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
6057         IEEE80211_UNLOCK(ic);
6058
6059         IWM_LOCK(sc);
6060         for (aci = 0; aci < WME_NUM_AC; aci++) {
6061                 const struct wmeParams *ac = &tmp[aci];
6062                 ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6063                 ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6064                 ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6065                 ivp->queue_params[aci].edca_txop =
6066                     IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6067         }
6068         ivp->have_wme = TRUE;
6069         if (ivp->is_uploaded && vap->iv_bss != NULL) {
6070                 in = IWM_NODE(vap->iv_bss);
6071                 if (in->in_assoc) {
6072                         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
6073                                 device_printf(sc->sc_dev,
6074                                     "%s: failed to update MAC\n", __func__);
6075                         }
6076                 }
6077         }
6078         IWM_UNLOCK(sc);
6079
6080         return (0);
6081 #undef IWM_EXP2
6082 }
6083
/*
 * Deferred attach hook, run via the config_intrhook established in
 * iwm_attach() once interrupts are available.  Starts the hardware,
 * runs the init firmware once to obtain NVM data, then completes
 * net80211 attachment (channel map, ifattach, driver method hooks,
 * radiotap).  On failure the whole device attach is torn down.
 */
static void
iwm_preinit(void *arg)
{
	struct iwm_softc *sc = arg;
	device_t dev = sc->sc_dev;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s\n", __func__);

	IWM_LOCK(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		IWM_UNLOCK(sc);
		goto fail;
	}

	/*
	 * Run the init ucode once, then stop the device again;
	 * presumably the run-time firmware is loaded later when the
	 * interface is brought up -- confirm against iwm_init paths.
	 */
	error = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error) {
		IWM_UNLOCK(sc);
		goto fail;
	}
	device_printf(dev,
	    "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));

	/* not all hardware can do 5GHz band */
	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
	IWM_UNLOCK(sc);

	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/*
	 * At this point we've committed - if we fail to do setup,
	 * we now also have to tear down the net80211 state.
	 */
	ieee80211_ifattach(ic);
	/* Install the driver's net80211 method overrides. */
	ic->ic_vap_create = iwm_vap_create;
	ic->ic_vap_delete = iwm_vap_delete;
	ic->ic_raw_xmit = iwm_raw_xmit;
	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_scan_start = iwm_scan_start;
	ic->ic_scan_end = iwm_scan_end;
	ic->ic_update_mcast = iwm_update_mcast;
	ic->ic_getradiocaps = iwm_init_channel_map;
	ic->ic_set_channel = iwm_set_channel;
	ic->ic_scan_curchan = iwm_scan_curchan;
	ic->ic_scan_mindwell = iwm_scan_mindwell;
	ic->ic_wme.wme_update = iwm_wme_update;
	ic->ic_parent = iwm_parent;
	ic->ic_transmit = iwm_transmit;
	iwm_radiotap_attach(sc);
	if (bootverbose)
		ieee80211_announce(ic);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);
	config_intrhook_disestablish(&sc->sc_preinit_hook);

	return;
fail:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
	iwm_detach_local(sc, 0);
}
6154
6155 /*
6156  * Attach the interface to 802.11 radiotap.
6157  */
/*
 * Register the TX and RX radiotap headers with net80211 so packet
 * capture tools can see per-frame radio metadata.
 */
static void
iwm_radiotap_attach(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s begin\n", __func__);
	/* Hand both tap headers and their present-field masks to net80211. */
	ieee80211_radiotap_attach(ic,
	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
		IWM_TX_RADIOTAP_PRESENT,
	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
		IWM_RX_RADIOTAP_PRESENT);
	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s end\n", __func__);
}
6173
6174 static struct ieee80211vap *
6175 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6176     enum ieee80211_opmode opmode, int flags,
6177     const uint8_t bssid[IEEE80211_ADDR_LEN],
6178     const uint8_t mac[IEEE80211_ADDR_LEN])
6179 {
6180         struct iwm_vap *ivp;
6181         struct ieee80211vap *vap;
6182
6183         if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6184                 return NULL;
6185         ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6186         vap = &ivp->iv_vap;
6187         ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6188         vap->iv_bmissthreshold = 10;            /* override default */
6189         /* Override with driver methods. */
6190         ivp->iv_newstate = vap->iv_newstate;
6191         vap->iv_newstate = iwm_newstate;
6192
6193         ivp->id = IWM_DEFAULT_MACID;
6194         ivp->color = IWM_DEFAULT_COLOR;
6195
6196         ivp->have_wme = FALSE;
6197         ivp->ps_disabled = FALSE;
6198
6199         ieee80211_ratectl_init(vap);
6200         /* Complete setup. */
6201         ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6202             mac);
6203         ic->ic_opmode = opmode;
6204
6205         return vap;
6206 }
6207
6208 static void
6209 iwm_vap_delete(struct ieee80211vap *vap)
6210 {
6211         struct iwm_vap *ivp = IWM_VAP(vap);
6212
6213         ieee80211_ratectl_deinit(vap);
6214         ieee80211_vap_detach(vap);
6215         free(ivp, M_80211_VAP);
6216 }
6217
/*
 * net80211 ic_scan_start callback: start a firmware-driven scan.
 * Runs from the ic->ic_tq taskqueue (see comment in iwm_scan_end).
 */
static void
iwm_scan_start(struct ieee80211com *ic)
{
        struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
        struct iwm_softc *sc = ic->ic_softc;
        int error;

        IWM_LOCK(sc);
        if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
                /* This should not be possible */
                device_printf(sc->sc_dev,
                    "%s: Previous scan not completed yet\n", __func__);
        }
        /* Prefer the UMAC scan API when the firmware advertises it. */
        if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
                error = iwm_mvm_umac_scan(sc);
        else
                error = iwm_mvm_lmac_scan(sc);
        if (error != 0) {
                device_printf(sc->sc_dev, "could not initiate scan\n");
                /* Drop the softc lock before calling back into net80211. */
                IWM_UNLOCK(sc);
                ieee80211_cancel_scan(vap);
        } else {
                sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
                iwm_led_blink_start(sc);
                IWM_UNLOCK(sc);
        }
}
6245
/*
 * net80211 ic_scan_end callback: stop any scan still running in the
 * firmware and restore the LED state.  Runs from the ic->ic_tq
 * taskqueue, like iwm_scan_start.
 */
static void
iwm_scan_end(struct ieee80211com *ic)
{
        struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
        struct iwm_softc *sc = ic->ic_softc;

        IWM_LOCK(sc);
        iwm_led_blink_stop(sc);
        /* Re-light the LED solid if we are associated. */
        if (vap->iv_state == IEEE80211_S_RUN)
                iwm_mvm_led_enable(sc);
        if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
                /*
                 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
                 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
                 * taskqueue.
                 */
                sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
                iwm_mvm_scan_stop_wait(sc);
        }
        IWM_UNLOCK(sc);

        /*
         * Make sure we don't race, if sc_es_task is still enqueued here.
         * This is to make sure that it won't call ieee80211_scan_done
         * when we have already started the next scan.
         */
        taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
}
6274
/*
 * net80211 ic_update_mcast callback.  Deliberately empty: this driver
 * does not program a hardware multicast filter.
 */
static void
iwm_update_mcast(struct ieee80211com *ic)
{
}
6279
/*
 * net80211 ic_set_channel callback.  Deliberately empty: the driver
 * does not act on per-channel changes through this hook (channel
 * programming is presumably done via firmware commands elsewhere).
 */
static void
iwm_set_channel(struct ieee80211com *ic)
{
}
6284
/*
 * net80211 per-channel scan callback.  Deliberately empty: scan
 * execution, including channel dwell, is handled by the firmware
 * scans issued from iwm_scan_start.
 */
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
6289
/*
 * net80211 minimum-dwell scan callback.  Deliberately empty: dwell
 * timing is handled by the firmware scans issued from iwm_scan_start.
 */
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
6295
/*
 * (Re)initialize the device from task/process context.  Serializes
 * against any concurrent init/stop through the IWM_FLAG_BUSY
 * handshake: sleep until the flag clears, claim it, restart the
 * device, then wake any other waiters.
 */
void
iwm_init_task(void *arg1)
{
        struct iwm_softc *sc = arg1;

        IWM_LOCK(sc);
        /* Wait for any other init/stop in progress to finish. */
        while (sc->sc_flags & IWM_FLAG_BUSY)
                msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
        sc->sc_flags |= IWM_FLAG_BUSY;
        iwm_stop(sc);
        /* Only bring the device back up if an interface is running. */
        if (sc->sc_ic.ic_nrunning > 0)
                iwm_init(sc);
        sc->sc_flags &= ~IWM_FLAG_BUSY;
        wakeup(&sc->sc_flags);
        IWM_UNLOCK(sc);
}
6312
6313 static int
6314 iwm_resume(device_t dev)
6315 {
6316         struct iwm_softc *sc = device_get_softc(dev);
6317         int do_reinit = 0;
6318
6319         /*
6320          * We disable the RETRY_TIMEOUT register (0x41) to keep
6321          * PCI Tx retries from interfering with C3 CPU state.
6322          */
6323         pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6324         iwm_init_task(device_get_softc(dev));
6325
6326         IWM_LOCK(sc);
6327         if (sc->sc_flags & IWM_FLAG_SCANNING) {
6328                 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6329                 do_reinit = 1;
6330         }
6331         IWM_UNLOCK(sc);
6332
6333         if (do_reinit)
6334                 ieee80211_resume_all(&sc->sc_ic);
6335
6336         return 0;
6337 }
6338
6339 static int
6340 iwm_suspend(device_t dev)
6341 {
6342         int do_stop = 0;
6343         struct iwm_softc *sc = device_get_softc(dev);
6344
6345         do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6346
6347         ieee80211_suspend_all(&sc->sc_ic);
6348
6349         if (do_stop) {
6350                 IWM_LOCK(sc);
6351                 iwm_stop(sc);
6352                 sc->sc_flags |= IWM_FLAG_SCANNING;
6353                 IWM_UNLOCK(sc);
6354         }
6355
6356         return (0);
6357 }
6358
/*
 * Common teardown path: release everything attach set up.
 * "do_net80211" selects whether the net80211 layer is torn down as
 * well.  Safe to call more than once; the sc_attached flag guards
 * against double teardown.  Always returns 0.
 */
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
        struct iwm_fw_info *fw = &sc->sc_fw;
        device_t dev = sc->sc_dev;
        int i;

        /* Nothing to do if attach never completed (or we already ran). */
        if (!sc->sc_attached)
                return 0;
        sc->sc_attached = 0;

        if (do_net80211)
                ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);

        /* Stop timers before shutting the hardware down. */
        callout_drain(&sc->sc_led_blink_to);
        callout_drain(&sc->sc_watchdog_to);
        iwm_stop_device(sc);
        if (do_net80211) {
                ieee80211_ifdetach(&sc->sc_ic);
        }

        iwm_phy_db_free(sc->sc_phy_db);
        sc->sc_phy_db = NULL;

        iwm_free_nvm_data(sc->nvm_data);

        /* Free descriptor rings */
        iwm_free_rx_ring(sc, &sc->rxq);
        for (i = 0; i < nitems(sc->txq); i++)
                iwm_free_tx_ring(sc, &sc->txq[i]);

        /* Free firmware */
        if (fw->fw_fp != NULL)
                iwm_fw_info_free(fw);

        /* Free scheduler */
        iwm_dma_contig_free(&sc->sched_dma);
        iwm_dma_contig_free(&sc->ict_dma);
        iwm_dma_contig_free(&sc->kw_dma);
        iwm_dma_contig_free(&sc->fw_dma);

        iwm_free_fw_paging(sc);

        /* Finished with the hardware - detach things */
        iwm_pci_detach(dev);

        if (sc->sc_notif_wait != NULL) {
                iwm_notification_wait_free(sc->sc_notif_wait);
                sc->sc_notif_wait = NULL;
        }

        /* Flush queued frames and tear down the softc lock last. */
        mbufq_drain(&sc->sc_snd);
        IWM_LOCK_DESTROY(sc);

        return (0);
}
6415
6416 static int
6417 iwm_detach(device_t dev)
6418 {
6419         struct iwm_softc *sc = device_get_softc(dev);
6420
6421         return (iwm_detach_local(sc, 1));
6422 }
6423
/*
 * newbus method table wiring this driver into the PCI bus code.
 */
static device_method_t iwm_pci_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         iwm_probe),
        DEVMETHOD(device_attach,        iwm_attach),
        DEVMETHOD(device_detach,        iwm_detach),
        DEVMETHOD(device_suspend,       iwm_suspend),
        DEVMETHOD(device_resume,        iwm_resume),

        DEVMETHOD_END
};
6434
/* newbus driver declaration: name, method table, and softc size. */
static driver_t iwm_pci_driver = {
        "iwm",
        iwm_pci_methods,
        sizeof (struct iwm_softc)
};
6440
static devclass_t iwm_devclass;

/* Register the driver on the PCI bus and declare module dependencies. */
DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);