]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/dev/iwm/if_iwm.c
MFC r332897 (by imp), r333123:
[FreeBSD/FreeBSD.git] / sys / dev / iwm / if_iwm.c
1 /*      $OpenBSD: if_iwm.c,v 1.167 2017/04/04 00:40:52 claudio Exp $    */
2
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 #include <sys/cdefs.h>
106 __FBSDID("$FreeBSD$");
107
108 #include "opt_wlan.h"
109 #include "opt_iwm.h"
110
111 #include <sys/param.h>
112 #include <sys/bus.h>
113 #include <sys/conf.h>
114 #include <sys/endian.h>
115 #include <sys/firmware.h>
116 #include <sys/kernel.h>
117 #include <sys/malloc.h>
118 #include <sys/mbuf.h>
119 #include <sys/mutex.h>
120 #include <sys/module.h>
121 #include <sys/proc.h>
122 #include <sys/rman.h>
123 #include <sys/socket.h>
124 #include <sys/sockio.h>
125 #include <sys/sysctl.h>
126 #include <sys/linker.h>
127
128 #include <machine/bus.h>
129 #include <machine/endian.h>
130 #include <machine/resource.h>
131
132 #include <dev/pci/pcivar.h>
133 #include <dev/pci/pcireg.h>
134
135 #include <net/bpf.h>
136
137 #include <net/if.h>
138 #include <net/if_var.h>
139 #include <net/if_arp.h>
140 #include <net/if_dl.h>
141 #include <net/if_media.h>
142 #include <net/if_types.h>
143
144 #include <netinet/in.h>
145 #include <netinet/in_systm.h>
146 #include <netinet/if_ether.h>
147 #include <netinet/ip.h>
148
149 #include <net80211/ieee80211_var.h>
150 #include <net80211/ieee80211_regdomain.h>
151 #include <net80211/ieee80211_ratectl.h>
152 #include <net80211/ieee80211_radiotap.h>
153
154 #include <dev/iwm/if_iwmreg.h>
155 #include <dev/iwm/if_iwmvar.h>
156 #include <dev/iwm/if_iwm_config.h>
157 #include <dev/iwm/if_iwm_debug.h>
158 #include <dev/iwm/if_iwm_notif_wait.h>
159 #include <dev/iwm/if_iwm_util.h>
160 #include <dev/iwm/if_iwm_binding.h>
161 #include <dev/iwm/if_iwm_phy_db.h>
162 #include <dev/iwm/if_iwm_mac_ctxt.h>
163 #include <dev/iwm/if_iwm_phy_ctxt.h>
164 #include <dev/iwm/if_iwm_time_event.h>
165 #include <dev/iwm/if_iwm_power.h>
166 #include <dev/iwm/if_iwm_scan.h>
167 #include <dev/iwm/if_iwm_sta.h>
168
169 #include <dev/iwm/if_iwm_pcie_trans.h>
170 #include <dev/iwm/if_iwm_led.h>
171 #include <dev/iwm/if_iwm_fw.h>
172
/* From DragonflyBSD */
/* Like mtod(9), but return a pointer 'off' bytes into the mbuf's data. */
#define mtodoff(m, t, off)	((t)((m)->m_data + (off)))
175
/*
 * Channel numbers supported by the NVM channel map (non-8000 family;
 * see iwm_nvm_channels_8000 below for the larger 8000-series list).
 * The first 14 entries are the 2.4 GHz channels.
 */
const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
/* The channel table must fit in the storage sized by IWM_NUM_CHANNELS. */
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");
186
/*
 * Channel numbers supported by the NVM channel map on 8000-family
 * devices; a superset of iwm_nvm_channels with additional 5 GHz entries.
 */
const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
/* The 8000-series table must fit in storage sized by IWM_NUM_CHANNELS_8000. */
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");
197
/* Number of leading 2.4 GHz entries in the channel tables above. */
#define IWM_NUM_2GHZ_CHANNELS	14
/* Mask applied to the NVM hardware-address count field. */
#define IWM_N_HW_ADDR_MASK	0xF
200
/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 *
 * 'rate' is in units of 500 kbit/s (so 2 == 1 Mb/s, matching the
 * corresponding IWM_RATE_*_PLCP name); 'plcp' is the PLCP signal
 * value the firmware expects for that rate.
 */
const struct iwm_rate {
	uint8_t rate;
	uint8_t plcp;
} iwm_rates[] = {
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
/* Indices into iwm_rates[]: CCK rates come first, OFDM rates follow. */
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
227
/* One NVM section read from the device: a length-tagged byte buffer. */
struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

/* Timeouts, in ticks, for the ucode "alive" and calibration waits. */
#define IWM_MVM_UCODE_ALIVE_TIMEOUT	hz
#define IWM_MVM_UCODE_CALIB_TIMEOUT	(2*hz)

/* State captured from the firmware's alive notification. */
struct iwm_mvm_alive_data {
	int valid;		/* nonzero once an alive response was seen */
	uint32_t scd_base_addr;	/* scheduler base address reported by ucode */
};
240
241 static int      iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
242 static int      iwm_firmware_store_section(struct iwm_softc *,
243                                            enum iwm_ucode_type,
244                                            const uint8_t *, size_t);
245 static int      iwm_set_default_calib(struct iwm_softc *, const void *);
246 static void     iwm_fw_info_free(struct iwm_fw_info *);
247 static int      iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
248 static int      iwm_alloc_fwmem(struct iwm_softc *);
249 static int      iwm_alloc_sched(struct iwm_softc *);
250 static int      iwm_alloc_kw(struct iwm_softc *);
251 static int      iwm_alloc_ict(struct iwm_softc *);
252 static int      iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
253 static void     iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
254 static void     iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
255 static int      iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
256                                   int);
257 static void     iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
258 static void     iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
259 static void     iwm_enable_interrupts(struct iwm_softc *);
260 static void     iwm_restore_interrupts(struct iwm_softc *);
261 static void     iwm_disable_interrupts(struct iwm_softc *);
262 static void     iwm_ict_reset(struct iwm_softc *);
263 static int      iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
264 static void     iwm_stop_device(struct iwm_softc *);
265 static void     iwm_mvm_nic_config(struct iwm_softc *);
266 static int      iwm_nic_rx_init(struct iwm_softc *);
267 static int      iwm_nic_tx_init(struct iwm_softc *);
268 static int      iwm_nic_init(struct iwm_softc *);
269 static int      iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
270 static int      iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
271                                    uint16_t, uint8_t *, uint16_t *);
272 static int      iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
273                                      uint16_t *, uint32_t);
274 static uint32_t iwm_eeprom_channel_flags(uint16_t);
275 static void     iwm_add_channel_band(struct iwm_softc *,
276                     struct ieee80211_channel[], int, int *, int, size_t,
277                     const uint8_t[]);
278 static void     iwm_init_channel_map(struct ieee80211com *, int, int *,
279                     struct ieee80211_channel[]);
280 static struct iwm_nvm_data *
281         iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
282                            const uint16_t *, const uint16_t *,
283                            const uint16_t *, const uint16_t *,
284                            const uint16_t *);
285 static void     iwm_free_nvm_data(struct iwm_nvm_data *);
286 static void     iwm_set_hw_address_family_8000(struct iwm_softc *,
287                                                struct iwm_nvm_data *,
288                                                const uint16_t *,
289                                                const uint16_t *);
290 static int      iwm_get_sku(const struct iwm_softc *, const uint16_t *,
291                             const uint16_t *);
292 static int      iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
293 static int      iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
294                                   const uint16_t *);
295 static int      iwm_get_n_hw_addrs(const struct iwm_softc *,
296                                    const uint16_t *);
297 static void     iwm_set_radio_cfg(const struct iwm_softc *,
298                                   struct iwm_nvm_data *, uint32_t);
299 static struct iwm_nvm_data *
300         iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
301 static int      iwm_nvm_init(struct iwm_softc *);
302 static int      iwm_pcie_load_section(struct iwm_softc *, uint8_t,
303                                       const struct iwm_fw_desc *);
304 static int      iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
305                                              bus_addr_t, uint32_t);
306 static int      iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
307                                                 const struct iwm_fw_sects *,
308                                                 int, int *);
309 static int      iwm_pcie_load_cpu_sections(struct iwm_softc *,
310                                            const struct iwm_fw_sects *,
311                                            int, int *);
312 static int      iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
313                                                const struct iwm_fw_sects *);
314 static int      iwm_pcie_load_given_ucode(struct iwm_softc *,
315                                           const struct iwm_fw_sects *);
316 static int      iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *);
317 static int      iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
318 static int      iwm_send_phy_cfg_cmd(struct iwm_softc *);
319 static int      iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
320                                               enum iwm_ucode_type);
321 static int      iwm_run_init_mvm_ucode(struct iwm_softc *, int);
322 static int      iwm_rx_addbuf(struct iwm_softc *, int, int);
323 static int      iwm_mvm_get_signal_strength(struct iwm_softc *,
324                                             struct iwm_rx_phy_info *);
325 static void     iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
326                                       struct iwm_rx_packet *);
327 static int      iwm_get_noise(struct iwm_softc *,
328                     const struct iwm_mvm_statistics_rx_non_phy *);
329 static void     iwm_mvm_handle_rx_statistics(struct iwm_softc *,
330                     struct iwm_rx_packet *);
331 static boolean_t iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *,
332                                     uint32_t, boolean_t);
333 static int      iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
334                                          struct iwm_rx_packet *,
335                                          struct iwm_node *);
336 static void     iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
337 static void     iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
338 #if 0
339 static void     iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
340                                  uint16_t);
341 #endif
342 static const struct iwm_rate *
343         iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
344                         struct mbuf *, struct iwm_tx_cmd *);
345 static int      iwm_tx(struct iwm_softc *, struct mbuf *,
346                        struct ieee80211_node *, int);
347 static int      iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
348                              const struct ieee80211_bpf_params *);
349 static int      iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_vap *);
350 static int      iwm_auth(struct ieee80211vap *, struct iwm_softc *);
351 static int      iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
352 static int      iwm_release(struct iwm_softc *, struct iwm_node *);
353 static struct ieee80211_node *
354                 iwm_node_alloc(struct ieee80211vap *,
355                                const uint8_t[IEEE80211_ADDR_LEN]);
356 static void     iwm_setrates(struct iwm_softc *, struct iwm_node *);
357 static int      iwm_media_change(struct ifnet *);
358 static int      iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
359 static void     iwm_endscan_cb(void *, int);
360 static void     iwm_mvm_fill_sf_command(struct iwm_softc *,
361                                         struct iwm_sf_cfg_cmd *,
362                                         struct ieee80211_node *);
363 static int      iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
364 static int      iwm_send_bt_init_conf(struct iwm_softc *);
365 static boolean_t iwm_mvm_is_lar_supported(struct iwm_softc *);
366 static boolean_t iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *);
367 static int      iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
368 static void     iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
369 static int      iwm_init_hw(struct iwm_softc *);
370 static void     iwm_init(struct iwm_softc *);
371 static void     iwm_start(struct iwm_softc *);
372 static void     iwm_stop(struct iwm_softc *);
373 static void     iwm_watchdog(void *);
374 static void     iwm_parent(struct ieee80211com *);
375 #ifdef IWM_DEBUG
376 static const char *
377                 iwm_desc_lookup(uint32_t);
378 static void     iwm_nic_error(struct iwm_softc *);
379 static void     iwm_nic_umac_error(struct iwm_softc *);
380 #endif
381 static void     iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
382 static void     iwm_notif_intr(struct iwm_softc *);
383 static void     iwm_intr(void *);
384 static int      iwm_attach(device_t);
385 static int      iwm_is_valid_ether_addr(uint8_t *);
386 static void     iwm_preinit(void *);
387 static int      iwm_detach_local(struct iwm_softc *sc, int);
388 static void     iwm_init_task(void *);
389 static void     iwm_radiotap_attach(struct iwm_softc *);
390 static struct ieee80211vap *
391                 iwm_vap_create(struct ieee80211com *,
392                                const char [IFNAMSIZ], int,
393                                enum ieee80211_opmode, int,
394                                const uint8_t [IEEE80211_ADDR_LEN],
395                                const uint8_t [IEEE80211_ADDR_LEN]);
396 static void     iwm_vap_delete(struct ieee80211vap *);
397 static void     iwm_xmit_queue_drain(struct iwm_softc *);
398 static void     iwm_scan_start(struct ieee80211com *);
399 static void     iwm_scan_end(struct ieee80211com *);
400 static void     iwm_update_mcast(struct ieee80211com *);
401 static void     iwm_set_channel(struct ieee80211com *);
402 static void     iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
403 static void     iwm_scan_mindwell(struct ieee80211_scan_state *);
404 static int      iwm_detach(device_t);
405
/* Tunable to disable LAR (location-aware regulatory) support. */
static int	iwm_lar_disable = 0;
TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);
408
409 /*
410  * Firmware parser.
411  */
412
413 static int
414 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
415 {
416         const struct iwm_fw_cscheme_list *l = (const void *)data;
417
418         if (dlen < sizeof(*l) ||
419             dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
420                 return EINVAL;
421
422         /* we don't actually store anything for now, always use s/w crypto */
423
424         return 0;
425 }
426
427 static int
428 iwm_firmware_store_section(struct iwm_softc *sc,
429     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
430 {
431         struct iwm_fw_sects *fws;
432         struct iwm_fw_desc *fwone;
433
434         if (type >= IWM_UCODE_TYPE_MAX)
435                 return EINVAL;
436         if (dlen < sizeof(uint32_t))
437                 return EINVAL;
438
439         fws = &sc->sc_fw.fw_sects[type];
440         if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
441                 return EINVAL;
442
443         fwone = &fws->fw_sect[fws->fw_count];
444
445         /* first 32bit are device load offset */
446         memcpy(&fwone->offset, data, sizeof(uint32_t));
447
448         /* rest is data */
449         fwone->data = data + sizeof(uint32_t);
450         fwone->len = dlen - sizeof(uint32_t);
451
452         fws->fw_count++;
453
454         return 0;
455 }
456
/* Scan-channel count assumed when the firmware doesn't specify one. */
#define IWM_DEFAULT_SCAN_CHANNELS 40

/* iwlwifi: iwl-drv.c */
/* Layout of an IWM_UCODE_TLV_DEF_CALIB TLV payload. */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;
464
465 static int
466 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
467 {
468         const struct iwm_tlv_calib_data *def_calib = data;
469         uint32_t ucode_type = le32toh(def_calib->ucode_type);
470
471         if (ucode_type >= IWM_UCODE_TYPE_MAX) {
472                 device_printf(sc->sc_dev,
473                     "Wrong ucode_type %u for default "
474                     "calibration.\n", ucode_type);
475                 return EINVAL;
476         }
477
478         sc->sc_default_calib[ucode_type].flow_trigger =
479             def_calib->calib.flow_trigger;
480         sc->sc_default_calib[ucode_type].event_trigger =
481             def_calib->calib.event_trigger;
482
483         return 0;
484 }
485
486 static int
487 iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
488                         struct iwm_ucode_capabilities *capa)
489 {
490         const struct iwm_ucode_api *ucode_api = (const void *)data;
491         uint32_t api_index = le32toh(ucode_api->api_index);
492         uint32_t api_flags = le32toh(ucode_api->api_flags);
493         int i;
494
495         if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
496                 device_printf(sc->sc_dev,
497                     "api flags index %d larger than supported by driver\n",
498                     api_index);
499                 /* don't return an error so we can load FW that has more bits */
500                 return 0;
501         }
502
503         for (i = 0; i < 32; i++) {
504                 if (api_flags & (1U << i))
505                         setbit(capa->enabled_api, i + 32 * api_index);
506         }
507
508         return 0;
509 }
510
511 static int
512 iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
513                            struct iwm_ucode_capabilities *capa)
514 {
515         const struct iwm_ucode_capa *ucode_capa = (const void *)data;
516         uint32_t api_index = le32toh(ucode_capa->api_index);
517         uint32_t api_flags = le32toh(ucode_capa->api_capa);
518         int i;
519
520         if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
521                 device_printf(sc->sc_dev,
522                     "capa flags index %d larger than supported by driver\n",
523                     api_index);
524                 /* don't return an error so we can load FW that has more bits */
525                 return 0;
526         }
527
528         for (i = 0; i < 32; i++) {
529                 if (api_flags & (1U << i))
530                         setbit(capa->enabled_capa, i + 32 * api_index);
531         }
532
533         return 0;
534 }
535
/*
 * Release a loaded firmware image and clear the parsed section table.
 * fw_status is deliberately preserved so waiters synchronizing on it
 * (see iwm_read_firmware) are not disturbed.
 */
static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}
544
545 static int
546 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
547 {
548         struct iwm_fw_info *fw = &sc->sc_fw;
549         const struct iwm_tlv_ucode_header *uhdr;
550         struct iwm_ucode_tlv tlv;
551         struct iwm_ucode_capabilities *capa = &sc->ucode_capa;
552         enum iwm_ucode_tlv_type tlv_type;
553         const struct firmware *fwp;
554         const uint8_t *data;
555         uint32_t usniffer_img;
556         uint32_t paging_mem_size;
557         int num_of_cpus;
558         int error = 0;
559         size_t len;
560
561         if (fw->fw_status == IWM_FW_STATUS_DONE &&
562             ucode_type != IWM_UCODE_INIT)
563                 return 0;
564
565         while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
566                 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
567         fw->fw_status = IWM_FW_STATUS_INPROGRESS;
568
569         if (fw->fw_fp != NULL)
570                 iwm_fw_info_free(fw);
571
572         /*
573          * Load firmware into driver memory.
574          * fw_fp will be set.
575          */
576         IWM_UNLOCK(sc);
577         fwp = firmware_get(sc->cfg->fw_name);
578         IWM_LOCK(sc);
579         if (fwp == NULL) {
580                 device_printf(sc->sc_dev,
581                     "could not read firmware %s (error %d)\n",
582                     sc->cfg->fw_name, error);
583                 goto out;
584         }
585         fw->fw_fp = fwp;
586
587         /* (Re-)Initialize default values. */
588         capa->flags = 0;
589         capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
590         capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
591         memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
592         memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
593         memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
594
595         /*
596          * Parse firmware contents
597          */
598
599         uhdr = (const void *)fw->fw_fp->data;
600         if (*(const uint32_t *)fw->fw_fp->data != 0
601             || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
602                 device_printf(sc->sc_dev, "invalid firmware %s\n",
603                     sc->cfg->fw_name);
604                 error = EINVAL;
605                 goto out;
606         }
607
608         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
609             IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
610             IWM_UCODE_MINOR(le32toh(uhdr->ver)),
611             IWM_UCODE_API(le32toh(uhdr->ver)));
612         data = uhdr->data;
613         len = fw->fw_fp->datasize - sizeof(*uhdr);
614
615         while (len >= sizeof(tlv)) {
616                 size_t tlv_len;
617                 const void *tlv_data;
618
619                 memcpy(&tlv, data, sizeof(tlv));
620                 tlv_len = le32toh(tlv.length);
621                 tlv_type = le32toh(tlv.type);
622
623                 len -= sizeof(tlv);
624                 data += sizeof(tlv);
625                 tlv_data = data;
626
627                 if (len < tlv_len) {
628                         device_printf(sc->sc_dev,
629                             "firmware too short: %zu bytes\n",
630                             len);
631                         error = EINVAL;
632                         goto parse_out;
633                 }
634
635                 switch ((int)tlv_type) {
636                 case IWM_UCODE_TLV_PROBE_MAX_LEN:
637                         if (tlv_len < sizeof(uint32_t)) {
638                                 device_printf(sc->sc_dev,
639                                     "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
640                                     __func__,
641                                     (int) tlv_len);
642                                 error = EINVAL;
643                                 goto parse_out;
644                         }
645                         capa->max_probe_length =
646                             le32toh(*(const uint32_t *)tlv_data);
647                         /* limit it to something sensible */
648                         if (capa->max_probe_length >
649                             IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
650                                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
651                                     "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
652                                     "ridiculous\n", __func__);
653                                 error = EINVAL;
654                                 goto parse_out;
655                         }
656                         break;
657                 case IWM_UCODE_TLV_PAN:
658                         if (tlv_len) {
659                                 device_printf(sc->sc_dev,
660                                     "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
661                                     __func__,
662                                     (int) tlv_len);
663                                 error = EINVAL;
664                                 goto parse_out;
665                         }
666                         capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
667                         break;
668                 case IWM_UCODE_TLV_FLAGS:
669                         if (tlv_len < sizeof(uint32_t)) {
670                                 device_printf(sc->sc_dev,
671                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
672                                     __func__,
673                                     (int) tlv_len);
674                                 error = EINVAL;
675                                 goto parse_out;
676                         }
677                         /*
678                          * Apparently there can be many flags, but Linux driver
679                          * parses only the first one, and so do we.
680                          *
681                          * XXX: why does this override IWM_UCODE_TLV_PAN?
682                          * Intentional or a bug?  Observations from
683                          * current firmware file:
684                          *  1) TLV_PAN is parsed first
685                          *  2) TLV_FLAGS contains TLV_FLAGS_PAN
686                          * ==> this resets TLV_PAN to itself... hnnnk
687                          */
688                         capa->flags = le32toh(*(const uint32_t *)tlv_data);
689                         break;
690                 case IWM_UCODE_TLV_CSCHEME:
691                         if ((error = iwm_store_cscheme(sc,
692                             tlv_data, tlv_len)) != 0) {
693                                 device_printf(sc->sc_dev,
694                                     "%s: iwm_store_cscheme(): returned %d\n",
695                                     __func__,
696                                     error);
697                                 goto parse_out;
698                         }
699                         break;
700                 case IWM_UCODE_TLV_NUM_OF_CPU:
701                         if (tlv_len != sizeof(uint32_t)) {
702                                 device_printf(sc->sc_dev,
703                                     "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
704                                     __func__,
705                                     (int) tlv_len);
706                                 error = EINVAL;
707                                 goto parse_out;
708                         }
709                         num_of_cpus = le32toh(*(const uint32_t *)tlv_data);
710                         if (num_of_cpus == 2) {
711                                 fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus =
712                                         TRUE;
713                                 fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus =
714                                         TRUE;
715                                 fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus =
716                                         TRUE;
717                         } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
718                                 device_printf(sc->sc_dev,
719                                     "%s: Driver supports only 1 or 2 CPUs\n",
720                                     __func__);
721                                 error = EINVAL;
722                                 goto parse_out;
723                         }
724                         break;
725                 case IWM_UCODE_TLV_SEC_RT:
726                         if ((error = iwm_firmware_store_section(sc,
727                             IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
728                                 device_printf(sc->sc_dev,
729                                     "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
730                                     __func__,
731                                     error);
732                                 goto parse_out;
733                         }
734                         break;
735                 case IWM_UCODE_TLV_SEC_INIT:
736                         if ((error = iwm_firmware_store_section(sc,
737                             IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
738                                 device_printf(sc->sc_dev,
739                                     "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
740                                     __func__,
741                                     error);
742                                 goto parse_out;
743                         }
744                         break;
745                 case IWM_UCODE_TLV_SEC_WOWLAN:
746                         if ((error = iwm_firmware_store_section(sc,
747                             IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
748                                 device_printf(sc->sc_dev,
749                                     "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
750                                     __func__,
751                                     error);
752                                 goto parse_out;
753                         }
754                         break;
755                 case IWM_UCODE_TLV_DEF_CALIB:
756                         if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
757                                 device_printf(sc->sc_dev,
758                                     "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
759                                     __func__,
760                                     (int) tlv_len,
761                                     (int) sizeof(struct iwm_tlv_calib_data));
762                                 error = EINVAL;
763                                 goto parse_out;
764                         }
765                         if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
766                                 device_printf(sc->sc_dev,
767                                     "%s: iwm_set_default_calib() failed: %d\n",
768                                     __func__,
769                                     error);
770                                 goto parse_out;
771                         }
772                         break;
773                 case IWM_UCODE_TLV_PHY_SKU:
774                         if (tlv_len != sizeof(uint32_t)) {
775                                 error = EINVAL;
776                                 device_printf(sc->sc_dev,
777                                     "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
778                                     __func__,
779                                     (int) tlv_len);
780                                 goto parse_out;
781                         }
782                         sc->sc_fw.phy_config =
783                             le32toh(*(const uint32_t *)tlv_data);
784                         sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
785                                                   IWM_FW_PHY_CFG_TX_CHAIN) >>
786                                                   IWM_FW_PHY_CFG_TX_CHAIN_POS;
787                         sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
788                                                   IWM_FW_PHY_CFG_RX_CHAIN) >>
789                                                   IWM_FW_PHY_CFG_RX_CHAIN_POS;
790                         break;
791
792                 case IWM_UCODE_TLV_API_CHANGES_SET: {
793                         if (tlv_len != sizeof(struct iwm_ucode_api)) {
794                                 error = EINVAL;
795                                 goto parse_out;
796                         }
797                         if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
798                                 error = EINVAL;
799                                 goto parse_out;
800                         }
801                         break;
802                 }
803
804                 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
805                         if (tlv_len != sizeof(struct iwm_ucode_capa)) {
806                                 error = EINVAL;
807                                 goto parse_out;
808                         }
809                         if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
810                                 error = EINVAL;
811                                 goto parse_out;
812                         }
813                         break;
814                 }
815
816                 case 48: /* undocumented TLV */
817                 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
818                 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
819                         /* ignore, not used by current driver */
820                         break;
821
822                 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
823                         if ((error = iwm_firmware_store_section(sc,
824                             IWM_UCODE_REGULAR_USNIFFER, tlv_data,
825                             tlv_len)) != 0)
826                                 goto parse_out;
827                         break;
828
829                 case IWM_UCODE_TLV_PAGING:
830                         if (tlv_len != sizeof(uint32_t)) {
831                                 error = EINVAL;
832                                 goto parse_out;
833                         }
834                         paging_mem_size = le32toh(*(const uint32_t *)tlv_data);
835
836                         IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
837                             "%s: Paging: paging enabled (size = %u bytes)\n",
838                             __func__, paging_mem_size);
839                         if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
840                                 device_printf(sc->sc_dev,
841                                         "%s: Paging: driver supports up to %u bytes for paging image\n",
842                                         __func__, IWM_MAX_PAGING_IMAGE_SIZE);
843                                 error = EINVAL;
844                                 goto out;
845                         }
846                         if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
847                                 device_printf(sc->sc_dev,
848                                     "%s: Paging: image isn't multiple %u\n",
849                                     __func__, IWM_FW_PAGING_SIZE);
850                                 error = EINVAL;
851                                 goto out;
852                         }
853
854                         sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
855                             paging_mem_size;
856                         usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
857                         sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
858                             paging_mem_size;
859                         break;
860
861                 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
862                         if (tlv_len != sizeof(uint32_t)) {
863                                 error = EINVAL;
864                                 goto parse_out;
865                         }
866                         capa->n_scan_channels =
867                             le32toh(*(const uint32_t *)tlv_data);
868                         break;
869
870                 case IWM_UCODE_TLV_FW_VERSION:
871                         if (tlv_len != sizeof(uint32_t) * 3) {
872                                 error = EINVAL;
873                                 goto parse_out;
874                         }
875                         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
876                             "%d.%d.%d",
877                             le32toh(((const uint32_t *)tlv_data)[0]),
878                             le32toh(((const uint32_t *)tlv_data)[1]),
879                             le32toh(((const uint32_t *)tlv_data)[2]));
880                         break;
881
882                 case IWM_UCODE_TLV_FW_MEM_SEG:
883                         break;
884
885                 default:
886                         device_printf(sc->sc_dev,
887                             "%s: unknown firmware section %d, abort\n",
888                             __func__, tlv_type);
889                         error = EINVAL;
890                         goto parse_out;
891                 }
892
893                 len -= roundup(tlv_len, 4);
894                 data += roundup(tlv_len, 4);
895         }
896
897         KASSERT(error == 0, ("unhandled error"));
898
899  parse_out:
900         if (error) {
901                 device_printf(sc->sc_dev, "firmware parse error %d, "
902                     "section type %d\n", error, tlv_type);
903         }
904
905  out:
906         if (error) {
907                 fw->fw_status = IWM_FW_STATUS_NONE;
908                 if (fw->fw_fp != NULL)
909                         iwm_fw_info_free(fw);
910         } else
911                 fw->fw_status = IWM_FW_STATUS_DONE;
912         wakeup(&sc->sc_fw);
913
914         return error;
915 }
916
917 /*
918  * DMA resource routines
919  */
920
921 /* fwmem is used to load firmware onto the card */
922 static int
923 iwm_alloc_fwmem(struct iwm_softc *sc)
924 {
925         /* Must be aligned on a 16-byte boundary. */
926         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
927             IWM_FH_MEM_TB_MAX_LENGTH, 16);
928 }
929
930 /* tx scheduler rings.  not used? */
931 static int
932 iwm_alloc_sched(struct iwm_softc *sc)
933 {
934         /* TX scheduler rings must be aligned on a 1KB boundary. */
935         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
936             nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
937 }
938
939 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
940 static int
941 iwm_alloc_kw(struct iwm_softc *sc)
942 {
943         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
944 }
945
946 /* interrupt cause table */
947 static int
948 iwm_alloc_ict(struct iwm_softc *sc)
949 {
950         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
951             IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
952 }
953
/*
 * Allocate all DMA resources for one RX ring: the descriptor array, the
 * shared RX status area, a DMA tag plus one map per ring slot, a spare
 * map for buffer rotation, and an initial mbuf for every slot.
 *
 * Returns 0 on success or a bus_dma/errno value on failure; on failure
 * everything allocated so far is released via iwm_free_rx_ring().
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
        bus_size_t size;
        int i, error;

        ring->cur = 0;

        /*
         * Allocate RX descriptors (256-byte aligned).  Each slot is a
         * single 32-bit word — presumably the RB DMA address programmed
         * by iwm_rx_addbuf(); confirm against that function.
         */
        size = IWM_RX_RING_COUNT * sizeof(uint32_t);
        error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "could not allocate RX ring DMA memory\n");
                goto fail;
        }
        ring->desc = ring->desc_dma.vaddr;

        /* Allocate RX status area (16-byte aligned). */
        error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
            sizeof(*ring->stat), 16);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "could not allocate RX status DMA memory\n");
                goto fail;
        }
        ring->stat = ring->stat_dma.vaddr;

        /* Create RX buffer DMA tag: single segment of IWM_RBUF_SIZE, 32-bit addressable. */
        error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
            IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "%s: could not create RX buf DMA tag, error %d\n",
                    __func__, error);
                goto fail;
        }

        /* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
        error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "%s: could not create RX buf DMA map, error %d\n",
                    __func__, error);
                goto fail;
        }
        /*
         * Allocate and map RX buffers: one DMA map and one mbuf (loaded
         * by iwm_rx_addbuf()) per ring slot.
         */
        for (i = 0; i < IWM_RX_RING_COUNT; i++) {
                struct iwm_rx_data *data = &ring->data[i];
                error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
                if (error != 0) {
                        device_printf(sc->sc_dev,
                            "%s: could not create RX buf DMA map, error %d\n",
                            __func__, error);
                        goto fail;
                }
                data->m = NULL;

                if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
                        goto fail;
                }
        }
        return 0;

fail:   iwm_free_rx_ring(sc, ring);
        return error;
}
1024
1025 static void
1026 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1027 {
1028         /* Reset the ring state */
1029         ring->cur = 0;
1030
1031         /*
1032          * The hw rx ring index in shared memory must also be cleared,
1033          * otherwise the discrepancy can cause reprocessing chaos.
1034          */
1035         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1036 }
1037
1038 static void
1039 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1040 {
1041         int i;
1042
1043         iwm_dma_contig_free(&ring->desc_dma);
1044         iwm_dma_contig_free(&ring->stat_dma);
1045
1046         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1047                 struct iwm_rx_data *data = &ring->data[i];
1048
1049                 if (data->m != NULL) {
1050                         bus_dmamap_sync(ring->data_dmat, data->map,
1051                             BUS_DMASYNC_POSTREAD);
1052                         bus_dmamap_unload(ring->data_dmat, data->map);
1053                         m_freem(data->m);
1054                         data->m = NULL;
1055                 }
1056                 if (data->map != NULL) {
1057                         bus_dmamap_destroy(ring->data_dmat, data->map);
1058                         data->map = NULL;
1059                 }
1060         }
1061         if (ring->spare_map != NULL) {
1062                 bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1063                 ring->spare_map = NULL;
1064         }
1065         if (ring->data_dmat != NULL) {
1066                 bus_dma_tag_destroy(ring->data_dmat);
1067                 ring->data_dmat = NULL;
1068         }
1069 }
1070
/*
 * Allocate all DMA resources for TX ring 'qid': the TFD descriptor
 * array and, for rings up to and including the command queue, the
 * device-command array, a buffer DMA tag and one map per slot.
 *
 * Returns 0 on success or a bus_dma/errno value on failure; on failure
 * everything allocated so far is released via iwm_free_tx_ring().
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
        bus_addr_t paddr;
        bus_size_t size;
        size_t maxsize;
        int nsegments;
        int i, error;

        ring->qid = qid;
        ring->queued = 0;
        ring->cur = 0;

        /* Allocate TX descriptors (256-byte aligned). */
        size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
        error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "could not allocate TX ring DMA memory\n");
                goto fail;
        }
        ring->desc = ring->desc_dma.vaddr;

        /*
         * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
         * to allocate commands space for other rings.
         */
        if (qid > IWM_MVM_CMD_QUEUE)
                return 0;

        size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
        error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "could not allocate TX cmd DMA memory\n");
                goto fail;
        }
        ring->cmd = ring->cmd_dma.vaddr;

        /* FW commands may require more mapped space than packets. */
        if (qid == IWM_MVM_CMD_QUEUE) {
                maxsize = IWM_RBUF_SIZE;
                nsegments = 1;
        } else {
                maxsize = MCLBYTES;
                nsegments = IWM_MAX_SCATTER - 2;
        }

        error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
            nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
        if (error != 0) {
                device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
                goto fail;
        }

        /*
         * Walk the command array, recording for each slot the physical
         * address of its command header and of the scratch field inside
         * the embedded TX command.
         */
        paddr = ring->cmd_dma.paddr;
        for (i = 0; i < IWM_TX_RING_COUNT; i++) {
                struct iwm_tx_data *data = &ring->data[i];

                data->cmd_paddr = paddr;
                data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
                    + offsetof(struct iwm_tx_cmd, scratch);
                paddr += sizeof(struct iwm_device_cmd);

                error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
                if (error != 0) {
                        device_printf(sc->sc_dev,
                            "could not create TX buf DMA map\n");
                        goto fail;
                }
        }
        /* After the walk, paddr must sit exactly at the end of cmd_dma. */
        KASSERT(paddr == ring->cmd_dma.paddr + size,
            ("invalid physical address"));
        return 0;

fail:   iwm_free_tx_ring(sc, ring);
        return error;
}
1150
1151 static void
1152 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1153 {
1154         int i;
1155
1156         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1157                 struct iwm_tx_data *data = &ring->data[i];
1158
1159                 if (data->m != NULL) {
1160                         bus_dmamap_sync(ring->data_dmat, data->map,
1161                             BUS_DMASYNC_POSTWRITE);
1162                         bus_dmamap_unload(ring->data_dmat, data->map);
1163                         m_freem(data->m);
1164                         data->m = NULL;
1165                 }
1166         }
1167         /* Clear TX descriptors. */
1168         memset(ring->desc, 0, ring->desc_dma.size);
1169         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1170             BUS_DMASYNC_PREWRITE);
1171         sc->qfullmsk &= ~(1 << ring->qid);
1172         ring->queued = 0;
1173         ring->cur = 0;
1174
1175         if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
1176                 iwm_pcie_clear_cmd_in_flight(sc);
1177 }
1178
1179 static void
1180 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1181 {
1182         int i;
1183
1184         iwm_dma_contig_free(&ring->desc_dma);
1185         iwm_dma_contig_free(&ring->cmd_dma);
1186
1187         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1188                 struct iwm_tx_data *data = &ring->data[i];
1189
1190                 if (data->m != NULL) {
1191                         bus_dmamap_sync(ring->data_dmat, data->map,
1192                             BUS_DMASYNC_POSTWRITE);
1193                         bus_dmamap_unload(ring->data_dmat, data->map);
1194                         m_freem(data->m);
1195                         data->m = NULL;
1196                 }
1197                 if (data->map != NULL) {
1198                         bus_dmamap_destroy(ring->data_dmat, data->map);
1199                         data->map = NULL;
1200                 }
1201         }
1202         if (ring->data_dmat != NULL) {
1203                 bus_dma_tag_destroy(ring->data_dmat);
1204                 ring->data_dmat = NULL;
1205         }
1206 }
1207
1208 /*
1209  * High-level hardware frobbing routines
1210  */
1211
1212 static void
1213 iwm_enable_interrupts(struct iwm_softc *sc)
1214 {
1215         sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1216         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1217 }
1218
1219 static void
1220 iwm_restore_interrupts(struct iwm_softc *sc)
1221 {
1222         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1223 }
1224
1225 static void
1226 iwm_disable_interrupts(struct iwm_softc *sc)
1227 {
1228         /* disable interrupts */
1229         IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1230
1231         /* acknowledge all interrupts */
1232         IWM_WRITE(sc, IWM_CSR_INT, ~0);
1233         IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1234 }
1235
1236 static void
1237 iwm_ict_reset(struct iwm_softc *sc)
1238 {
1239         iwm_disable_interrupts(sc);
1240
1241         /* Reset ICT table. */
1242         memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1243         sc->ict_cur = 0;
1244
1245         /* Set physical address of ICT table (4KB aligned). */
1246         IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1247             IWM_CSR_DRAM_INT_TBL_ENABLE
1248             | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1249             | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1250             | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1251
1252         /* Switch to ICT interrupt mode in driver. */
1253         sc->sc_flags |= IWM_FLAG_USE_ICT;
1254
1255         /* Re-enable interrupts. */
1256         IWM_WRITE(sc, IWM_CSR_INT, ~0);
1257         iwm_enable_interrupts(sc);
1258 }
1259
1260 /* iwlwifi pcie/trans.c */
1261
1262 /*
1263  * Since this .. hard-resets things, it's time to actually
1264  * mark the first vap (if any) as having no mac context.
1265  * It's annoying, but since the driver is potentially being
1266  * stop/start'ed whilst active (thanks openbsd port!) we
1267  * have to correctly track this.
1268  */
/*
 * Full device stop: quiesce interrupts and DMA, reset all rings, power
 * the device down and leave only the RF-kill interrupt armed.  The
 * ordering of the steps below mirrors iwlwifi's pcie/trans.c and is
 * significant; do not reorder.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
        struct ieee80211com *ic = &sc->sc_ic;
        struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
        int chnl, qid;
        uint32_t mask = 0;

        /* tell the device to stop sending interrupts */
        iwm_disable_interrupts(sc);

        /*
         * FreeBSD-local: mark the first vap as not-uploaded,
         * so the next transition through auth/assoc
         * will correctly populate the MAC context.
         */
        if (vap) {
                struct iwm_vap *iv = IWM_VAP(vap);
                iv->phy_ctxt = NULL;
                iv->is_uploaded = 0;
        }

        /* device going down, Stop using ICT table */
        sc->sc_flags &= ~IWM_FLAG_USE_ICT;

        /* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

        if (iwm_nic_lock(sc)) {
                /* Deactivate the TX scheduler. */
                iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

                /* Stop each Tx DMA channel */
                for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
                        IWM_WRITE(sc,
                            IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
                        mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
                }

                /* Wait for DMA channels to be idle (up to 5ms poll). */
                if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
                    5000)) {
                        /* Not fatal; log and carry on with the teardown. */
                        device_printf(sc->sc_dev,
                            "Failing on timeout while stopping DMA channel: [0x%08x]\n",
                            IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
                }
                iwm_nic_unlock(sc);
        }
        iwm_pcie_rx_stop(sc);

        /* Stop RX ring. */
        iwm_reset_rx_ring(sc, &sc->rxq);

        /* Reset all TX rings. */
        for (qid = 0; qid < nitems(sc->txq); qid++)
                iwm_reset_tx_ring(sc, &sc->txq[qid]);

        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
                /* Power-down device's busmaster DMA clocks */
                if (iwm_nic_lock(sc)) {
                        iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
                            IWM_APMG_CLK_VAL_DMA_CLK_RQT);
                        iwm_nic_unlock(sc);
                }
                DELAY(5);
        }

        /* Make sure (redundant) we've released our request to stay awake */
        IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
            IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

        /* Stop the device, and put it in low power state */
        iwm_apm_stop(sc);

        /* Upon stop, the APM issues an interrupt if HW RF kill is set.
         * Clean again the interrupt here
         */
        iwm_disable_interrupts(sc);
        /* stop and reset the on-board processor */
        IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);

        /*
         * Even if we stop the HW, we still want the RF kill
         * interrupt
         */
        iwm_enable_rfkill_int(sc);
        iwm_check_rfkill(sc);
}
1355
1356 /* iwlwifi: mvm/ops.c */
1357 static void
1358 iwm_mvm_nic_config(struct iwm_softc *sc)
1359 {
1360         uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1361         uint32_t reg_val = 0;
1362         uint32_t phy_config = iwm_mvm_get_phy_config(sc);
1363
1364         radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1365             IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1366         radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1367             IWM_FW_PHY_CFG_RADIO_STEP_POS;
1368         radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1369             IWM_FW_PHY_CFG_RADIO_DASH_POS;
1370
1371         /* SKU control */
1372         reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1373             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1374         reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1375             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1376
1377         /* radio configuration */
1378         reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1379         reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1380         reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1381
1382         IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1383
1384         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1385             "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1386             radio_cfg_step, radio_cfg_dash);
1387
1388         /*
1389          * W/A : NIC is stuck in a reset state after Early PCIe power off
1390          * (PCIe power is lost before PERST# is asserted), causing ME FW
1391          * to lose ownership and not being able to obtain it back.
1392          */
1393         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1394                 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1395                     IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1396                     ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1397         }
1398 }
1399
/*
 * Program the device's RX DMA engine: clear the status area, stop RX
 * DMA, reset the ring pointers, hand the descriptor/status physical
 * addresses to the hardware and enable channel 0 with the configuration
 * described below.  Returns 0 on success or EBUSY if the NIC lock
 * cannot be taken.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
        /*
         * Initialize RX ring.  This is from the iwn driver.
         */
        memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

        /* Stop Rx DMA */
        iwm_pcie_rx_stop(sc);

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* reset and flush pointers */
        IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
        IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
        IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
        IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

        /* Set physical address of RX ring (256-byte aligned). */
        IWM_WRITE(sc,
            IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

        /* Set physical address of RX status (16-byte aligned). */
        IWM_WRITE(sc,
            IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

        /* Enable Rx DMA
         * XXX 5000 HW isn't supported by the iwm(4) driver.
         * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
         *      the credit mechanism in 5000 HW RX FIFO
         * Direct rx interrupts to hosts
         * Rx buffer size 4 or 8k or 12k
         * RB timeout 0x10
         * 256 RBDs
         */
        IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
            IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
            IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |  /* HW bug */
            IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
            IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
            (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
            IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

        /* Set default interrupt coalescing timer. */
        IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

        /* W/A for interrupt coalescing bug in 7260 and 3160 */
        if (sc->cfg->host_interrupt_operation_mode)
                IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

        /*
         * Thus sayeth el jefe (iwlwifi) via a comment:
         *
         * This value should initially be 0 (before preparing any
         * RBs), should be 8 after preparing the first 8 RBs (for example)
         */
        IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

        iwm_nic_unlock(sc);

        return 0;
}
1463
/*
 * Initialize the transmit side of the NIC: deactivate the TX
 * scheduler, program the "keep warm" page and each TX ring's
 * descriptor base address, then put the scheduler into auto-active
 * mode.
 *
 * Returns 0 on success or EBUSY if the NIC lock cannot be taken.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
        int qid;

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* Deactivate TX scheduler. */
        iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

        /* Set physical address of "keep warm" page (16-byte aligned). */
        IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

        /* Initialize TX rings. */
        for (qid = 0; qid < nitems(sc->txq); qid++) {
                struct iwm_tx_ring *txq = &sc->txq[qid];

                /* Set physical address of TX ring (256-byte aligned). */
                IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
                    txq->desc_dma.paddr >> 8);
                IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
                    "%s: loading ring %d descriptors (%p) at %lx\n",
                    __func__,
                    qid, txq->desc,
                    (unsigned long) (txq->desc_dma.paddr >> 8));
        }

        iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

        iwm_nic_unlock(sc);

        return 0;
}
1498
/*
 * One-shot NIC hardware init: APM bring-up, power settings (7000
 * family only), MVM-specific configuration, then RX and TX ring
 * initialization, finishing by enabling shadow registers.
 *
 * Returns 0 on success or the error from the RX/TX init helpers.
 */
static int
iwm_nic_init(struct iwm_softc *sc)
{
        int error;

        iwm_apm_init(sc);
        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
                iwm_set_pwr(sc);

        iwm_mvm_nic_config(sc);

        if ((error = iwm_nic_rx_init(sc)) != 0)
                return error;

        /*
         * Ditto for TX, from iwn
         */
        if ((error = iwm_nic_tx_init(sc)) != 0)
                return error;

        IWM_DPRINTF(sc, IWM_DEBUG_RESET,
            "%s: shadow registers enabled\n", __func__);
        /* Enable shadow register mirroring for (nearly) all CSRs. */
        IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

        return 0;
}
1525
/*
 * Enable TX queue 'qid' and bind it to hardware FIFO 'fifo'.
 *
 * The command queue (IWM_MVM_CMD_QUEUE) is configured directly
 * through the scheduler's PRPH registers and SRAM context; all other
 * queues are configured by sending an IWM_SCD_QUEUE_CFG command to
 * the firmware.  Returns 0 on success, EBUSY if the NIC lock cannot
 * be (re)acquired, or the error from iwm_mvm_send_cmd_pdu().
 */
int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
        if (!iwm_nic_lock(sc)) {
                device_printf(sc->sc_dev,
                    "%s: cannot enable txq %d\n",
                    __func__,
                    qid);
                return EBUSY;
        }

        /* Reset the queue's write pointer to slot 0. */
        IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

        if (qid == IWM_MVM_CMD_QUEUE) {
                /* unactivate before configuration */
                iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
                    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
                    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

                iwm_nic_unlock(sc);

                /* The command queue does not aggregate. */
                iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

                if (!iwm_nic_lock(sc)) {
                        device_printf(sc->sc_dev,
                            "%s: cannot enable txq %d\n", __func__, qid);
                        return EBUSY;
                }
                /* Reset the read pointer to match the write pointer. */
                iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
                iwm_nic_unlock(sc);

                /* Clear the first word of the queue's SRAM context. */
                iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
                /* Set scheduler window size and frame limit. */
                iwm_write_mem32(sc,
                    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
                    sizeof(uint32_t),
                    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
                    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
                    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
                    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

                if (!iwm_nic_lock(sc)) {
                        device_printf(sc->sc_dev,
                            "%s: cannot enable txq %d\n", __func__, qid);
                        return EBUSY;
                }
                /* Mark the queue active and attach it to its FIFO. */
                iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
                    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
                    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
                    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
                    IWM_SCD_QUEUE_STTS_REG_MSK);
        } else {
                struct iwm_scd_txq_cfg_cmd cmd;
                int error;

                iwm_nic_unlock(sc);

                /* Have the firmware configure non-command queues. */
                memset(&cmd, 0, sizeof(cmd));
                cmd.scd_queue = qid;
                cmd.enable = 1;
                cmd.sta_id = sta_id;
                cmd.tx_fifo = fifo;
                cmd.aggregate = 0;
                cmd.window = IWM_FRAME_LIMIT;

                error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
                    sizeof(cmd), &cmd);
                if (error) {
                        device_printf(sc->sc_dev,
                            "cannot enable txq %d\n", qid);
                        return error;
                }

                if (!iwm_nic_lock(sc))
                        return EBUSY;
        }

        /*
         * NOTE(review): this ORs the queue *index* into IWM_SCD_EN_CTRL
         * rather than (1 << qid).  It matches the OpenBSD origin of this
         * code, but verify against the SCD_EN_CTRL bit layout.
         */
        iwm_write_prph(sc, IWM_SCD_EN_CTRL,
            iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

        iwm_nic_unlock(sc);

        IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
            __func__, qid, fifo);

        return 0;
}
1613
/*
 * Post-"alive" firmware setup: reset the ICT table, validate the
 * scheduler SRAM base address reported by the firmware against the
 * PRPH value, clear the scheduler context/translation SRAM, enable
 * the command queue and all TX DMA channels, and re-enable L1-Active
 * (except on family 8000).
 *
 * 'scd_base_addr' is the address from the alive notification; 0 means
 * "don't check".  Returns 0 on success or EBUSY/err on failure.
 */
static int
iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
{
        int error, chnl;

        int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
            IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);

        if (!iwm_nic_lock(sc))
                return EBUSY;

        iwm_ict_reset(sc);

        /* The PRPH value is authoritative; a mismatch is only logged. */
        sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
        if (scd_base_addr != 0 &&
            scd_base_addr != sc->scd_base_addr) {
                device_printf(sc->sc_dev,
                    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
                    __func__, sc->scd_base_addr, scd_base_addr);
        }

        iwm_nic_unlock(sc);

        /* reset context data, TX status and translation data */
        error = iwm_write_mem(sc,
            sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
            NULL, clear_dwords);
        if (error)
                return EBUSY;

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* Set physical address of TX scheduler rings (1KB aligned). */
        iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

        /* Disable scheduler chain extension. */
        iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

        iwm_nic_unlock(sc);

        /* enable command channel */
        error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
        if (error)
                return error;

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* Activate all TX scheduler FIFOs. */
        iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

        /* Enable DMA channels. */
        for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
                IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
                    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
                    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
        }

        IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
            IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

        iwm_nic_unlock(sc);

        /* Enable L1-Active */
        if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
                iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
                    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
        }

        return error;
}
1684
1685 /*
1686  * NVM read access and content parsing.  We do not support
1687  * external NVM or writing NVM.
1688  * iwlwifi/mvm/nvm.c
1689  */
1690
/* Default NVM size to read */
#define IWM_NVM_DEFAULT_CHUNK_SIZE      (2*1024)

/* NVM_ACCESS_CMD op_code values (firmware API); only reads are used here. */
#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0

/* load nvm chunk response */
enum {
        IWM_READ_NVM_CHUNK_SUCCEED = 0,
        IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
};
1702
/*
 * Read one chunk (at most 'length' bytes) of NVM section 'section'
 * starting at 'offset' by sending an IWM_NVM_ACCESS_CMD to the
 * firmware and copying the response payload into data + offset.
 *
 * On success *len holds the number of bytes actually read (less than
 * 'length' at the end of a section).  Returns 0 on success — including
 * the benign "not valid address" case at a non-zero offset, where *len
 * is set to 0 — or EIO/EINVAL on a bad response, or the iwm_send_cmd()
 * error.
 */
static int
iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
        uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
{
        struct iwm_nvm_access_cmd nvm_access_cmd = {
                .offset = htole16(offset),
                .length = htole16(length),
                .type = htole16(section),
                .op_code = IWM_NVM_READ_OPCODE,
        };
        struct iwm_nvm_access_resp *nvm_resp;
        struct iwm_rx_packet *pkt;
        struct iwm_host_cmd cmd = {
                .id = IWM_NVM_ACCESS_CMD,
                /* NVM must be readable even while RF-killed. */
                .flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
                .data = { &nvm_access_cmd, },
        };
        int ret, bytes_read, offset_read;
        uint8_t *resp_data;

        cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);

        ret = iwm_send_cmd(sc, &cmd);
        if (ret) {
                device_printf(sc->sc_dev,
                    "Could not send NVM_ACCESS command (error=%d)\n", ret);
                return ret;
        }

        pkt = cmd.resp_pkt;

        /* Extract NVM response */
        nvm_resp = (void *)pkt->data;
        ret = le16toh(nvm_resp->status);
        bytes_read = le16toh(nvm_resp->length);
        offset_read = le16toh(nvm_resp->offset);
        resp_data = nvm_resp->data;
        if (ret) {
                if ((offset != 0) &&
                    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
                        /*
                         * meaning of NOT_VALID_ADDRESS:
                         * driver try to read chunk from address that is
                         * multiple of 2K and got an error since addr is empty.
                         * meaning of (offset != 0): driver already
                         * read valid data from another chunk so this case
                         * is not an error.
                         */
                        IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
                                    "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
                                    offset);
                        *len = 0;
                        ret = 0;
                } else {
                        IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
                                    "NVM access command failed with status %d\n", ret);
                        ret = EIO;
                }
                goto exit;
        }

        /* The firmware must echo back the offset we asked for. */
        if (offset_read != offset) {
                device_printf(sc->sc_dev,
                    "NVM ACCESS response with invalid offset %d\n",
                    offset_read);
                ret = EINVAL;
                goto exit;
        }

        /* Never copy more than the caller's buffer can hold. */
        if (bytes_read > length) {
                device_printf(sc->sc_dev,
                    "NVM ACCESS response with too much data "
                    "(%d bytes requested, %d bytes received)\n",
                    length, bytes_read);
                ret = EINVAL;
                goto exit;
        }

        /* Write data to NVM */
        memcpy(data + offset, resp_data, bytes_read);
        *len = bytes_read;

 exit:
        iwm_free_resp(sc, &cmd);
        return ret;
}
1789
1790 /*
1791  * Reads an NVM section completely.
1792  * NICs prior to 7000 family don't have a real NVM, but just read
1793  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1794  * by uCode, we need to manually check in this case that we don't
1795  * overflow and try to read more than the EEPROM size.
1796  * For 7000 family NICs, we supply the maximal size we can read, and
1797  * the uCode fills the response with as much data as we can,
1798  * without overflowing, so no check is needed.
1799  */
1800 static int
1801 iwm_nvm_read_section(struct iwm_softc *sc,
1802         uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1803 {
1804         uint16_t seglen, length, offset = 0;
1805         int ret;
1806
1807         /* Set nvm section read length */
1808         length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1809
1810         seglen = length;
1811
1812         /* Read the NVM until exhausted (reading less than requested) */
1813         while (seglen == length) {
1814                 /* Check no memory assumptions fail and cause an overflow */
1815                 if ((size_read + offset + length) >
1816                     sc->cfg->eeprom_size) {
1817                         device_printf(sc->sc_dev,
1818                             "EEPROM size is too small for NVM\n");
1819                         return ENOBUFS;
1820                 }
1821
1822                 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1823                 if (ret) {
1824                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1825                                     "Cannot read NVM from section %d offset %d, length %d\n",
1826                                     section, offset, length);
1827                         return ret;
1828                 }
1829                 offset += seglen;
1830         }
1831
1832         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1833                     "NVM section %d read completed\n", section);
1834         *len = offset;
1835         return 0;
1836 }
1837
1838 /*
1839  * BEGIN IWM_NVM_PARSE
1840  */
1841
1842 /* iwlwifi/iwl-nvm-parse.c */
1843
1844 /* NVM offsets (in words) definitions */
enum iwm_nvm_offsets {
        /* NVM HW-Section offset (in words) definitions */
        IWM_HW_ADDR = 0x15,

/* NVM SW-Section offset (in words) definitions */
        IWM_NVM_SW_SECTION = 0x1C0,
        IWM_NVM_VERSION = 0,    /* word offset relative to SW section start */
        IWM_RADIO_CFG = 1,
        IWM_SKU = 2,
        IWM_N_HW_ADDRS = 3,
        IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

/* NVM calibration section offset (in words) definitions */
        IWM_NVM_CALIB_SECTION = 0x2B8,
        IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
1861
/* Family 8000 equivalents of the offsets above (in words). */
enum iwm_8000_nvm_offsets {
        /* NVM HW-Section offset (in words) definitions */
        IWM_HW_ADDR0_WFPM_8000 = 0x12,
        IWM_HW_ADDR1_WFPM_8000 = 0x16,
        IWM_HW_ADDR0_PCIE_8000 = 0x8A,
        IWM_HW_ADDR1_PCIE_8000 = 0x8E,
        IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,

        /* NVM SW-Section offset (in words) definitions */
        IWM_NVM_SW_SECTION_8000 = 0x1C0,
        IWM_NVM_VERSION_8000 = 0,
        IWM_RADIO_CFG_8000 = 0,         /* offset within the PHY_SKU section */
        IWM_SKU_8000 = 2,
        IWM_N_HW_ADDRS_8000 = 3,

        /* NVM REGULATORY -Section offset (in words) definitions */
        IWM_NVM_CHANNELS_8000 = 0,
        IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
        IWM_NVM_LAR_OFFSET_8000 = 0x507,
        IWM_NVM_LAR_ENABLED_8000 = 0x7,

        /* NVM calibration section offset (in words) definitions */
        IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
        IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
};
1887
/* SKU Capabilities (actual values from NVM definition) */
/* Tested against the word returned by iwm_get_sku(). */
enum nvm_sku_bits {
        IWM_NVM_SKU_CAP_BAND_24GHZ      = (1 << 0),
        IWM_NVM_SKU_CAP_BAND_52GHZ      = (1 << 1),
        IWM_NVM_SKU_CAP_11N_ENABLE      = (1 << 2),
        IWM_NVM_SKU_CAP_11AC_ENABLE     = (1 << 3),
};
1895
/* radio config bits (actual values from NVM definition) */
#define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
#define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
#define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
#define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */

/* Family 8000 packs the RF config into wider 32-bit fields. */
#define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)       (x & 0xF)
#define IWM_NVM_RF_CFG_DASH_MSK_8000(x)         ((x >> 4) & 0xF)
#define IWM_NVM_RF_CFG_STEP_MSK_8000(x)         ((x >> 8) & 0xF)
#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)         ((x >> 12) & 0xFFF)
#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)       ((x >> 24) & 0xF)
#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)       ((x >> 28) & 0xF)

/* NOTE(review): presumably dBm — confirm against the tx-power users. */
#define DEFAULT_MAX_TX_POWER 16
1912
1913 /**
1914  * enum iwm_nvm_channel_flags - channel flags in NVM
1915  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1916  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1917  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1918  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1919  * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1920  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1921  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1922  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1923  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1924  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1925  */
1926 enum iwm_nvm_channel_flags {
1927         IWM_NVM_CHANNEL_VALID = (1 << 0),
1928         IWM_NVM_CHANNEL_IBSS = (1 << 1),
1929         IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1930         IWM_NVM_CHANNEL_RADAR = (1 << 4),
1931         IWM_NVM_CHANNEL_DFS = (1 << 7),
1932         IWM_NVM_CHANNEL_WIDE = (1 << 8),
1933         IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1934         IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1935         IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1936 };
1937
1938 /*
1939  * Translate EEPROM flags to net80211.
1940  */
1941 static uint32_t
1942 iwm_eeprom_channel_flags(uint16_t ch_flags)
1943 {
1944         uint32_t nflags;
1945
1946         nflags = 0;
1947         if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1948                 nflags |= IEEE80211_CHAN_PASSIVE;
1949         if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1950                 nflags |= IEEE80211_CHAN_NOADHOC;
1951         if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1952                 nflags |= IEEE80211_CHAN_DFS;
1953                 /* Just in case. */
1954                 nflags |= IEEE80211_CHAN_NOADHOC;
1955         }
1956
1957         return (nflags);
1958 }
1959
1960 static void
1961 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1962     int maxchans, int *nchans, int ch_idx, size_t ch_num,
1963     const uint8_t bands[])
1964 {
1965         const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
1966         uint32_t nflags;
1967         uint16_t ch_flags;
1968         uint8_t ieee;
1969         int error;
1970
1971         for (; ch_idx < ch_num; ch_idx++) {
1972                 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1973                 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1974                         ieee = iwm_nvm_channels[ch_idx];
1975                 else
1976                         ieee = iwm_nvm_channels_8000[ch_idx];
1977
1978                 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1979                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1980                             "Ch. %d Flags %x [%sGHz] - No traffic\n",
1981                             ieee, ch_flags,
1982                             (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1983                             "5.2" : "2.4");
1984                         continue;
1985                 }
1986
1987                 nflags = iwm_eeprom_channel_flags(ch_flags);
1988                 error = ieee80211_add_channel(chans, maxchans, nchans,
1989                     ieee, 0, 0, nflags, bands);
1990                 if (error != 0)
1991                         break;
1992
1993                 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1994                     "Ch. %d Flags %x [%sGHz] - Added\n",
1995                     ieee, ch_flags,
1996                     (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1997                     "5.2" : "2.4");
1998         }
1999 }
2000
2001 static void
2002 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2003     struct ieee80211_channel chans[])
2004 {
2005         struct iwm_softc *sc = ic->ic_softc;
2006         struct iwm_nvm_data *data = sc->nvm_data;
2007         uint8_t bands[IEEE80211_MODE_BYTES];
2008         size_t ch_num;
2009
2010         memset(bands, 0, sizeof(bands));
2011         /* 1-13: 11b/g channels. */
2012         setbit(bands, IEEE80211_MODE_11B);
2013         setbit(bands, IEEE80211_MODE_11G);
2014         iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2015             IWM_NUM_2GHZ_CHANNELS - 1, bands);
2016
2017         /* 14: 11b channel only. */
2018         clrbit(bands, IEEE80211_MODE_11G);
2019         iwm_add_channel_band(sc, chans, maxchans, nchans,
2020             IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2021
2022         if (data->sku_cap_band_52GHz_enable) {
2023                 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2024                         ch_num = nitems(iwm_nvm_channels);
2025                 else
2026                         ch_num = nitems(iwm_nvm_channels_8000);
2027                 memset(bands, 0, sizeof(bands));
2028                 setbit(bands, IEEE80211_MODE_11A);
2029                 iwm_add_channel_band(sc, chans, maxchans, nchans,
2030                     IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2031         }
2032 }
2033
/*
 * Determine the MAC address on family 8000 devices.  Preference
 * order: (1) the MAC_OVERRIDE NVM section, unless it holds the
 * reserved, broadcast, multicast or otherwise invalid address;
 * (2) the WFMP_MAC_ADDR PRPH registers.  If neither source yields an
 * address, data->hw_addr is zeroed (the caller then rejects it).
 */
static void
iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
        const uint16_t *mac_override, const uint16_t *nvm_hw)
{
        const uint8_t *hw_addr;

        if (mac_override) {
                static const uint8_t reserved_mac[] = {
                        0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
                };

                hw_addr = (const uint8_t *)(mac_override +
                                 IWM_MAC_ADDRESS_OVERRIDE_8000);

                /*
                 * Store the MAC address from MAO section.
                 * No byte swapping is required in MAO section
                 */
                IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);

                /*
                 * Force the use of the OTP MAC address in case of reserved MAC
                 * address in the NVM, or if address is given but invalid.
                 */
                if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
                    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
                    iwm_is_valid_ether_addr(data->hw_addr) &&
                    !IEEE80211_IS_MULTICAST(data->hw_addr))
                        return;

                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
                    "%s: mac address from nvm override section invalid\n",
                    __func__);
        }

        if (nvm_hw) {
                /* read the mac address from WFMP registers */
                uint32_t mac_addr0 =
                    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
                uint32_t mac_addr1 =
                    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

                /* Bytes are stored reversed within each register word. */
                hw_addr = (const uint8_t *)&mac_addr0;
                data->hw_addr[0] = hw_addr[3];
                data->hw_addr[1] = hw_addr[2];
                data->hw_addr[2] = hw_addr[1];
                data->hw_addr[3] = hw_addr[0];

                hw_addr = (const uint8_t *)&mac_addr1;
                data->hw_addr[4] = hw_addr[1];
                data->hw_addr[5] = hw_addr[0];

                return;
        }

        device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
        memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
2092
2093 static int
2094 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2095             const uint16_t *phy_sku)
2096 {
2097         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2098                 return le16_to_cpup(nvm_sw + IWM_SKU);
2099
2100         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2101 }
2102
2103 static int
2104 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2105 {
2106         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2107                 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2108         else
2109                 return le32_to_cpup((const uint32_t *)(nvm_sw +
2110                                                 IWM_NVM_VERSION_8000));
2111 }
2112
2113 static int
2114 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2115                   const uint16_t *phy_sku)
2116 {
2117         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2118                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2119
2120         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2121 }
2122
2123 static int
2124 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2125 {
2126         int n_hw_addr;
2127
2128         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2129                 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2130
2131         n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2132
2133         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2134 }
2135
2136 static void
2137 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2138                   uint32_t radio_cfg)
2139 {
2140         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2141                 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2142                 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2143                 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2144                 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2145                 return;
2146         }
2147
2148         /* set the radio configuration for family 8000 */
2149         data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2150         data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2151         data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2152         data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2153         data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2154         data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2155 }
2156
/*
 * Fill data->hw_addr from the NVM HW section (pre-8000, stored in
 * little-endian 16-bit words, i.e. byte order 214365) or, on family
 * 8000, via iwm_set_hw_address_family_8000().  Returns 0 on success
 * or EINVAL when no valid address could be derived.
 */
static int
iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
                   const uint16_t *nvm_hw, const uint16_t *mac_override)
{
#ifdef notyet /* for FAMILY 9000 */
        /* NOTE(review): 'cfg' would need to be sc->cfg when enabled. */
        if (cfg->mac_addr_from_csr) {
                iwm_set_hw_address_from_csr(sc, data);
        } else
#endif
        if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
                const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);

                /* The byte order is little endian 16 bit, meaning 214365 */
                data->hw_addr[0] = hw_addr[1];
                data->hw_addr[1] = hw_addr[0];
                data->hw_addr[2] = hw_addr[3];
                data->hw_addr[3] = hw_addr[2];
                data->hw_addr[4] = hw_addr[5];
                data->hw_addr[5] = hw_addr[4];
        } else {
                iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
        }

        if (!iwm_is_valid_ether_addr(data->hw_addr)) {
                device_printf(sc->sc_dev, "no valid mac address was found\n");
                return EINVAL;
        }

        return 0;
}
2187
/*
 * Allocate and populate an iwm_nvm_data structure from the raw NVM
 * section images.  Returns NULL on allocation failure or when no
 * valid MAC address could be determined; the caller releases the
 * result with iwm_free_nvm_data().
 */
static struct iwm_nvm_data *
iwm_parse_nvm_data(struct iwm_softc *sc,
                   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
                   const uint16_t *nvm_calib, const uint16_t *mac_override,
                   const uint16_t *phy_sku, const uint16_t *regulatory)
{
        struct iwm_nvm_data *data;
        uint32_t sku, radio_cfg;
        uint16_t lar_config;

        /* The trailing channel-flag array size depends on the family. */
        if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
                data = malloc(sizeof(*data) +
                    IWM_NUM_CHANNELS * sizeof(uint16_t),
                    M_DEVBUF, M_NOWAIT | M_ZERO);
        } else {
                data = malloc(sizeof(*data) +
                    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
                    M_DEVBUF, M_NOWAIT | M_ZERO);
        }
        if (!data)
                return NULL;

        data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);

        radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
        iwm_set_radio_cfg(sc, data, radio_cfg);

        sku = iwm_get_sku(sc, nvm_sw, phy_sku);
        data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
        data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
        /* 11n is deliberately forced off here, regardless of the SKU bit. */
        data->sku_cap_11n_enable = 0;

        data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);

        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
                /* The LAR config word moved between NVM versions. */
                uint16_t lar_offset = data->nvm_version < 0xE39 ?
                                       IWM_NVM_LAR_OFFSET_8000_OLD :
                                       IWM_NVM_LAR_OFFSET_8000;

                lar_config = le16_to_cpup(regulatory + lar_offset);
                data->lar_enabled = !!(lar_config &
                                       IWM_NVM_LAR_ENABLED_8000);
        }

        /* If no valid mac address was found - bail out */
        if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
                free(data, M_DEVBUF);
                return NULL;
        }

        /* Channel flags live in the SW section pre-8000, REGULATORY after. */
        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
                memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
                    IWM_NUM_CHANNELS * sizeof(uint16_t));
        } else {
                memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
                    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
        }

        return data;
}
2248
2249 static void
2250 iwm_free_nvm_data(struct iwm_nvm_data *data)
2251 {
2252         if (data != NULL)
2253                 free(data, M_DEVBUF);
2254 }
2255
2256 static struct iwm_nvm_data *
2257 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2258 {
2259         const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2260
2261         /* Checking for required sections */
2262         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2263                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2264                     !sections[sc->cfg->nvm_hw_section_num].data) {
2265                         device_printf(sc->sc_dev,
2266                             "Can't parse empty OTP/NVM sections\n");
2267                         return NULL;
2268                 }
2269         } else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2270                 /* SW and REGULATORY sections are mandatory */
2271                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2272                     !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2273                         device_printf(sc->sc_dev,
2274                             "Can't parse empty OTP/NVM sections\n");
2275                         return NULL;
2276                 }
2277                 /* MAC_OVERRIDE or at least HW section must exist */
2278                 if (!sections[sc->cfg->nvm_hw_section_num].data &&
2279                     !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2280                         device_printf(sc->sc_dev,
2281                             "Can't parse mac_address, empty sections\n");
2282                         return NULL;
2283                 }
2284
2285                 /* PHY_SKU section is mandatory in B0 */
2286                 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2287                         device_printf(sc->sc_dev,
2288                             "Can't parse phy_sku in B0, empty sections\n");
2289                         return NULL;
2290                 }
2291         } else {
2292                 panic("unknown device family %d\n", sc->cfg->device_family);
2293         }
2294
2295         hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2296         sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2297         calib = (const uint16_t *)
2298             sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2299         regulatory = (const uint16_t *)
2300             sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2301         mac_override = (const uint16_t *)
2302             sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2303         phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2304
2305         return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2306             phy_sku, regulatory);
2307 }
2308
2309 static int
2310 iwm_nvm_init(struct iwm_softc *sc)
2311 {
2312         struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2313         int i, ret, section;
2314         uint32_t size_read = 0;
2315         uint8_t *nvm_buffer, *temp;
2316         uint16_t len;
2317
2318         memset(nvm_sections, 0, sizeof(nvm_sections));
2319
2320         if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2321                 return EINVAL;
2322
2323         /* load NVM values from nic */
2324         /* Read From FW NVM */
2325         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2326
2327         nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2328         if (!nvm_buffer)
2329                 return ENOMEM;
2330         for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2331                 /* we override the constness for initial read */
2332                 ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2333                                            &len, size_read);
2334                 if (ret)
2335                         continue;
2336                 size_read += len;
2337                 temp = malloc(len, M_DEVBUF, M_NOWAIT);
2338                 if (!temp) {
2339                         ret = ENOMEM;
2340                         break;
2341                 }
2342                 memcpy(temp, nvm_buffer, len);
2343
2344                 nvm_sections[section].data = temp;
2345                 nvm_sections[section].length = len;
2346         }
2347         if (!size_read)
2348                 device_printf(sc->sc_dev, "OTP is blank\n");
2349         free(nvm_buffer, M_DEVBUF);
2350
2351         sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2352         if (!sc->nvm_data)
2353                 return EINVAL;
2354         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2355                     "nvm version = %x\n", sc->nvm_data->nvm_version);
2356
2357         for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2358                 if (nvm_sections[i].data != NULL)
2359                         free(nvm_sections[i].data, M_DEVBUF);
2360         }
2361
2362         return 0;
2363 }
2364
/*
 * Copy one firmware section into device memory over the FH service DMA
 * channel, in chunks of at most IWM_FH_MEM_TB_MAX_LENGTH bytes.
 *
 * Each chunk is staged in the pre-allocated sc->fw_dma bounce buffer and
 * transferred by iwm_pcie_load_firmware_chunk().  Destination addresses
 * inside [IWM_FW_MEM_EXTENDED_START, IWM_FW_MEM_EXTENDED_END] require
 * the LMPM_CHICK extended-address-space bit to be set for the duration
 * of the transfer.
 *
 * Returns 0 on success, or the error from the first failing chunk.
 */
static int
iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
	const struct iwm_fw_desc *section)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	uint8_t *v_addr;
	bus_addr_t p_addr;
	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: [%d] uCode section being loaded...\n",
		    __func__, section_num);

	v_addr = dma->vaddr;
	p_addr = dma->paddr;

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		uint32_t copy_size, dst_addr;
		int extended_addr = FALSE;

		/* The final chunk may be shorter than chunk_sz. */
		copy_size = MIN(chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
			extended_addr = TRUE;

		if (extended_addr)
			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		/* Stage the chunk in the bounce buffer, flush for DMA. */
		memcpy(v_addr, (const uint8_t *)section->data + offset,
		    copy_size);
		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
						   copy_size);

		/* Clear the extended-address bit even if the chunk failed. */
		if (extended_addr)
			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			device_printf(sc->sc_dev,
			    "%s: Could not load the [%d] uCode section\n",
			    __func__, section_num);
			break;
		}
	}

	return ret;
}
2417
2418 /*
2419  * ucode
2420  */
/*
 * Push one staged firmware chunk to the device and wait for completion.
 *
 * Programs the FH service channel under the NIC lock: pause the DMA
 * channel, set the SRAM destination, the TFD buffer DRAM address and
 * length, mark the buffer valid, then re-enable the channel.  The
 * interrupt handler sets sc->sc_fw_chunk_done and wakes us on
 * &sc->sc_fw; sc->sc_mtx must be held by the caller (required by
 * msleep()).
 *
 * Returns 0 on success, EBUSY if the NIC lock could not be taken, or
 * ETIMEDOUT if the sleep times out before the completion interrupt.
 */
static int
iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
                             bus_addr_t phy_addr, uint32_t byte_cnt)
{
	int ret;

	/* Cleared here, set by the FH_TX interrupt path. */
	sc->sc_fw_chunk_done = 0;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);

	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	/* High DRAM address bits plus the transfer byte count. */
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(phy_addr)
	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* wait up to 5s for this segment to load */
	ret = 0;
	while (!sc->sc_fw_chunk_done) {
		ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
		if (ret)
			break;
	}

	if (ret != 0) {
		device_printf(sc->sc_dev,
		    "fw chunk addr 0x%x len %d failed to load\n",
		    dst_addr, byte_cnt);
		return ETIMEDOUT;
	}

	return 0;
}
2474
/*
 * Load the firmware sections for one CPU of an 8000-family device,
 * reporting each loaded section to the ucode through the
 * IWM_FH_UCODE_LOAD_STATUS register (CPU1 uses the low 16 bits, CPU2
 * the high 16 bits, selected by shift_param).
 *
 * *first_ucode_section is the index to start from; on return it holds
 * the index of the last section examined, so the CPU2 call resumes
 * right after the separator section.
 *
 * Returns 0 on success or the error from iwm_pcie_load_section().
 */
static int
iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	uint32_t val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		/* Skip the separator section that ended the CPU1 pass. */
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->fw_sect[i].data ||
		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
				    "Break since Data not valid or Empty section, sec = %d\n",
				    i);
			break;
		}
		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
		if (ret)
			return ret;

		/* Notify the ucode of the loaded section number and status */
		if (iwm_nic_lock(sc)) {
			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
			/* Grow the per-section bitmask: 1, 3, 7, ... */
			sec_num = (sec_num << 1) | 0x1;
			iwm_nic_unlock(sc);
		}
	}

	*first_ucode_section = last_read_idx;

	iwm_enable_interrupts(sc);

	/* Tell the ucode this CPU's sections are all loaded. */
	if (iwm_nic_lock(sc)) {
		if (cpu == 1)
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
		else
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
		iwm_nic_unlock(sc);
	}

	return 0;
}
2536
2537 static int
2538 iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2539         const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2540 {
2541         int shift_param;
2542         int i, ret = 0;
2543         uint32_t last_read_idx = 0;
2544
2545         if (cpu == 1) {
2546                 shift_param = 0;
2547                 *first_ucode_section = 0;
2548         } else {
2549                 shift_param = 16;
2550                 (*first_ucode_section)++;
2551         }
2552
2553         for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2554                 last_read_idx = i;
2555
2556                 /*
2557                  * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2558                  * CPU1 to CPU2.
2559                  * PAGING_SEPARATOR_SECTION delimiter - separate between
2560                  * CPU2 non paged to CPU2 paging sec.
2561                  */
2562                 if (!image->fw_sect[i].data ||
2563                     image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2564                     image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2565                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2566                                     "Break since Data not valid or Empty section, sec = %d\n",
2567                                      i);
2568                         break;
2569                 }
2570
2571                 ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2572                 if (ret)
2573                         return ret;
2574         }
2575
2576         *first_ucode_section = last_read_idx;
2577
2578         return 0;
2579
2580 }
2581
2582 static int
2583 iwm_pcie_load_given_ucode(struct iwm_softc *sc,
2584         const struct iwm_fw_sects *image)
2585 {
2586         int ret = 0;
2587         int first_ucode_section;
2588
2589         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2590                      image->is_dual_cpus ? "Dual" : "Single");
2591
2592         /* load to FW the binary non secured sections of CPU1 */
2593         ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2594         if (ret)
2595                 return ret;
2596
2597         if (image->is_dual_cpus) {
2598                 /* set CPU2 header address */
2599                 if (iwm_nic_lock(sc)) {
2600                         iwm_write_prph(sc,
2601                                        IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2602                                        IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2603                         iwm_nic_unlock(sc);
2604                 }
2605
2606                 /* load to FW the binary sections of CPU2 */
2607                 ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2608                                                  &first_ucode_section);
2609                 if (ret)
2610                         return ret;
2611         }
2612
2613         iwm_enable_interrupts(sc);
2614
2615         /* release CPU reset */
2616         IWM_WRITE(sc, IWM_CSR_RESET, 0);
2617
2618         return 0;
2619 }
2620
2621 int
2622 iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2623         const struct iwm_fw_sects *image)
2624 {
2625         int ret = 0;
2626         int first_ucode_section;
2627
2628         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2629                     image->is_dual_cpus ? "Dual" : "Single");
2630
2631         /* configure the ucode to be ready to get the secured image */
2632         /* release CPU reset */
2633         if (iwm_nic_lock(sc)) {
2634                 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2635                     IWM_RELEASE_CPU_RESET_BIT);
2636                 iwm_nic_unlock(sc);
2637         }
2638
2639         /* load to FW the binary Secured sections of CPU1 */
2640         ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2641             &first_ucode_section);
2642         if (ret)
2643                 return ret;
2644
2645         /* load to FW the binary sections of CPU2 */
2646         return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2647             &first_ucode_section);
2648 }
2649
/* XXX Get rid of this definition */
/*
 * Restrict the interrupt mask to FH_TX only, so that during firmware
 * load the only interrupt that can fire is the "chunk done" one
 * (see iwm_start_fw()).
 */
static inline void
iwm_enable_fw_load_int(struct iwm_softc *sc)
{
	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
2658
2659 /* XXX Add proper rfkill support code */
2660 static int
2661 iwm_start_fw(struct iwm_softc *sc,
2662         const struct iwm_fw_sects *fw)
2663 {
2664         int ret;
2665
2666         /* This may fail if AMT took ownership of the device */
2667         if (iwm_prepare_card_hw(sc)) {
2668                 device_printf(sc->sc_dev,
2669                     "%s: Exit HW not ready\n", __func__);
2670                 ret = EIO;
2671                 goto out;
2672         }
2673
2674         IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2675
2676         iwm_disable_interrupts(sc);
2677
2678         /* make sure rfkill handshake bits are cleared */
2679         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2680         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2681             IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2682
2683         /* clear (again), then enable host interrupts */
2684         IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2685
2686         ret = iwm_nic_init(sc);
2687         if (ret) {
2688                 device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2689                 goto out;
2690         }
2691
2692         /*
2693          * Now, we load the firmware and don't want to be interrupted, even
2694          * by the RF-Kill interrupt (hence mask all the interrupt besides the
2695          * FH_TX interrupt which is needed to load the firmware). If the
2696          * RF-Kill switch is toggled, we will find out after having loaded
2697          * the firmware and return the proper value to the caller.
2698          */
2699         iwm_enable_fw_load_int(sc);
2700
2701         /* really make sure rfkill handshake bits are cleared */
2702         /* maybe we should write a few times more?  just to make sure */
2703         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2704         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2705
2706         /* Load the given image to the HW */
2707         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2708                 ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2709         else
2710                 ret = iwm_pcie_load_given_ucode(sc, fw);
2711
2712         /* XXX re-check RF-Kill state */
2713
2714 out:
2715         return ret;
2716 }
2717
2718 static int
2719 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2720 {
2721         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2722                 .valid = htole32(valid_tx_ant),
2723         };
2724
2725         return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2726             IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2727 }
2728
2729 /* iwlwifi: mvm/fw.c */
2730 static int
2731 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2732 {
2733         struct iwm_phy_cfg_cmd phy_cfg_cmd;
2734         enum iwm_ucode_type ucode_type = sc->cur_ucode;
2735
2736         /* Set parameters */
2737         phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
2738         phy_cfg_cmd.calib_control.event_trigger =
2739             sc->sc_default_calib[ucode_type].event_trigger;
2740         phy_cfg_cmd.calib_control.flow_trigger =
2741             sc->sc_default_calib[ucode_type].flow_trigger;
2742
2743         IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2744             "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2745         return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2746             sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2747 }
2748
/*
 * Notification-wait callback for the IWM_MVM_ALIVE message.
 *
 * The firmware alive response exists in three sizes; the payload length
 * selects which layout to decode.  Records the error/log event table
 * addresses, the scheduler (SCD) base address, and (v2/v3) the UMAC
 * error table address in the softc, and marks alive_data->valid when
 * the reported status is IWM_ALIVE_STATUS_OK.
 *
 * Always returns TRUE so the waiter in iwm_mvm_load_ucode_wait_alive()
 * stops waiting once any alive response arrives.
 */
static int
iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
{
	struct iwm_mvm_alive_data *alive_data = data;
	struct iwm_mvm_alive_resp_ver1 *palive1;
	struct iwm_mvm_alive_resp_ver2 *palive2;
	struct iwm_mvm_alive_resp *palive;

	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
		/* Version 1 response: no UMAC log support. */
		palive1 = (void *)pkt->data;

		sc->support_umac_log = FALSE;
		sc->error_event_table =
			le32toh(palive1->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive1->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);

		alive_data->valid = le16toh(palive1->status) ==
				    IWM_ALIVE_STATUS_OK;
		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16toh(palive1->status), palive1->ver_type,
			     palive1->ver_subtype, palive1->flags);
	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
		/* Version 2 response: adds UMAC error info address. */
		palive2 = (void *)pkt->data;
		sc->error_event_table =
			le32toh(palive2->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive2->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
		sc->umac_error_event_table =
			le32toh(palive2->error_info_addr);

		alive_data->valid = le16toh(palive2->status) ==
				    IWM_ALIVE_STATUS_OK;
		/* A non-zero table address implies UMAC logging works. */
		if (sc->umac_error_event_table)
			sc->support_umac_log = TRUE;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive2->status), palive2->ver_type,
			    palive2->ver_subtype, palive2->flags);

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			    palive2->umac_major, palive2->umac_minor);
	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		/* Version 3 response: current layout. */
		palive = (void *)pkt->data;

		sc->error_event_table =
			le32toh(palive->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
		sc->umac_error_event_table =
			le32toh(palive->error_info_addr);

		alive_data->valid = le16toh(palive->status) ==
				    IWM_ALIVE_STATUS_OK;
		if (sc->umac_error_event_table)
			sc->support_umac_log = TRUE;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive->status), palive->ver_type,
			    palive->ver_subtype, palive->flags);

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			    le32toh(palive->umac_major),
			    le32toh(palive->umac_minor));
	}
	/*
	 * NOTE(review): a payload matching none of the three sizes falls
	 * through without setting alive_data->valid — the caller then
	 * treats the load as invalid.
	 */

	return TRUE;
}
2825
2826 static int
2827 iwm_wait_phy_db_entry(struct iwm_softc *sc,
2828         struct iwm_rx_packet *pkt, void *data)
2829 {
2830         struct iwm_phy_db *phy_db = data;
2831
2832         if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2833                 if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2834                         device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2835                             __func__, pkt->hdr.code);
2836                 }
2837                 return TRUE;
2838         }
2839
2840         if (iwm_phy_db_set_section(phy_db, pkt)) {
2841                 device_printf(sc->sc_dev,
2842                     "%s: iwm_phy_db_set_section failed\n", __func__);
2843         }
2844
2845         return FALSE;
2846 }
2847
/*
 * Load the requested ucode image into the device and block until the
 * firmware reports alive (or the wait times out).
 *
 * On any failure sc->cur_ucode is rolled back to the previously running
 * type.  On success the scheduler base address from the alive response
 * is programmed, and — if the image uses firmware paging — the paging
 * image is saved and the paging command sent.
 *
 * Called with the softc lock held; the lock is dropped around the
 * notification wait.
 */
static int
iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
	enum iwm_ucode_type ucode_type)
{
	struct iwm_notification_wait alive_wait;
	struct iwm_mvm_alive_data alive_data;
	const struct iwm_fw_sects *fw;
	enum iwm_ucode_type old_type = sc->cur_ucode;
	int error;
	static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };

	if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
		device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
			error);
		return error;
	}
	fw = &sc->sc_fw.fw_sects[ucode_type];
	sc->cur_ucode = ucode_type;
	sc->ucode_loaded = FALSE;

	/* Register the alive waiter before starting the firmware. */
	memset(&alive_data, 0, sizeof(alive_data));
	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
				   alive_cmd, nitems(alive_cmd),
				   iwm_alive_fn, &alive_data);

	error = iwm_start_fw(sc, fw);
	if (error) {
		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
		sc->cur_ucode = old_type;
		/* Deregister the waiter we will never satisfy. */
		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
		return error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	IWM_UNLOCK(sc);
	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
				      IWM_MVM_UCODE_ALIVE_TIMEOUT);
	IWM_LOCK(sc);
	if (error) {
		/* On 8000-family parts, dump secure-boot CPU status. */
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
			if (iwm_nic_lock(sc)) {
				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
				iwm_nic_unlock(sc);
			}
			device_printf(sc->sc_dev,
			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
			    a, b);
		}
		sc->cur_ucode = old_type;
		return error;
	}

	if (!alive_data.valid) {
		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
		    __func__);
		sc->cur_ucode = old_type;
		return EIO;
	}

	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);

	/*
	 * configure and operate fw paging mechanism.
	 * driver configures the paging flow only once, CPU2 paging image
	 * included in the IWM_UCODE_INIT image.
	 */
	if (fw->paging_mem_size) {
		error = iwm_save_fw_paging(sc, fw);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: failed to save the FW paging image\n",
			    __func__);
			return error;
		}

		error = iwm_send_paging_cmd(sc, fw);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: failed to send the paging cmd\n", __func__);
			iwm_free_fw_paging(sc);
			return error;
		}
	}

	if (!error)
		sc->ucode_loaded = TRUE;
	return error;
}
2941
2942 /*
2943  * mvm misc bits
2944  */
2945
2946 /*
2947  * follows iwlwifi/fw.c
2948  */
/*
 * Boot the INIT ucode and run its startup sequence.
 *
 * With justnvm set, only the NVM is read (to obtain the MAC address)
 * and the function returns early.  Otherwise the BT coex configuration,
 * Smart FIFO setup, TX antenna configuration and PHY configuration are
 * sent, and we then wait for the init-complete notification, gathering
 * PHY DB calibration results via iwm_wait_phy_db_entry() along the way.
 *
 * Note: every early exit — including the justnvm *success* path — goes
 * through the `error` label so the calib_wait notification waiter is
 * always deregistered; only the final full path reaches `out` directly,
 * where iwm_wait_notification() has already consumed the waiter.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	struct iwm_notification_wait calib_wait;
	static const uint16_t init_complete[] = {
		IWM_INIT_COMPLETE_NOTIF,
		IWM_CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		device_printf(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	iwm_init_notification_wait(sc->sc_notif_wait,
				   &calib_wait,
				   init_complete,
				   nitems(init_complete),
				   iwm_wait_phy_db_entry,
				   sc->sc_phy_db);

	/* Will also start the device */
	ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
	if (ret) {
		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
		    ret);
		goto error;
	}

	if (justnvm) {
		/* Read nvm */
		ret = iwm_nvm_init(sc);
		if (ret) {
			device_printf(sc->sc_dev, "failed to read nvm\n");
			goto error;
		}
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
		/* Intentional: clean up the waiter and return ret == 0. */
		goto error;
	}

	ret = iwm_send_bt_init_conf(sc);
	if (ret) {
		device_printf(sc->sc_dev,
		    "failed to send bt coex configuration: %d\n", ret);
		goto error;
	}

	/* Init Smart FIFO. */
	ret = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
	if (ret)
		goto error;

	/* Send TX valid antennas before triggering calibrations */
	ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
	if (ret) {
		device_printf(sc->sc_dev,
		    "failed to send antennas before calibration: %d\n", ret);
		goto error;
	}

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	ret = iwm_send_phy_cfg_cmd(sc);
	if (ret) {
		device_printf(sc->sc_dev,
		    "%s: Failed to run INIT calibrations: %d\n",
		    __func__, ret);
		goto error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware.  The softc lock is dropped for the wait.
	 */
	IWM_UNLOCK(sc);
	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
	    IWM_MVM_UCODE_CALIB_TIMEOUT);
	IWM_LOCK(sc);


	goto out;

error:
	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
out:
	return ret;
}
3041
3042 /*
3043  * receive side
3044  */
3045
3046 /* (re)stock rx ring, called at init-time and at runtime */
/* (re)stock rx ring, called at init-time and at runtime */
/*
 * Allocate a new receive mbuf, DMA-map it, and install it at slot idx
 * of the RX ring.
 *
 * The new mbuf is first loaded into the ring's spare map; only after a
 * successful load is the spare map swapped with the slot's map, so a
 * mapping failure leaves the slot's previous buffer intact.  The RX
 * descriptor stores the 256-byte-aligned physical address shifted right
 * by 8, as the hardware expects.
 *
 * Returns 0 on success, ENOBUFS if no mbuf cluster is available, or a
 * busdma error from the mapping attempt.
 */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	bus_dmamap_t dmamap;
	bus_dma_segment_t seg;
	int nsegs, error;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
	if (m == NULL)
		return ENOBUFS;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
	    &seg, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: can't map mbuf, error %d\n", __func__, error);
		m_freem(m);
		return error;
	}

	/* Release the mapping of the buffer being replaced, if any. */
	if (data->m != NULL)
		bus_dmamap_unload(ring->data_dmat, data->map);

	/* Swap ring->spare_map with data->map */
	dmamap = data->map;
	data->map = ring->spare_map;
	ring->spare_map = dmamap;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
	data->m = m;

	/* Update RX descriptor. */
	KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
	ring->desc[idx] = htole32(seg.ds_addr >> 8);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}
3090
3091 /* iwlwifi: mvm/rx.c */
3092 /*
3093  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3094  * values are reported by the fw as positive values - need to negate
3095  * to obtain their dBM.  Account for missing antennas by replacing 0
3096  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3097  */
3098 static int
3099 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3100 {
3101         int energy_a, energy_b, energy_c, max_energy;
3102         uint32_t val;
3103
3104         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3105         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3106             IWM_RX_INFO_ENERGY_ANT_A_POS;
3107         energy_a = energy_a ? -energy_a : -256;
3108         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3109             IWM_RX_INFO_ENERGY_ANT_B_POS;
3110         energy_b = energy_b ? -energy_b : -256;
3111         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3112             IWM_RX_INFO_ENERGY_ANT_C_POS;
3113         energy_c = energy_c ? -energy_c : -256;
3114         max_energy = MAX(energy_a, energy_b);
3115         max_energy = MAX(max_energy, energy_c);
3116
3117         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3118             "energy In A %d B %d C %d , and max %d\n",
3119             energy_a, energy_b, energy_c, max_energy);
3120
3121         return max_energy;
3122 }
3123
3124 static void
3125 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3126 {
3127         struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3128
3129         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3130
3131         memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3132 }
3133
3134 /*
3135  * Retrieve the average noise (in dBm) among receivers.
3136  */
3137 static int
3138 iwm_get_noise(struct iwm_softc *sc,
3139     const struct iwm_mvm_statistics_rx_non_phy *stats)
3140 {
3141         int i, total, nbant, noise;
3142
3143         total = nbant = noise = 0;
3144         for (i = 0; i < 3; i++) {
3145                 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3146                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3147                     __func__,
3148                     i,
3149                     noise);
3150
3151                 if (noise) {
3152                         total += noise;
3153                         nbant++;
3154                 }
3155         }
3156
3157         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3158             __func__, nbant, total);
3159 #if 0
3160         /* There should be at least one antenna but check anyway. */
3161         return (nbant == 0) ? -127 : (total / nbant) - 107;
3162 #else
3163         /* For now, just hard-code it to -96 to be safe */
3164         return (-96);
3165 #endif
3166 }
3167
3168 static void
3169 iwm_mvm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3170 {
3171         struct iwm_notif_statistics_v10 *stats = (void *)&pkt->data;
3172
3173         memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
3174         sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
3175 }
3176
3177 /*
3178  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3179  *
3180  * Handles the actual data of the Rx packet from the fw
3181  */
3182 static boolean_t
3183 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3184         boolean_t stolen)
3185 {
3186         struct ieee80211com *ic = &sc->sc_ic;
3187         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3188         struct ieee80211_frame *wh;
3189         struct ieee80211_node *ni;
3190         struct ieee80211_rx_stats rxs;
3191         struct iwm_rx_phy_info *phy_info;
3192         struct iwm_rx_mpdu_res_start *rx_res;
3193         struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3194         uint32_t len;
3195         uint32_t rx_pkt_status;
3196         int rssi;
3197
3198         phy_info = &sc->sc_last_phy_info;
3199         rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3200         wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3201         len = le16toh(rx_res->byte_count);
3202         rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3203
3204         if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3205                 device_printf(sc->sc_dev,
3206                     "dsp size out of range [0,20]: %d\n",
3207                     phy_info->cfg_phy_cnt);
3208                 goto fail;
3209         }
3210
3211         if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3212             !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3213                 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3214                     "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3215                 goto fail;
3216         }
3217
3218         rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3219
3220         /* Map it to relative value */
3221         rssi = rssi - sc->sc_noise;
3222
3223         /* replenish ring for the buffer we're going to feed to the sharks */
3224         if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3225                 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3226                     __func__);
3227                 goto fail;
3228         }
3229
3230         m->m_data = pkt->data + sizeof(*rx_res);
3231         m->m_pkthdr.len = m->m_len = len;
3232
3233         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3234             "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3235
3236         ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3237
3238         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3239             "%s: phy_info: channel=%d, flags=0x%08x\n",
3240             __func__,
3241             le16toh(phy_info->channel),
3242             le16toh(phy_info->phy_flags));
3243
3244         /*
3245          * Populate an RX state struct with the provided information.
3246          */
3247         bzero(&rxs, sizeof(rxs));
3248         rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3249         rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3250         rxs.c_ieee = le16toh(phy_info->channel);
3251         if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3252                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3253         } else {
3254                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3255         }
3256
3257         /* rssi is in 1/2db units */
3258         rxs.rssi = rssi * 2;
3259         rxs.nf = sc->sc_noise;
3260
3261         if (ieee80211_radiotap_active_vap(vap)) {
3262                 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3263
3264                 tap->wr_flags = 0;
3265                 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3266                         tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3267                 tap->wr_chan_freq = htole16(rxs.c_freq);
3268                 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3269                 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3270                 tap->wr_dbm_antsignal = (int8_t)rssi;
3271                 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3272                 tap->wr_tsft = phy_info->system_timestamp;
3273                 switch (phy_info->rate) {
3274                 /* CCK rates. */
3275                 case  10: tap->wr_rate =   2; break;
3276                 case  20: tap->wr_rate =   4; break;
3277                 case  55: tap->wr_rate =  11; break;
3278                 case 110: tap->wr_rate =  22; break;
3279                 /* OFDM rates. */
3280                 case 0xd: tap->wr_rate =  12; break;
3281                 case 0xf: tap->wr_rate =  18; break;
3282                 case 0x5: tap->wr_rate =  24; break;
3283                 case 0x7: tap->wr_rate =  36; break;
3284                 case 0x9: tap->wr_rate =  48; break;
3285                 case 0xb: tap->wr_rate =  72; break;
3286                 case 0x1: tap->wr_rate =  96; break;
3287                 case 0x3: tap->wr_rate = 108; break;
3288                 /* Unknown rate: should not happen. */
3289                 default:  tap->wr_rate =   0;
3290                 }
3291         }
3292
3293         IWM_UNLOCK(sc);
3294         if (ni != NULL) {
3295                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3296                 ieee80211_input_mimo(ni, m, &rxs);
3297                 ieee80211_free_node(ni);
3298         } else {
3299                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3300                 ieee80211_input_mimo_all(ic, m, &rxs);
3301         }
3302         IWM_LOCK(sc);
3303
3304         return TRUE;
3305
3306 fail:   counter_u64_add(ic->ic_ierrors, 1);
3307         return FALSE;
3308 }
3309
3310 static int
3311 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3312         struct iwm_node *in)
3313 {
3314         struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3315         struct ieee80211_node *ni = &in->in_ni;
3316         struct ieee80211vap *vap = ni->ni_vap;
3317         int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3318         int failack = tx_resp->failure_frame;
3319
3320         KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3321
3322         /* Update rate control statistics. */
3323         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3324             __func__,
3325             (int) le16toh(tx_resp->status.status),
3326             (int) le16toh(tx_resp->status.sequence),
3327             tx_resp->frame_count,
3328             tx_resp->bt_kill_count,
3329             tx_resp->failure_rts,
3330             tx_resp->failure_frame,
3331             le32toh(tx_resp->initial_rate),
3332             (int) le16toh(tx_resp->wireless_media_time));
3333
3334         if (status != IWM_TX_STATUS_SUCCESS &&
3335             status != IWM_TX_STATUS_DIRECT_DONE) {
3336                 ieee80211_ratectl_tx_complete(vap, ni,
3337                     IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3338                 return (1);
3339         } else {
3340                 ieee80211_ratectl_tx_complete(vap, ni,
3341                     IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3342                 return (0);
3343         }
3344 }
3345
/*
 * Handle a TX command completion notification: update rate control
 * statistics, release the frame's DMA mapping and mbuf, and restart
 * transmission if the queue-full condition has cleared.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
        struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
        int idx = cmd_hdr->idx;
        int qid = cmd_hdr->qid;
        struct iwm_tx_ring *ring = &sc->txq[qid];
        struct iwm_tx_data *txd = &ring->data[idx];
        struct iwm_node *in = txd->in;
        struct mbuf *m = txd->m;
        int status;

        KASSERT(txd->done == 0, ("txd not done"));
        KASSERT(txd->in != NULL, ("txd without node"));
        KASSERT(txd->m != NULL, ("txd without mbuf"));

        /* A completion arrived, so reset the TX watchdog. */
        sc->sc_tx_timer = 0;

        status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

        /* Unmap and free mbuf. */
        bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(ring->data_dmat, txd->map);

        IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
            "free txd %p, in %p\n", txd, txd->in);
        /* Mark the slot free before handing the mbuf to net80211. */
        txd->done = 1;
        txd->m = NULL;
        txd->in = NULL;

        /* Non-zero status is counted as an output error by net80211. */
        ieee80211_tx_complete(&in->in_ni, m, status);

        /* Resume transmission once the ring drains below the low mark. */
        if (--ring->queued < IWM_TX_RING_LOMARK) {
                sc->qfullmsk &= ~(1 << ring->qid);
                if (sc->qfullmsk == 0) {
                        iwm_start(sc);
                }
        }
}
3385
3386 /*
3387  * transmit side
3388  */
3389
3390 /*
3391  * Process a "command done" firmware notification.  This is where we wakeup
3392  * processes waiting for a synchronous command completion.
3393  * from if_iwn
3394  */
3395 static void
3396 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3397 {
3398         struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3399         struct iwm_tx_data *data;
3400
3401         if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3402                 return; /* Not a command ack. */
3403         }
3404
3405         /* XXX wide commands? */
3406         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3407             "cmd notification type 0x%x qid %d idx %d\n",
3408             pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3409
3410         data = &ring->data[pkt->hdr.idx];
3411
3412         /* If the command was mapped in an mbuf, free it. */
3413         if (data->m != NULL) {
3414                 bus_dmamap_sync(ring->data_dmat, data->map,
3415                     BUS_DMASYNC_POSTWRITE);
3416                 bus_dmamap_unload(ring->data_dmat, data->map);
3417                 m_freem(data->m);
3418                 data->m = NULL;
3419         }
3420         wakeup(&ring->desc[pkt->hdr.idx]);
3421
3422         if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3423                 device_printf(sc->sc_dev,
3424                     "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3425                     __func__, pkt->hdr.idx, ring->queued, ring->cur);
3426                 /* XXX call iwm_force_nmi() */
3427         }
3428
3429         KASSERT(ring->queued > 0, ("ring->queued is empty?"));
3430         ring->queued--;
3431         if (ring->queued == 0)
3432                 iwm_pcie_clear_cmd_in_flight(sc);
3433 }
3434
#if 0
/*
 * necessary only for block ack mode
 *
 * Updates the TX scheduler byte-count table for slot (qid, idx) with
 * the frame length (plus 8 bytes of overhead), encoded together with
 * the station id.  Currently compiled out.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
        uint16_t len)
{
        struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
        uint16_t w_val;

        scd_bc_tbl = sc->sched_dma.vaddr;

        len += 8; /* magic numbers came naturally from paris */
        /* Byte count is stored in 4-byte (dword) units. */
        len = roundup(len, 4) / 4;

        /* High nibble carries the station id, low 12 bits the length. */
        w_val = htole16(sta_id << 12 | len);

        /* Update TX scheduler. */
        scd_bc_tbl[qid].tfd_offset[idx] = w_val;
        bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
            BUS_DMASYNC_PREWRITE);

        /* I really wonder what this is ?!? */
        /*
         * NOTE(review): presumably a duplicated shadow region the
         * hardware reads past the end of the table — confirm against
         * the iwlwifi scheduler documentation.
         */
        if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
                scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
                bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
                    BUS_DMASYNC_PREWRITE);
        }
}
#endif
3466
3467 /*
3468  * Take an 802.11 (non-n) rate, find the relevant rate
3469  * table entry.  return the index into in_ridx[].
3470  *
3471  * The caller then uses that index back into in_ridx
3472  * to figure out the rate index programmed /into/
3473  * the firmware for this given node.
3474  */
3475 static int
3476 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3477     uint8_t rate)
3478 {
3479         int i;
3480         uint8_t r;
3481
3482         for (i = 0; i < nitems(in->in_ridx); i++) {
3483                 r = iwm_rates[in->in_ridx[i]].rate;
3484                 if (rate == r)
3485                         return (i);
3486         }
3487
3488         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3489             "%s: couldn't find an entry for rate=%d\n",
3490             __func__,
3491             rate);
3492
3493         /* XXX Return the first */
3494         /* XXX TODO: have it return the /lowest/ */
3495         return (0);
3496 }
3497
3498 static int
3499 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3500 {
3501         int i;
3502
3503         for (i = 0; i < nitems(iwm_rates); i++) {
3504                 if (iwm_rates[i].rate == rate)
3505                         return (i);
3506         }
3507         /* XXX error? */
3508         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3509             "%s: couldn't find an entry for rate=%d\n",
3510             __func__,
3511             rate);
3512         return (0);
3513 }
3514
3515 /*
3516  * Fill in the rate related information for a transmit command.
3517  */
3518 static const struct iwm_rate *
3519 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3520         struct mbuf *m, struct iwm_tx_cmd *tx)
3521 {
3522         struct ieee80211_node *ni = &in->in_ni;
3523         struct ieee80211_frame *wh;
3524         const struct ieee80211_txparam *tp = ni->ni_txparms;
3525         const struct iwm_rate *rinfo;
3526         int type;
3527         int ridx, rate_flags;
3528
3529         wh = mtod(m, struct ieee80211_frame *);
3530         type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3531
3532         tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3533         tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3534
3535         if (type == IEEE80211_FC0_TYPE_MGT) {
3536                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3537                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3538                     "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3539         } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3540                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3541                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3542                     "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3543         } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3544                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3545                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3546                     "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3547         } else if (m->m_flags & M_EAPOL) {
3548                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3549                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3550                     "%s: EAPOL\n", __func__);
3551         } else if (type == IEEE80211_FC0_TYPE_DATA) {
3552                 int i;
3553
3554                 /* for data frames, use RS table */
3555                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3556                 /* XXX pass pktlen */
3557                 (void) ieee80211_ratectl_rate(ni, NULL, 0);
3558                 i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3559                 ridx = in->in_ridx[i];
3560
3561                 /* This is the index into the programmed table */
3562                 tx->initial_rate_index = i;
3563                 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3564
3565                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3566                     "%s: start with i=%d, txrate %d\n",
3567                     __func__, i, iwm_rates[ridx].rate);
3568         } else {
3569                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3570                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DEFAULT (%d)\n",
3571                     __func__, tp->mgmtrate);
3572         }
3573
3574         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3575             "%s: frame type=%d txrate %d\n",
3576                 __func__, type, iwm_rates[ridx].rate);
3577
3578         rinfo = &iwm_rates[ridx];
3579
3580         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3581             __func__, ridx,
3582             rinfo->rate,
3583             !! (IWM_RIDX_IS_CCK(ridx))
3584             );
3585
3586         /* XXX TODO: hard-coded TX antenna? */
3587         rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3588         if (IWM_RIDX_IS_CCK(ridx))
3589                 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3590         tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3591
3592         return rinfo;
3593 }
3594
/*
 * The first hardware TX buffer (TB0) covers the initial 16 bytes of the
 * TX command; TB1 covers the remainder of the command plus the 802.11
 * header (see descriptor setup below).
 */
#define TB0_SIZE 16
/*
 * Queue frame 'm' for node 'ni' on TX ring 'ac': build the firmware TX
 * command (rate, station, flags), optionally encrypt, DMA-map the
 * payload and fill the TFD descriptor, then kick the ring.
 *
 * Returns 0 on success or an errno; on failure 'm' is freed.
 * Called with the driver lock held.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
        struct ieee80211com *ic = &sc->sc_ic;
        struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
        struct iwm_node *in = IWM_NODE(ni);
        struct iwm_tx_ring *ring;
        struct iwm_tx_data *data;
        struct iwm_tfd *desc;
        struct iwm_device_cmd *cmd;
        struct iwm_tx_cmd *tx;
        struct ieee80211_frame *wh;
        struct ieee80211_key *k = NULL;
        struct mbuf *m1;
        const struct iwm_rate *rinfo;
        uint32_t flags;
        u_int hdrlen;
        bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
        int nsegs;
        uint8_t tid, type;
        int i, totlen, error, pad;

        wh = mtod(m, struct ieee80211_frame *);
        hdrlen = ieee80211_anyhdrsize(wh);
        type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
        tid = 0;
        ring = &sc->txq[ac];
        desc = &ring->desc[ring->cur];
        memset(desc, 0, sizeof(*desc));
        data = &ring->data[ring->cur];

        /* Fill out iwm_tx_cmd to send to the firmware */
        cmd = &ring->cmd[ring->cur];
        cmd->hdr.code = IWM_TX_CMD;
        cmd->hdr.flags = 0;
        cmd->hdr.qid = ring->qid;
        cmd->hdr.idx = ring->cur;

        tx = (void *)cmd->data;
        memset(tx, 0, sizeof(*tx));

        /* Select the TX rate and program the rate fields of the command. */
        rinfo = iwm_tx_fill_cmd(sc, in, m, tx);

        /* Encrypt the frame if need be. */
        if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
                /* Retrieve key for TX && do software encryption. */
                k = ieee80211_crypto_encap(ni, m);
                if (k == NULL) {
                        m_freem(m);
                        return (ENOBUFS);
                }
                /* 802.11 header may have moved. */
                wh = mtod(m, struct ieee80211_frame *);
        }

        if (ieee80211_radiotap_active_vap(vap)) {
                struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

                tap->wt_flags = 0;
                tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
                tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
                tap->wt_rate = rinfo->rate;
                if (k != NULL)
                        tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
                ieee80211_radiotap_tx(vap, m);
        }


        totlen = m->m_pkthdr.len;

        flags = 0;
        /* Unicast frames expect an ACK from the receiver. */
        if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
                flags |= IWM_TX_CMD_FLG_ACK;
        }

        /* Request RTS/CTS protection for long unicast data frames. */
        if (type == IEEE80211_FC0_TYPE_DATA
            && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
            && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
                flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
        }

        /* Multicast and non-data frames go through the auxiliary station. */
        if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
            type != IEEE80211_FC0_TYPE_DATA)
                tx->sta_id = sc->sc_aux_sta.sta_id;
        else
                tx->sta_id = IWM_STATION_ID;

        if (type == IEEE80211_FC0_TYPE_MGT) {
                uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

                /* Per-subtype power-management frame timeout. */
                if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
                    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
                        tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
                } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
                        tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
                } else {
                        tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
                }
        } else {
                tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
        }

        if (hdrlen & 3) {
                /* First segment length must be a multiple of 4. */
                flags |= IWM_TX_CMD_FLG_MH_PAD;
                pad = 4 - (hdrlen & 3);
        } else
                pad = 0;

        tx->driver_txop = 0;
        tx->next_frame_len = 0;

        tx->len = htole16(totlen);
        tx->tid_tspec = tid;
        tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

        /* Set physical address of "scratch area". */
        tx->dram_lsb_ptr = htole32(data->scratch_paddr);
        tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

        /* Copy 802.11 header in TX command. */
        memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

        flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

        tx->sec_ctl = 0;
        tx->tx_flags |= htole32(flags);

        /* Trim 802.11 header. */
        m_adj(m, hdrlen);
        error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
            segs, &nsegs, BUS_DMA_NOWAIT);
        if (error != 0) {
                if (error != EFBIG) {
                        device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
                            error);
                        m_freem(m);
                        return error;
                }
                /* Too many DMA segments, linearize mbuf. */
                m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
                if (m1 == NULL) {
                        device_printf(sc->sc_dev,
                            "%s: could not defrag mbuf\n", __func__);
                        m_freem(m);
                        return (ENOBUFS);
                }
                m = m1;

                error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
                    segs, &nsegs, BUS_DMA_NOWAIT);
                if (error != 0) {
                        device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
                            error);
                        m_freem(m);
                        return error;
                }
        }
        data->m = m;
        data->in = in;
        data->done = 0;

        IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
            "sending txd %p, in %p\n", data, data->in);
        KASSERT(data->in != NULL, ("node is NULL"));

        IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
            "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
            ring->qid, ring->cur, totlen, nsegs,
            le32toh(tx->tx_flags),
            le32toh(tx->rate_n_flags),
            tx->initial_rate_index
            );

        /* Fill TX descriptor. */
        /* TB0 and TB1 carry the command+header; the rest map the payload. */
        desc->num_tbs = 2 + nsegs;

        desc->tbs[0].lo = htole32(data->cmd_paddr);
        desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
            (TB0_SIZE << 4);
        desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
        desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
            ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
              + hdrlen + pad - TB0_SIZE) << 4);

        /* Other DMA segments are for data payload. */
        for (i = 0; i < nsegs; i++) {
                seg = &segs[i];
                desc->tbs[i+2].lo = htole32(seg->ds_addr);
                desc->tbs[i+2].hi_n_len = \
                    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
                    | ((seg->ds_len) << 4);
        }

        bus_dmamap_sync(ring->data_dmat, data->map,
            BUS_DMASYNC_PREWRITE);
        bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
            BUS_DMASYNC_PREWRITE);
        bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
            BUS_DMASYNC_PREWRITE);

#if 0
        iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

        /* Kick TX ring. */
        ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
        IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

        /* Mark TX ring as full if we reach a certain threshold. */
        if (++ring->queued > IWM_TX_RING_HIMARK) {
                sc->qfullmsk |= 1 << ring->qid;
        }

        return 0;
}
3812
3813 static int
3814 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3815     const struct ieee80211_bpf_params *params)
3816 {
3817         struct ieee80211com *ic = ni->ni_ic;
3818         struct iwm_softc *sc = ic->ic_softc;
3819         int error = 0;
3820
3821         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3822             "->%s begin\n", __func__);
3823
3824         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3825                 m_freem(m);
3826                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3827                     "<-%s not RUNNING\n", __func__);
3828                 return (ENETDOWN);
3829         }
3830
3831         IWM_LOCK(sc);
3832         /* XXX fix this */
3833         if (params == NULL) {
3834                 error = iwm_tx(sc, m, ni, 0);
3835         } else {
3836                 error = iwm_tx(sc, m, ni, 0);
3837         }
3838         sc->sc_tx_timer = 5;
3839         IWM_UNLOCK(sc);
3840
3841         return (error);
3842 }
3843
3844 /*
3845  * mvm/tx.c
3846  */
3847
3848 /*
3849  * Note that there are transports that buffer frames before they reach
3850  * the firmware. This means that after flush_tx_path is called, the
3851  * queue might not be empty. The race-free way to handle this is to:
3852  * 1) set the station as draining
3853  * 2) flush the Tx path
3854  * 3) wait for the transport queues to be empty
3855  */
3856 int
3857 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3858 {
3859         int ret;
3860         struct iwm_tx_path_flush_cmd flush_cmd = {
3861                 .queues_ctl = htole32(tfd_msk),
3862                 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3863         };
3864
3865         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3866             sizeof(flush_cmd), &flush_cmd);
3867         if (ret)
3868                 device_printf(sc->sc_dev,
3869                     "Flushing tx queue failed: %d\n", ret);
3870         return ret;
3871 }
3872
3873 /*
3874  * BEGIN mvm/quota.c
3875  */
3876
3877 static int
3878 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
3879 {
3880         struct iwm_time_quota_cmd cmd;
3881         int i, idx, ret, num_active_macs, quota, quota_rem;
3882         int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3883         int n_ifs[IWM_MAX_BINDINGS] = {0, };
3884         uint16_t id;
3885
3886         memset(&cmd, 0, sizeof(cmd));
3887
3888         /* currently, PHY ID == binding ID */
3889         if (ivp) {
3890                 id = ivp->phy_ctxt->id;
3891                 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3892                 colors[id] = ivp->phy_ctxt->color;
3893
3894                 if (1)
3895                         n_ifs[id] = 1;
3896         }
3897
3898         /*
3899          * The FW's scheduling session consists of
3900          * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3901          * equally between all the bindings that require quota
3902          */
3903         num_active_macs = 0;
3904         for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3905                 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3906                 num_active_macs += n_ifs[i];
3907         }
3908
3909         quota = 0;
3910         quota_rem = 0;
3911         if (num_active_macs) {
3912                 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3913                 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3914         }
3915
3916         for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3917                 if (colors[i] < 0)
3918                         continue;
3919
3920                 cmd.quotas[idx].id_and_color =
3921                         htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3922
3923                 if (n_ifs[i] <= 0) {
3924                         cmd.quotas[idx].quota = htole32(0);
3925                         cmd.quotas[idx].max_duration = htole32(0);
3926                 } else {
3927                         cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3928                         cmd.quotas[idx].max_duration = htole32(0);
3929                 }
3930                 idx++;
3931         }
3932
3933         /* Give the remainder of the session to the first binding */
3934         cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3935
3936         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3937             sizeof(cmd), &cmd);
3938         if (ret)
3939                 device_printf(sc->sc_dev,
3940                     "%s: Failed to send quota: %d\n", __func__, ret);
3941         return ret;
3942 }
3943
3944 /*
3945  * END mvm/quota.c
3946  */
3947
3948 /*
3949  * ieee80211 routines
3950  */
3951
3952 /*
3953  * Change to AUTH state in 80211 state machine.  Roughly matches what
3954  * Linux does in bss_info_changed().
3955  */
3956 static int
3957 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3958 {
3959         struct ieee80211_node *ni;
3960         struct iwm_node *in;
3961         struct iwm_vap *iv = IWM_VAP(vap);
3962         uint32_t duration;
3963         int error;
3964
3965         /*
3966          * XXX i have a feeling that the vap node is being
3967          * freed from underneath us. Grr.
3968          */
3969         ni = ieee80211_ref_node(vap->iv_bss);
3970         in = IWM_NODE(ni);
3971         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3972             "%s: called; vap=%p, bss ni=%p\n",
3973             __func__,
3974             vap,
3975             ni);
3976
3977         in->in_assoc = 0;
3978
3979         /*
3980          * Firmware bug - it'll crash if the beacon interval is less
3981          * than 16. We can't avoid connecting at all, so refuse the
3982          * station state change, this will cause net80211 to abandon
3983          * attempts to connect to this AP, and eventually wpa_s will
3984          * blacklist the AP...
3985          */
3986         if (ni->ni_intval < 16) {
3987                 device_printf(sc->sc_dev,
3988                     "AP %s beacon interval is %d, refusing due to firmware bug!\n",
3989                     ether_sprintf(ni->ni_bssid), ni->ni_intval);
3990                 error = EINVAL;
3991                 goto out;
3992         }
3993
3994         error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
3995         if (error != 0)
3996                 return error;
3997
3998         error = iwm_allow_mcast(vap, sc);
3999         if (error) {
4000                 device_printf(sc->sc_dev,
4001                     "%s: failed to set multicast\n", __func__);
4002                 goto out;
4003         }
4004
4005         /*
4006          * This is where it deviates from what Linux does.
4007          *
4008          * Linux iwlwifi doesn't reset the nic each time, nor does it
4009          * call ctxt_add() here.  Instead, it adds it during vap creation,
4010          * and always does a mac_ctx_changed().
4011          *
4012          * The openbsd port doesn't attempt to do that - it reset things
4013          * at odd states and does the add here.
4014          *
4015          * So, until the state handling is fixed (ie, we never reset
4016          * the NIC except for a firmware failure, which should drag
4017          * the NIC back to IDLE, re-setup and re-add all the mac/phy
4018          * contexts that are required), let's do a dirty hack here.
4019          */
4020         if (iv->is_uploaded) {
4021                 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4022                         device_printf(sc->sc_dev,
4023                             "%s: failed to update MAC\n", __func__);
4024                         goto out;
4025                 }
4026                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4027                     in->in_ni.ni_chan, 1, 1)) != 0) {
4028                         device_printf(sc->sc_dev,
4029                             "%s: failed update phy ctxt\n", __func__);
4030                         goto out;
4031                 }
4032                 iv->phy_ctxt = &sc->sc_phyctxt[0];
4033
4034                 if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
4035                         device_printf(sc->sc_dev,
4036                             "%s: binding update cmd\n", __func__);
4037                         goto out;
4038                 }
4039                 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4040                         device_printf(sc->sc_dev,
4041                             "%s: failed to update sta\n", __func__);
4042                         goto out;
4043                 }
4044         } else {
4045                 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4046                         device_printf(sc->sc_dev,
4047                             "%s: failed to add MAC\n", __func__);
4048                         goto out;
4049                 }
4050                 if ((error = iwm_mvm_power_update_mac(sc)) != 0) {
4051                         device_printf(sc->sc_dev,
4052                             "%s: failed to update power management\n",
4053                             __func__);
4054                         goto out;
4055                 }
4056                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4057                     in->in_ni.ni_chan, 1, 1)) != 0) {
4058                         device_printf(sc->sc_dev,
4059                             "%s: failed add phy ctxt!\n", __func__);
4060                         error = ETIMEDOUT;
4061                         goto out;
4062                 }
4063                 iv->phy_ctxt = &sc->sc_phyctxt[0];
4064
4065                 if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
4066                         device_printf(sc->sc_dev,
4067                             "%s: binding add cmd\n", __func__);
4068                         goto out;
4069                 }
4070                 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4071                         device_printf(sc->sc_dev,
4072                             "%s: failed to add sta\n", __func__);
4073                         goto out;
4074                 }
4075         }
4076
4077         /*
4078          * Prevent the FW from wandering off channel during association
4079          * by "protecting" the session with a time event.
4080          */
4081         /* XXX duration is in units of TU, not MS */
4082         duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4083         iwm_mvm_protect_session(sc, iv, duration, 500 /* XXX magic number */);
4084         DELAY(100);
4085
4086         error = 0;
4087 out:
4088         ieee80211_free_node(ni);
4089         return (error);
4090 }
4091
4092 static int
4093 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
4094 {
4095         struct iwm_node *in = IWM_NODE(vap->iv_bss);
4096         int error;
4097
4098         if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4099                 device_printf(sc->sc_dev,
4100                     "%s: failed to update STA\n", __func__);
4101                 return error;
4102         }
4103
4104         in->in_assoc = 1;
4105         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4106                 device_printf(sc->sc_dev,
4107                     "%s: failed to update MAC\n", __func__);
4108                 return error;
4109         }
4110
4111         return 0;
4112 }
4113
4114 static int
4115 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
4116 {
4117         uint32_t tfd_msk;
4118
4119         /*
4120          * Ok, so *technically* the proper set of calls for going
4121          * from RUN back to SCAN is:
4122          *
4123          * iwm_mvm_power_mac_disable(sc, in);
4124          * iwm_mvm_mac_ctxt_changed(sc, vap);
4125          * iwm_mvm_rm_sta(sc, in);
4126          * iwm_mvm_update_quotas(sc, NULL);
4127          * iwm_mvm_mac_ctxt_changed(sc, in);
4128          * iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4129          * iwm_mvm_mac_ctxt_remove(sc, in);
4130          *
4131          * However, that freezes the device not matter which permutations
4132          * and modifications are attempted.  Obviously, this driver is missing
4133          * something since it works in the Linux driver, but figuring out what
4134          * is missing is a little more complicated.  Now, since we're going
4135          * back to nothing anyway, we'll just do a complete device reset.
4136          * Up your's, device!
4137          */
4138         /*
4139          * Just using 0xf for the queues mask is fine as long as we only
4140          * get here from RUN state.
4141          */
4142         tfd_msk = 0xf;
4143         iwm_xmit_queue_drain(sc);
4144         iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
4145         /*
4146          * We seem to get away with just synchronously sending the
4147          * IWM_TXPATH_FLUSH command.
4148          */
4149 //      iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
4150         iwm_stop_device(sc);
4151         iwm_init_hw(sc);
4152         if (in)
4153                 in->in_assoc = 0;
4154         return 0;
4155
4156 #if 0
4157         int error;
4158
4159         iwm_mvm_power_mac_disable(sc, in);
4160
4161         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4162                 device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
4163                 return error;
4164         }
4165
4166         if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
4167                 device_printf(sc->sc_dev, "sta remove fail %d\n", error);
4168                 return error;
4169         }
4170         error = iwm_mvm_rm_sta(sc, in);
4171         in->in_assoc = 0;
4172         iwm_mvm_update_quotas(sc, NULL);
4173         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4174                 device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
4175                 return error;
4176         }
4177         iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4178
4179         iwm_mvm_mac_ctxt_remove(sc, in);
4180
4181         return error;
4182 #endif
4183 }
4184
4185 static struct ieee80211_node *
4186 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4187 {
4188         return malloc(sizeof (struct iwm_node), M_80211_NODE,
4189             M_NOWAIT | M_ZERO);
4190 }
4191
4192 uint8_t
4193 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4194 {
4195         int i;
4196         uint8_t rval;
4197
4198         for (i = 0; i < rs->rs_nrates; i++) {
4199                 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4200                 if (rval == iwm_rates[ridx].rate)
4201                         return rs->rs_rates[i];
4202         }
4203
4204         return 0;
4205 }
4206
/*
 * Build the firmware link-quality (rate selection) command for a node.
 *
 * Populates in->in_ridx[] (802.11 rate -> hardware rate index, highest
 * rate first) and then fills in->in_lq's rs_table with the PLCP/antenna
 * words the firmware walks on retries.  Bails out without touching the
 * lq command if the node advertises zero rates or more than the table
 * can hold.  NOTE: legacy rates only; not 11n aware (see XXX below).
 */
static void
iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
{
        struct ieee80211_node *ni = &in->in_ni;
        struct iwm_lq_cmd *lq = &in->in_lq;
        int nrates = ni->ni_rates.rs_nrates;
        int i, ridx, tab = 0;
//      int txant = 0;

        if (nrates > nitems(lq->rs_table)) {
                device_printf(sc->sc_dev,
                    "%s: node supports %d rates, driver handles "
                    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
                return;
        }
        if (nrates == 0) {
                device_printf(sc->sc_dev,
                    "%s: node supports 0 rates, odd!\n", __func__);
                return;
        }

        /*
         * XXX .. and most of iwm_node is not initialised explicitly;
         * it's all just 0x0 passed to the firmware.
         */

        /* first figure out which rates we should support */
        /* XXX TODO: this isn't 11n aware /at all/ */
        /* -1 marks unused in_ridx[] slots (0xff in every byte). */
        memset(&in->in_ridx, -1, sizeof(in->in_ridx));
        IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
            "%s: nrates=%d\n", __func__, nrates);

        /*
         * Loop over nrates and populate in_ridx from the highest
         * rate to the lowest rate.  Remember, in_ridx[] has
         * IEEE80211_RATE_MAXSIZE entries!
         */
        for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
                int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;

                /* Map 802.11 rate to HW rate index. */
                for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
                        if (iwm_rates[ridx].rate == rate)
                                break;
                if (ridx > IWM_RIDX_MAX) {
                        /* Slot keeps its -1 sentinel on a miss. */
                        device_printf(sc->sc_dev,
                            "%s: WARNING: device rate for %d not found!\n",
                            __func__, rate);
                } else {
                        IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
                            "%s: rate: i: %d, rate=%d, ridx=%d\n",
                            __func__,
                            i,
                            rate,
                            ridx);
                        in->in_ridx[i] = ridx;
                }
        }

        /* then construct a lq_cmd based on those */
        memset(lq, 0, sizeof(*lq));
        lq->sta_id = IWM_STATION_ID;

        /* For HT, always enable RTS/CTS to avoid excessive retries. */
        if (ni->ni_flags & IEEE80211_NODE_HT)
                lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

        /*
         * are these used? (we don't do SISO or MIMO)
         * need to set them to non-zero, though, or we get an error.
         */
        lq->single_stream_ant_msk = 1;
        lq->dual_stream_ant_msk = 1;

        /*
         * Build the actual rate selection table.
         * The lowest bits are the rates.  Additionally,
         * CCK needs bit 9 to be set.  The rest of the bits
         * we add to the table select the tx antenna
         * Note that we add the rates in the highest rate first
         * (opposite of ni_rates).
         */
        /*
         * XXX TODO: this should be looping over the min of nrates
         * and LQ_MAX_RETRY_NUM.  Sigh.
         */
        for (i = 0; i < nrates; i++) {
                int nextant;

#if 0
                if (txant == 0)
                        txant = iwm_mvm_get_valid_tx_ant(sc);
                nextant = 1<<(ffs(txant)-1);
                txant &= ~nextant;
#else
                nextant = iwm_mvm_get_valid_tx_ant(sc);
#endif
                /*
                 * Map the rate id into a rate index into
                 * our hardware table containing the
                 * configuration to use for this rate.
                 */
                ridx = in->in_ridx[i];
                tab = iwm_rates[ridx].plcp;
                tab |= nextant << IWM_RATE_MCS_ANT_POS;
                if (IWM_RIDX_IS_CCK(ridx))
                        tab |= IWM_RATE_MCS_CCK_MSK;
                IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
                    "station rate i=%d, rate=%d, hw=%x\n",
                    i, iwm_rates[ridx].rate, tab);
                lq->rs_table[i] = htole32(tab);
        }
        /* then fill the rest with the lowest possible rate */
        /* 'tab' still holds the last (lowest) rate's word here. */
        for (i = nrates; i < nitems(lq->rs_table); i++) {
                KASSERT(tab != 0, ("invalid tab"));
                lq->rs_table[i] = htole32(tab);
        }
}
4325
4326 static int
4327 iwm_media_change(struct ifnet *ifp)
4328 {
4329         struct ieee80211vap *vap = ifp->if_softc;
4330         struct ieee80211com *ic = vap->iv_ic;
4331         struct iwm_softc *sc = ic->ic_softc;
4332         int error;
4333
4334         error = ieee80211_media_change(ifp);
4335         if (error != ENETRESET)
4336                 return error;
4337
4338         IWM_LOCK(sc);
4339         if (ic->ic_nrunning > 0) {
4340                 iwm_stop(sc);
4341                 iwm_init(sc);
4342         }
4343         IWM_UNLOCK(sc);
4344         return error;
4345 }
4346
4347
/*
 * net80211 state-machine transition handler.
 *
 * Runs the driver-side work for each target state and then chains to
 * the saved net80211 handler (ivp->iv_newstate).  Called with the
 * IEEE80211 com lock held; the lock is dropped while IWM_LOCK is held
 * and re-acquired before returning, so lock order is always
 * IEEE80211 -> IWM, never nested the other way.
 */
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
        struct iwm_vap *ivp = IWM_VAP(vap);
        struct ieee80211com *ic = vap->iv_ic;
        struct iwm_softc *sc = ic->ic_softc;
        struct iwm_node *in;
        int error;

        IWM_DPRINTF(sc, IWM_DEBUG_STATE,
            "switching state %s -> %s\n",
            ieee80211_state_name[vap->iv_state],
            ieee80211_state_name[nstate]);
        IEEE80211_UNLOCK(ic);
        IWM_LOCK(sc);

        /* Leaving SCAN: stop the scan LED blink. */
        if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
                iwm_led_blink_stop(sc);

        /* disable beacon filtering if we're hopping out of RUN */
        if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
                iwm_mvm_disable_beacon_filter(sc);

                if (((in = IWM_NODE(vap->iv_bss)) != NULL))
                        in->in_assoc = 0;

                if (nstate == IEEE80211_S_INIT) {
                        /*
                         * RUN -> INIT: run net80211's handler first
                         * (lock dance to respect IEEE80211 -> IWM
                         * order), then fully reset the device via
                         * iwm_release().
                         */
                        IWM_UNLOCK(sc);
                        IEEE80211_LOCK(ic);
                        error = ivp->iv_newstate(vap, nstate, arg);
                        IEEE80211_UNLOCK(ic);
                        IWM_LOCK(sc);
                        iwm_release(sc, NULL);
                        IWM_UNLOCK(sc);
                        IEEE80211_LOCK(ic);
                        return error;
                }

                /*
                 * It's impossible to directly go RUN->SCAN. If we iwm_release()
                 * above then the card will be completely reinitialized,
                 * so the driver must do everything necessary to bring the card
                 * from INIT to SCAN.
                 *
                 * Additionally, upon receiving deauth frame from AP,
                 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
                 * state. This will also fail with this driver, so bring the FSM
                 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
                 *
                 * XXX TODO: fix this for FreeBSD!
                 */
                if (nstate == IEEE80211_S_SCAN ||
                    nstate == IEEE80211_S_AUTH ||
                    nstate == IEEE80211_S_ASSOC) {
                        IWM_DPRINTF(sc, IWM_DEBUG_STATE,
                            "Force transition to INIT; MGT=%d\n", arg);
                        IWM_UNLOCK(sc);
                        IEEE80211_LOCK(ic);
                        /* Always pass arg as -1 since we can't Tx right now. */
                        /*
                         * XXX arg is just ignored anyway when transitioning
                         *     to IEEE80211_S_INIT.
                         */
                        vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
                        IWM_DPRINTF(sc, IWM_DEBUG_STATE,
                            "Going INIT->SCAN\n");
                        nstate = IEEE80211_S_SCAN;
                        IEEE80211_UNLOCK(ic);
                        IWM_LOCK(sc);
                }
        }

        switch (nstate) {
        case IEEE80211_S_INIT:
        case IEEE80211_S_SCAN:
                if (vap->iv_state == IEEE80211_S_AUTH ||
                    vap->iv_state == IEEE80211_S_ASSOC) {
                        int myerr;
                        /*
                         * Backing out of AUTH/ASSOC: let net80211
                         * transition first, then tear down the
                         * station, MAC context and binding in the
                         * firmware.  Teardown errors are logged but
                         * do not override net80211's return value.
                         */
                        IWM_UNLOCK(sc);
                        IEEE80211_LOCK(ic);
                        myerr = ivp->iv_newstate(vap, nstate, arg);
                        IEEE80211_UNLOCK(ic);
                        IWM_LOCK(sc);
                        error = iwm_mvm_rm_sta(sc, vap, FALSE);
                        if (error) {
                                device_printf(sc->sc_dev,
                                    "%s: Failed to remove station: %d\n",
                                    __func__, error);
                        }
                        error = iwm_mvm_mac_ctxt_changed(sc, vap);
                        if (error) {
                                device_printf(sc->sc_dev,
                                    "%s: Failed to change mac context: %d\n",
                                    __func__, error);
                        }
                        error = iwm_mvm_binding_remove_vif(sc, ivp);
                        if (error) {
                                device_printf(sc->sc_dev,
                                    "%s: Failed to remove channel ctx: %d\n",
                                    __func__, error);
                        }
                        ivp->phy_ctxt = NULL;
                        IWM_UNLOCK(sc);
                        IEEE80211_LOCK(ic);
                        return myerr;
                }
                break;

        case IEEE80211_S_AUTH:
                if ((error = iwm_auth(vap, sc)) != 0) {
                        device_printf(sc->sc_dev,
                            "%s: could not move to auth state: %d\n",
                            __func__, error);
                        break;
                }
                break;

        case IEEE80211_S_ASSOC:
                if ((error = iwm_assoc(vap, sc)) != 0) {
                        device_printf(sc->sc_dev,
                            "%s: failed to associate: %d\n", __func__,
                            error);
                        break;
                }
                break;

        case IEEE80211_S_RUN:
                /* Update the association state, now we have it all */
                /* (eg associd comes in at this point */
                error = iwm_assoc(vap, sc);
                if (error != 0) {
                        device_printf(sc->sc_dev,
                            "%s: failed to update association state: %d\n",
                            __func__,
                            error);
                        break;
                }

                in = IWM_NODE(vap->iv_bss);
                iwm_mvm_enable_beacon_filter(sc, in);
                iwm_mvm_power_update_mac(sc);
                iwm_mvm_update_quotas(sc, ivp);
                iwm_setrates(sc, in);

                /* Push the freshly built rate table to the firmware. */
                if ((error = iwm_mvm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) {
                        device_printf(sc->sc_dev,
                            "%s: IWM_LQ_CMD failed: %d\n", __func__, error);
                }

                iwm_mvm_led_enable(sc);
                break;

        default:
                break;
        }
        IWM_UNLOCK(sc);
        IEEE80211_LOCK(ic);

        /* Chain to net80211's own transition handling. */
        return (ivp->iv_newstate(vap, nstate, arg));
}
4508
4509 void
4510 iwm_endscan_cb(void *arg, int pending)
4511 {
4512         struct iwm_softc *sc = arg;
4513         struct ieee80211com *ic = &sc->sc_ic;
4514
4515         IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4516             "%s: scan ended\n",
4517             __func__);
4518
4519         ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4520 }
4521
4522 /*
4523  * Aging and idle timeouts for the different possible scenarios
4524  * in default configuration
4525  */
4526 static const uint32_t
4527 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4528         {
4529                 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
4530                 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
4531         },
4532         {
4533                 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
4534                 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
4535         },
4536         {
4537                 htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
4538                 htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
4539         },
4540         {
4541                 htole32(IWM_SF_BA_AGING_TIMER_DEF),
4542                 htole32(IWM_SF_BA_IDLE_TIMER_DEF)
4543         },
4544         {
4545                 htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
4546                 htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
4547         },
4548 };
4549
4550 /*
4551  * Aging and idle timeouts for the different possible scenarios
4552  * in single BSS MAC configuration.
4553  */
4554 static const uint32_t
4555 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4556         {
4557                 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
4558                 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
4559         },
4560         {
4561                 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
4562                 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
4563         },
4564         {
4565                 htole32(IWM_SF_MCAST_AGING_TIMER),
4566                 htole32(IWM_SF_MCAST_IDLE_TIMER)
4567         },
4568         {
4569                 htole32(IWM_SF_BA_AGING_TIMER),
4570                 htole32(IWM_SF_BA_IDLE_TIMER)
4571         },
4572         {
4573                 htole32(IWM_SF_TX_RE_AGING_TIMER),
4574                 htole32(IWM_SF_TX_RE_IDLE_TIMER)
4575         },
4576 };
4577
4578 static void
4579 iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
4580     struct ieee80211_node *ni)
4581 {
4582         int i, j, watermark;
4583
4584         sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
4585
4586         /*
4587          * If we are in association flow - check antenna configuration
4588          * capabilities of the AP station, and choose the watermark accordingly.
4589          */
4590         if (ni) {
4591                 if (ni->ni_flags & IEEE80211_NODE_HT) {
4592 #ifdef notyet
4593                         if (ni->ni_rxmcs[2] != 0)
4594                                 watermark = IWM_SF_W_MARK_MIMO3;
4595                         else if (ni->ni_rxmcs[1] != 0)
4596                                 watermark = IWM_SF_W_MARK_MIMO2;
4597                         else
4598 #endif
4599                                 watermark = IWM_SF_W_MARK_SISO;
4600                 } else {
4601                         watermark = IWM_SF_W_MARK_LEGACY;
4602                 }
4603         /* default watermark value for unassociated mode. */
4604         } else {
4605                 watermark = IWM_SF_W_MARK_MIMO2;
4606         }
4607         sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
4608
4609         for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
4610                 for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
4611                         sf_cmd->long_delay_timeouts[i][j] =
4612                                         htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
4613                 }
4614         }
4615
4616         if (ni) {
4617                 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
4618                        sizeof(iwm_sf_full_timeout));
4619         } else {
4620                 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
4621                        sizeof(iwm_sf_full_timeout_def));
4622         }
4623 }
4624
4625 static int
4626 iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4627 {
4628         struct ieee80211com *ic = &sc->sc_ic;
4629         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4630         struct iwm_sf_cfg_cmd sf_cmd = {
4631                 .state = htole32(IWM_SF_FULL_ON),
4632         };
4633         int ret = 0;
4634
4635         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4636                 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4637
4638         switch (new_state) {
4639         case IWM_SF_UNINIT:
4640         case IWM_SF_INIT_OFF:
4641                 iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4642                 break;
4643         case IWM_SF_FULL_ON:
4644                 iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4645                 break;
4646         default:
4647                 IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4648                     "Invalid state: %d. not sending Smart Fifo cmd\n",
4649                           new_state);
4650                 return EINVAL;
4651         }
4652
4653         ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4654                                    sizeof(sf_cmd), &sf_cmd);
4655         return ret;
4656 }
4657
4658 static int
4659 iwm_send_bt_init_conf(struct iwm_softc *sc)
4660 {
4661         struct iwm_bt_coex_cmd bt_cmd;
4662
4663         bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4664         bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4665
4666         return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4667             &bt_cmd);
4668 }
4669
4670 static boolean_t
4671 iwm_mvm_is_lar_supported(struct iwm_softc *sc)
4672 {
4673         boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4674         boolean_t tlv_lar = fw_has_capa(&sc->ucode_capa,
4675                                         IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4676
4677         if (iwm_lar_disable)
4678                 return FALSE;
4679
4680         /*
4681          * Enable LAR only if it is supported by the FW (TLV) &&
4682          * enabled in the NVM
4683          */
4684         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4685                 return nvm_lar && tlv_lar;
4686         else
4687                 return tlv_lar;
4688 }
4689
4690 static boolean_t
4691 iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *sc)
4692 {
4693         return fw_has_api(&sc->ucode_capa,
4694                           IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4695                fw_has_capa(&sc->ucode_capa,
4696                            IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4697 }
4698
4699 static int
4700 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4701 {
4702         struct iwm_mcc_update_cmd mcc_cmd;
4703         struct iwm_host_cmd hcmd = {
4704                 .id = IWM_MCC_UPDATE_CMD,
4705                 .flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4706                 .data = { &mcc_cmd },
4707         };
4708         int ret;
4709 #ifdef IWM_DEBUG
4710         struct iwm_rx_packet *pkt;
4711         struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4712         struct iwm_mcc_update_resp *mcc_resp;
4713         int n_channels;
4714         uint16_t mcc;
4715 #endif
4716         int resp_v2 = fw_has_capa(&sc->ucode_capa,
4717             IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4718
4719         if (!iwm_mvm_is_lar_supported(sc)) {
4720                 IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
4721                     __func__);
4722                 return 0;
4723         }
4724
4725         memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4726         mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4727         if (iwm_mvm_is_wifi_mcc_supported(sc))
4728                 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4729         else
4730                 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4731
4732         if (resp_v2)
4733                 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4734         else
4735                 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4736
4737         IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4738             "send MCC update to FW with '%c%c' src = %d\n",
4739             alpha2[0], alpha2[1], mcc_cmd.source_id);
4740
4741         ret = iwm_send_cmd(sc, &hcmd);
4742         if (ret)
4743                 return ret;
4744
4745 #ifdef IWM_DEBUG
4746         pkt = hcmd.resp_pkt;
4747
4748         /* Extract MCC response */
4749         if (resp_v2) {
4750                 mcc_resp = (void *)pkt->data;
4751                 mcc = mcc_resp->mcc;
4752                 n_channels =  le32toh(mcc_resp->n_channels);
4753         } else {
4754                 mcc_resp_v1 = (void *)pkt->data;
4755                 mcc = mcc_resp_v1->mcc;
4756                 n_channels =  le32toh(mcc_resp_v1->n_channels);
4757         }
4758
4759         /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4760         if (mcc == 0)
4761                 mcc = 0x3030;  /* "00" - world */
4762
4763         IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4764             "regulatory domain '%c%c' (%d channels available)\n",
4765             mcc >> 8, mcc & 0xff, n_channels);
4766 #endif
4767         iwm_free_resp(sc, &hcmd);
4768
4769         return 0;
4770 }
4771
4772 static void
4773 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4774 {
4775         struct iwm_host_cmd cmd = {
4776                 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4777                 .len = { sizeof(uint32_t), },
4778                 .data = { &backoff, },
4779         };
4780
4781         if (iwm_send_cmd(sc, &cmd) != 0) {
4782                 device_printf(sc->sc_dev,
4783                     "failed to change thermal tx backoff\n");
4784         }
4785 }
4786
/*
 * Bring the device fully up.  The sequence is strictly ordered:
 * start the hardware, run the INIT firmware image (calibration),
 * restart the hardware, load the regular runtime image, and then
 * configure BT coex, antennas, PHY db/config, the aux station,
 * PHY contexts, power, regulatory (MCC), scan, and the Tx queues.
 * Returns 0 on success; on any failure after the runtime image is
 * loaded the device is stopped again before returning the error.
 */
static int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int error, i, ac;

	if ((error = iwm_start_hw(sc)) != 0) {
		printf("iwm_start_hw: failed %d\n", error);
		return error;
	}

	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
		printf("iwm_run_init_mvm_ucode: failed %d\n", error);
		return error;
	}

	/*
	 * Should stop and start HW since that INIT
	 * image just loaded.
	 */
	iwm_stop_device(sc);
	sc->sc_ps_disabled = FALSE;
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* Restart, this time with the regular firmware */
	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
	if (error) {
		device_printf(sc->sc_dev, "could not load firmware\n");
		goto error;
	}

	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
		device_printf(sc->sc_dev, "bt init conf failed\n");
		goto error;
	}

	error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
	if (error != 0) {
		device_printf(sc->sc_dev, "antenna config failed\n");
		goto error;
	}

	/* Send phy db control command and then phy db calibration */
	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
		goto error;

	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
		goto error;
	}

	/* Add auxiliary station for scanning */
	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
		device_printf(sc->sc_dev, "add_aux_sta failed\n");
		goto error;
	}

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		if ((error = iwm_mvm_phy_ctxt_add(sc,
		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
			goto error;
	}

	/* Initialize tx backoffs to the minimum. */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_mvm_tt_tx_backoff(sc, 0);

	error = iwm_mvm_power_update_device(sc);
	if (error)
		goto error;

	/* "ZZ" asks the firmware for the world regulatory domain. */
	if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
		goto error;

	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
			goto error;
	}

	/* Enable Tx queues. */
	for (ac = 0; ac < WME_NUM_AC; ac++) {
		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
		    iwm_mvm_ac_to_tx_fifo[ac]);
		if (error)
			goto error;
	}

	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
		goto error;
	}

	return 0;

 error:
	iwm_stop_device(sc);
	return error;
}
4893
4894 /* Allow multicast from our BSSID. */
4895 static int
4896 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4897 {
4898         struct ieee80211_node *ni = vap->iv_bss;
4899         struct iwm_mcast_filter_cmd *cmd;
4900         size_t size;
4901         int error;
4902
4903         size = roundup(sizeof(*cmd), 4);
4904         cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4905         if (cmd == NULL)
4906                 return ENOMEM;
4907         cmd->filter_own = 1;
4908         cmd->port_id = 0;
4909         cmd->count = 0;
4910         cmd->pass_all = 1;
4911         IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4912
4913         error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4914             IWM_CMD_SYNC, size, cmd);
4915         free(cmd, M_DEVBUF);
4916
4917         return (error);
4918 }
4919
4920 /*
4921  * ifnet interfaces
4922  */
4923
4924 static void
4925 iwm_init(struct iwm_softc *sc)
4926 {
4927         int error;
4928
4929         if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4930                 return;
4931         }
4932         sc->sc_generation++;
4933         sc->sc_flags &= ~IWM_FLAG_STOPPED;
4934
4935         if ((error = iwm_init_hw(sc)) != 0) {
4936                 printf("iwm_init_hw failed %d\n", error);
4937                 iwm_stop(sc);
4938                 return;
4939         }
4940
4941         /*
4942          * Ok, firmware loaded and we are jogging
4943          */
4944         sc->sc_flags |= IWM_FLAG_HW_INITED;
4945         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4946 }
4947
4948 static int
4949 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4950 {
4951         struct iwm_softc *sc;
4952         int error;
4953
4954         sc = ic->ic_softc;
4955
4956         IWM_LOCK(sc);
4957         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4958                 IWM_UNLOCK(sc);
4959                 return (ENXIO);
4960         }
4961         error = mbufq_enqueue(&sc->sc_snd, m);
4962         if (error) {
4963                 IWM_UNLOCK(sc);
4964                 return (error);
4965         }
4966         iwm_start(sc);
4967         IWM_UNLOCK(sc);
4968         return (0);
4969 }
4970
4971 /*
4972  * Dequeue packets from sendq and call send.
4973  */
4974 static void
4975 iwm_start(struct iwm_softc *sc)
4976 {
4977         struct ieee80211_node *ni;
4978         struct mbuf *m;
4979         int ac = 0;
4980
4981         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4982         while (sc->qfullmsk == 0 &&
4983                 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4984                 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4985                 if (iwm_tx(sc, m, ni, ac) != 0) {
4986                         if_inc_counter(ni->ni_vap->iv_ifp,
4987                             IFCOUNTER_OERRORS, 1);
4988                         ieee80211_free_node(ni);
4989                         continue;
4990                 }
4991                 sc->sc_tx_timer = 15;
4992         }
4993         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4994 }
4995
/*
 * Tear the interface down: clear the running state, stop LED
 * blinking and the Tx watchdog, and power the device off.
 */
static void
iwm_stop(struct iwm_softc *sc)
{

	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	sc->sc_flags |= IWM_FLAG_STOPPED;
	/* NOTE(review): generation bump presumably invalidates stale
	 * completions from the old firmware session — confirm against
	 * the sc_generation readers. */
	sc->sc_generation++;
	iwm_led_blink_stop(sc);
	sc->sc_tx_timer = 0;
	iwm_stop_device(sc);
	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
}
5008
5009 static void
5010 iwm_watchdog(void *arg)
5011 {
5012         struct iwm_softc *sc = arg;
5013         struct ieee80211com *ic = &sc->sc_ic;
5014
5015         if (sc->sc_tx_timer > 0) {
5016                 if (--sc->sc_tx_timer == 0) {
5017                         device_printf(sc->sc_dev, "device timeout\n");
5018 #ifdef IWM_DEBUG
5019                         iwm_nic_error(sc);
5020 #endif
5021                         ieee80211_restart_all(ic);
5022                         counter_u64_add(sc->sc_ic.ic_oerrors, 1);
5023                         return;
5024                 }
5025         }
5026         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
5027 }
5028
5029 static void
5030 iwm_parent(struct ieee80211com *ic)
5031 {
5032         struct iwm_softc *sc = ic->ic_softc;
5033         int startall = 0;
5034
5035         IWM_LOCK(sc);
5036         if (ic->ic_nrunning > 0) {
5037                 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5038                         iwm_init(sc);
5039                         startall = 1;
5040                 }
5041         } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5042                 iwm_stop(sc);
5043         IWM_UNLOCK(sc);
5044         if (startall)
5045                 ieee80211_start_all(ic);
5046 }
5047
5048 /*
5049  * The interrupt side of things
5050  */
5051
5052 /*
5053  * error dumping routines are from iwlwifi/mvm/utils.c
5054  */
5055
5056 /*
5057  * Note: This structure is read from the device with IO accesses,
5058  * and the reading already does the endian conversion. As it is
5059  * read with uint32_t-sized accesses, any members with a different size
5060  * need to be ordered correctly though!
5061  */
/*
 * LMAC firmware error log layout (LOG_ERROR_TABLE_API_S_VER_3), read
 * out of device memory by iwm_nic_error().  Field order and sizes
 * must match the firmware exactly; do not reorder.
 */
struct iwm_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;		/* type of error */
	uint32_t trm_hw_status0;	/* TRM HW status */
	uint32_t trm_hw_status1;	/* TRM HW status */
	uint32_t blink2;		/* branch link */
	uint32_t ilink1;		/* interrupt link */
	uint32_t ilink2;		/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;		/* beacon timer */
	uint32_t tsf_low;		/* network timestamp function timer */
	uint32_t tsf_hi;		/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t fw_rev_type;	/* firmware revision type */
	uint32_t major;		/* uCode version major */
	uint32_t minor;		/* uCode version minor */
	uint32_t hw_ver;		/* HW Silicon version */
	uint32_t brd_ver;		/* HW board version */
	uint32_t log_pc;		/* log program counter */
	uint32_t frame_ptr;		/* frame pointer */
	uint32_t stack_ptr;		/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
	uint32_t wait_event;		/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* date/time of the firmware compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5109
5110 /*
5111  * UMAC error struct - relevant starting from family 8000 chip.
5112  * Note: This structure is read from the device with IO accesses,
5113  * and the reading already does the endian conversion. As it is
5114  * read with u32-sized accesses, any members with a different size
5115  * need to be ordered correctly though!
5116  */
/*
 * UMAC firmware error log layout, read out of device memory by
 * iwm_nic_umac_error().  Field order and sizes must match the
 * firmware exactly; do not reorder.
 */
struct iwm_umac_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t umac_major;	/* UMAC uCode version major */
	uint32_t umac_minor;	/* UMAC uCode version minor */
	uint32_t frame_pointer; /* core register 27*/
	uint32_t stack_pointer; /* core register 28 */
	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
	uint32_t nic_isr_pref;	/* ISR status register */
} __packed;
5134
/* Layout constants used when interpreting the firmware error log. */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5137
5138 #ifdef IWM_DEBUG
/*
 * Human-readable names for known firmware assert/error codes.  The
 * final entry, "ADVANCED_SYSASSERT", doubles as the fallback for
 * unknown codes — iwm_desc_lookup() relies on it being last.
 * NOTE(review): this table looks file-local; it could probably be
 * `static const` — confirm no other file references it.
 */
struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
5160
5161 static const char *
5162 iwm_desc_lookup(uint32_t num)
5163 {
5164         int i;
5165
5166         for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5167                 if (advanced_lookup[i].num == num)
5168                         return advanced_lookup[i].name;
5169
5170         /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5171         return advanced_lookup[i].name;
5172 }
5173
/*
 * Dump the UMAC (second core, family 8000+) firmware error log via
 * device_printf().  The table is read out of device memory; the IO
 * accessors already perform the endian conversion.
 */
static void
iwm_nic_umac_error(struct iwm_softc *sc)
{
	struct iwm_umac_error_event_table table;
	uint32_t base;

	/* Device-memory address of the UMAC error table. */
	base = sc->umac_error_event_table;

	if (base < 0x800000) {
		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
		    base);
		return;
	}

	/* iwm_read_mem() takes a count of 32-bit words, not bytes. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
		iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
	    table.ilink1);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
	    table.ilink2);
	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
	    table.frame_pointer);
	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
	    table.stack_pointer);
	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
	    table.nic_isr_pref);
}
5220
5221 /*
5222  * Support for dumping the error log seemed like a good idea ...
5223  * but it's mostly hex junk and the only sensible thing is the
5224  * hw/ucode revision (which we know anyway).  Since it's here,
5225  * I'll just leave it in, just in case e.g. the Intel guys want to
5226  * help us decipher some "ADVANCED_SYSASSERT" later.
5227  */
/*
 * Dump the LMAC firmware error log via device_printf(), then the
 * UMAC log if one was reported.  The table is read out of device
 * memory; the IO accessors already perform the endian conversion.
 */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table table;
	uint32_t base;

	device_printf(sc->sc_dev, "dumping device error log\n");
	/* Device-memory address of the error table. */
	base = sc->error_event_table;
	if (base < 0x800000) {
		device_printf(sc->sc_dev,
		    "Invalid error log pointer 0x%08x\n", base);
		return;
	}

	/* iwm_read_mem() takes a count of 32-bit words, not bytes. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (!table.valid) {
		device_printf(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
	    iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
	    table.trm_hw_status0);
	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
	    table.trm_hw_status1);
	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
	    table.fw_rev_type);
	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);

	/* Family 8000+ also carries a UMAC error table. */
	if (sc->umac_error_event_table)
		iwm_nic_umac_error(sc);
}
5300 #endif
5301
5302 static void
5303 iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5304 {
5305         struct ieee80211com *ic = &sc->sc_ic;
5306         struct iwm_cmd_response *cresp;
5307         struct mbuf *m1;
5308         uint32_t offset = 0;
5309         uint32_t maxoff = IWM_RBUF_SIZE;
5310         uint32_t nextoff;
5311         boolean_t stolen = FALSE;
5312
5313 #define HAVEROOM(a)     \
5314     ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
5315
5316         while (HAVEROOM(offset)) {
5317                 struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5318                     offset);
5319                 int qid, idx, code, len;
5320
5321                 qid = pkt->hdr.qid;
5322                 idx = pkt->hdr.idx;
5323
5324                 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5325
5326                 /*
5327                  * randomly get these from the firmware, no idea why.
5328                  * they at least seem harmless, so just ignore them for now
5329                  */
5330                 if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5331                     pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5332                         break;
5333                 }
5334
5335                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5336                     "rx packet qid=%d idx=%d type=%x\n",
5337                     qid & ~0x80, pkt->hdr.idx, code);
5338
5339                 len = iwm_rx_packet_len(pkt);
5340                 len += sizeof(uint32_t); /* account for status word */
5341                 nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
5342
5343                 iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5344
5345                 switch (code) {
5346                 case IWM_REPLY_RX_PHY_CMD:
5347                         iwm_mvm_rx_rx_phy_cmd(sc, pkt);
5348                         break;
5349
5350                 case IWM_REPLY_RX_MPDU_CMD: {
5351                         /*
5352                          * If this is the last frame in the RX buffer, we
5353                          * can directly feed the mbuf to the sharks here.
5354                          */
5355                         struct iwm_rx_packet *nextpkt = mtodoff(m,
5356                             struct iwm_rx_packet *, nextoff);
5357                         if (!HAVEROOM(nextoff) ||
5358                             (nextpkt->hdr.code == 0 &&
5359                              (nextpkt->hdr.qid & ~0x80) == 0 &&
5360                              nextpkt->hdr.idx == 0) ||
5361                             (nextpkt->len_n_flags ==
5362                              htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5363                                 if (iwm_mvm_rx_rx_mpdu(sc, m, offset, stolen)) {
5364                                         stolen = FALSE;
5365                                         /* Make sure we abort the loop */
5366                                         nextoff = maxoff;
5367                                 }
5368                                 break;
5369                         }
5370
5371                         /*
5372                          * Use m_copym instead of m_split, because that
5373                          * makes it easier to keep a valid rx buffer in
5374                          * the ring, when iwm_mvm_rx_rx_mpdu() fails.
5375                          *
5376                          * We need to start m_copym() at offset 0, to get the
5377                          * M_PKTHDR flag preserved.
5378                          */
5379                         m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
5380                         if (m1) {
5381                                 if (iwm_mvm_rx_rx_mpdu(sc, m1, offset, stolen))
5382                                         stolen = TRUE;
5383                                 else
5384                                         m_freem(m1);
5385                         }
5386                         break;
5387                 }
5388
5389                 case IWM_TX_CMD:
5390                         iwm_mvm_rx_tx_cmd(sc, pkt);
5391                         break;
5392
5393                 case IWM_MISSED_BEACONS_NOTIFICATION: {
5394                         struct iwm_missed_beacons_notif *resp;
5395                         int missed;
5396
5397                         /* XXX look at mac_id to determine interface ID */
5398                         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5399
5400                         resp = (void *)pkt->data;
5401                         missed = le32toh(resp->consec_missed_beacons);
5402
5403                         IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5404                             "%s: MISSED_BEACON: mac_id=%d, "
5405                             "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5406                             "num_rx=%d\n",
5407                             __func__,
5408                             le32toh(resp->mac_id),
5409                             le32toh(resp->consec_missed_beacons_since_last_rx),
5410                             le32toh(resp->consec_missed_beacons),
5411                             le32toh(resp->num_expected_beacons),
5412                             le32toh(resp->num_recvd_beacons));
5413
5414                         /* Be paranoid */
5415                         if (vap == NULL)
5416                                 break;
5417
5418                         /* XXX no net80211 locking? */
5419                         if (vap->iv_state == IEEE80211_S_RUN &&
5420                             (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5421                                 if (missed > vap->iv_bmissthreshold) {
5422                                         /* XXX bad locking; turn into task */
5423                                         IWM_UNLOCK(sc);
5424                                         ieee80211_beacon_miss(ic);
5425                                         IWM_LOCK(sc);
5426                                 }
5427                         }
5428
5429                         break;
5430                 }
5431
5432                 case IWM_MFUART_LOAD_NOTIFICATION:
5433                         break;
5434
5435                 case IWM_MVM_ALIVE:
5436                         break;
5437
5438                 case IWM_CALIB_RES_NOTIF_PHY_DB:
5439                         break;
5440
5441                 case IWM_STATISTICS_NOTIFICATION:
5442                         iwm_mvm_handle_rx_statistics(sc, pkt);
5443                         break;
5444
5445                 case IWM_NVM_ACCESS_CMD:
5446                 case IWM_MCC_UPDATE_CMD:
5447                         if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5448                                 memcpy(sc->sc_cmd_resp,
5449                                     pkt, sizeof(sc->sc_cmd_resp));
5450                         }
5451                         break;
5452
5453                 case IWM_MCC_CHUB_UPDATE_CMD: {
5454                         struct iwm_mcc_chub_notif *notif;
5455                         notif = (void *)pkt->data;
5456
5457                         sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5458                         sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5459                         sc->sc_fw_mcc[2] = '\0';
5460                         IWM_DPRINTF(sc, IWM_DEBUG_LAR,
5461                             "fw source %d sent CC '%s'\n",
5462                             notif->source_id, sc->sc_fw_mcc);
5463                         break;
5464                 }
5465
5466                 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5467                 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5468                                  IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5469                         struct iwm_dts_measurement_notif_v1 *notif;
5470
5471                         if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5472                                 device_printf(sc->sc_dev,
5473                                     "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5474                                 break;
5475                         }
5476                         notif = (void *)pkt->data;
5477                         IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5478                             "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5479                             notif->temp);
5480                         break;
5481                 }
5482
5483                 case IWM_PHY_CONFIGURATION_CMD:
5484                 case IWM_TX_ANT_CONFIGURATION_CMD:
5485                 case IWM_ADD_STA:
5486                 case IWM_MAC_CONTEXT_CMD:
5487                 case IWM_REPLY_SF_CFG_CMD:
5488                 case IWM_POWER_TABLE_CMD:
5489                 case IWM_PHY_CONTEXT_CMD:
5490                 case IWM_BINDING_CONTEXT_CMD:
5491                 case IWM_TIME_EVENT_CMD:
5492                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5493                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5494                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5495                 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5496                 case IWM_SCAN_OFFLOAD_ABORT_CMD:
5497                 case IWM_REPLY_BEACON_FILTERING_CMD:
5498                 case IWM_MAC_PM_POWER_TABLE:
5499                 case IWM_TIME_QUOTA_CMD:
5500                 case IWM_REMOVE_STA:
5501                 case IWM_TXPATH_FLUSH:
5502                 case IWM_LQ_CMD:
5503                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
5504                                  IWM_FW_PAGING_BLOCK_CMD):
5505                 case IWM_BT_CONFIG:
5506                 case IWM_REPLY_THERMAL_MNG_BACKOFF:
5507                         cresp = (void *)pkt->data;
5508                         if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5509                                 memcpy(sc->sc_cmd_resp,
5510                                     pkt, sizeof(*pkt)+sizeof(*cresp));
5511                         }
5512                         break;
5513
5514                 /* ignore */
5515                 case IWM_PHY_DB_CMD:
5516                         break;
5517
5518                 case IWM_INIT_COMPLETE_NOTIF:
5519                         break;
5520
5521                 case IWM_SCAN_OFFLOAD_COMPLETE: {
5522                         struct iwm_periodic_scan_complete *notif;
5523                         notif = (void *)pkt->data;
5524                         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5525                                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5526                                 ieee80211_runtask(ic, &sc->sc_es_task);
5527                         }
5528                         break;
5529                 }
5530
5531                 case IWM_SCAN_ITERATION_COMPLETE: {
5532                         struct iwm_lmac_scan_complete_notif *notif;
5533                         notif = (void *)pkt->data;
5534                         ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5535                         break;
5536                 }
5537  
5538                 case IWM_SCAN_COMPLETE_UMAC: {
5539                         struct iwm_umac_scan_complete *notif;
5540                         notif = (void *)pkt->data;
5541
5542                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
5543                             "UMAC scan complete, status=0x%x\n",
5544                             notif->status);
5545                         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5546                                 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5547                                 ieee80211_runtask(ic, &sc->sc_es_task);
5548                         }
5549                         break;
5550                 }
5551
5552                 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5553                         struct iwm_umac_scan_iter_complete_notif *notif;
5554                         notif = (void *)pkt->data;
5555
5556                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5557                             "complete, status=0x%x, %d channels scanned\n",
5558                             notif->status, notif->scanned_channels);
5559                         ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5560                         break;
5561                 }
5562
5563                 case IWM_REPLY_ERROR: {
5564                         struct iwm_error_resp *resp;
5565                         resp = (void *)pkt->data;
5566
5567                         device_printf(sc->sc_dev,
5568                             "firmware error 0x%x, cmd 0x%x\n",
5569                             le32toh(resp->error_type),
5570                             resp->cmd_id);
5571                         break;
5572                 }
5573
5574                 case IWM_TIME_EVENT_NOTIFICATION: {
5575                         struct iwm_time_event_notif *notif;
5576                         notif = (void *)pkt->data;
5577
5578                         IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5579                             "TE notif status = 0x%x action = 0x%x\n",
5580                             notif->status, notif->action);
5581                         break;
5582                 }
5583
5584                 /*
5585                  * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
5586                  * messages. Just ignore them for now.
5587                  */
5588                 case IWM_DEBUG_LOG_MSG:
5589                         break;
5590
5591                 case IWM_MCAST_FILTER_CMD:
5592                         break;
5593
5594                 case IWM_SCD_QUEUE_CFG: {
5595                         struct iwm_scd_txq_cfg_rsp *rsp;
5596                         rsp = (void *)pkt->data;
5597
5598                         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5599                             "queue cfg token=0x%x sta_id=%d "
5600                             "tid=%d scd_queue=%d\n",
5601                             rsp->token, rsp->sta_id, rsp->tid,
5602                             rsp->scd_queue);
5603                         break;
5604                 }
5605
5606                 default:
5607                         device_printf(sc->sc_dev,
5608                             "frame %d/%d %x UNHANDLED (this should "
5609                             "not happen)\n", qid & ~0x80, idx,
5610                             pkt->len_n_flags);
5611                         break;
5612                 }
5613
5614                 /*
5615                  * Why test bit 0x80?  The Linux driver:
5616                  *
5617                  * There is one exception:  uCode sets bit 15 when it
5618                  * originates the response/notification, i.e. when the
5619                  * response/notification is not a direct response to a
5620                  * command sent by the driver.  For example, uCode issues
5621                  * IWM_REPLY_RX when it sends a received frame to the driver;
5622                  * it is not a direct response to any driver command.
5623                  *
5624                  * Ok, so since when is 7 == 15?  Well, the Linux driver
5625                  * uses a slightly different format for pkt->hdr, and "qid"
5626                  * is actually the upper byte of a two-byte field.
5627                  */
5628                 if (!(qid & (1 << 7)))
5629                         iwm_cmd_done(sc, pkt);
5630
5631                 offset = nextoff;
5632         }
5633         if (stolen)
5634                 m_freem(m);
5635 #undef HAVEROOM
5636 }
5637
5638 /*
5639  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5640  * Basic structure from if_iwn
5641  */
5642 static void
5643 iwm_notif_intr(struct iwm_softc *sc)
5644 {
5645         uint16_t hw;
5646
5647         bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5648             BUS_DMASYNC_POSTREAD);
5649
5650         hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5651
5652         /*
5653          * Process responses
5654          */
5655         while (sc->rxq.cur != hw) {
5656                 struct iwm_rx_ring *ring = &sc->rxq;
5657                 struct iwm_rx_data *data = &ring->data[ring->cur];
5658
5659                 bus_dmamap_sync(ring->data_dmat, data->map,
5660                     BUS_DMASYNC_POSTREAD);
5661
5662                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5663                     "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5664                 iwm_handle_rxb(sc, data->m);
5665
5666                 ring->cur = (ring->cur + 1) % IWM_RX_RING_COUNT;
5667         }
5668
5669         /*
5670          * Tell the firmware that it can reuse the ring entries that
5671          * we have just processed.
5672          * Seems like the hardware gets upset unless we align
5673          * the write by 8??
5674          */
5675         hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5676         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, rounddown2(hw, 8));
5677 }
5678
/*
 * Main interrupt handler.
 *
 * Gathers the interrupt cause bits either from the ICT (interrupt cause
 * table, a DMA'd array the hardware writes) or directly from the CSR
 * registers, acknowledges them, and dispatches: firmware SW errors
 * (VAP restart), hardware errors (device stop), firmware-chunk-loaded
 * wakeups, rfkill, and RX notifications (via iwm_notif_intr()).
 */
static void
iwm_intr(void *arg)
{
        struct iwm_softc *sc = arg;
        int handled = 0;
        int r1, r2, rv = 0;
        int isperiodic = 0;

        IWM_LOCK(sc);
        /* Mask further interrupts while this one is serviced. */
        IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

        if (sc->sc_flags & IWM_FLAG_USE_ICT) {
                uint32_t *ict = sc->ict_dma.vaddr;
                int tmp;

                tmp = htole32(ict[sc->ict_cur]);
                if (!tmp)
                        goto out_ena;   /* nothing latched; spurious */

                /*
                 * ok, there was something.  keep plowing until we have all.
                 */
                r1 = r2 = 0;
                while (tmp) {
                        r1 |= tmp;
                        /* Clear the slot so it isn't processed twice. */
                        ict[sc->ict_cur] = 0;
                        sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
                        tmp = htole32(ict[sc->ict_cur]);
                }

                /* this is where the fun begins.  don't ask */
                if (r1 == 0xffffffff)
                        r1 = 0;

                /* i am not expected to understand this */
                if (r1 & 0xc0000)
                        r1 |= 0x8000;
                /* Expand the packed ICT value into CSR_INT bit positions. */
                r1 = (0xff & r1) | ((0xff00 & r1) << 16);
        } else {
                r1 = IWM_READ(sc, IWM_CSR_INT);
                /* "hardware gone" (where, fishing?) */
                if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
                        goto out;
                r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
        }
        if (r1 == 0 && r2 == 0) {
                goto out_ena;
        }

        /* Acknowledge the causes we are about to handle. */
        IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

        /* Safely ignore these bits for debug checks below */
        r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);

        if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
                int i;
                struct ieee80211com *ic = &sc->sc_ic;
                struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

#ifdef IWM_DEBUG
                iwm_nic_error(sc);
#endif
                /* Dump driver status (TX and RX rings) while we're here. */
                device_printf(sc->sc_dev, "driver status:\n");
                for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
                        struct iwm_tx_ring *ring = &sc->txq[i];
                        device_printf(sc->sc_dev,
                            "  tx ring %2d: qid=%-2d cur=%-3d "
                            "queued=%-3d\n",
                            i, ring->qid, ring->cur, ring->queued);
                }
                device_printf(sc->sc_dev,
                    "  rx ring: cur=%d\n", sc->rxq.cur);
                device_printf(sc->sc_dev,
                    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);

                /* Don't stop the device; just do a VAP restart */
                IWM_UNLOCK(sc);

                if (vap == NULL) {
                        printf("%s: null vap\n", __func__);
                        return;
                }

                device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
                    "restarting\n", __func__, vap->iv_state);

                /* Restart all VAPs; returns without re-taking the lock. */
                ieee80211_restart_all(ic);
                return;
        }

        if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
                handled |= IWM_CSR_INT_BIT_HW_ERR;
                device_printf(sc->sc_dev, "hardware error, stopping device\n");
                iwm_stop(sc);
                rv = 1;
                goto out;
        }

        /* firmware chunk loaded */
        if (r1 & IWM_CSR_INT_BIT_FH_TX) {
                IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
                handled |= IWM_CSR_INT_BIT_FH_TX;
                /* Wake the thread sleeping in the firmware-load path. */
                sc->sc_fw_chunk_done = 1;
                wakeup(&sc->sc_fw);
        }

        if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
                handled |= IWM_CSR_INT_BIT_RF_KILL;
                if (iwm_check_rfkill(sc)) {
                        device_printf(sc->sc_dev,
                            "%s: rfkill switch, disabling interface\n",
                            __func__);
                        iwm_stop(sc);
                }
        }

        /*
         * The Linux driver uses periodic interrupts to avoid races.
         * We cargo-cult like it's going out of fashion.
         */
        if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
                handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
                IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
                if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
                        IWM_WRITE_1(sc,
                            IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
                isperiodic = 1;
        }

        if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
                handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
                IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

                iwm_notif_intr(sc);

                /* enable periodic interrupt, see above */
                if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
                        IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
                            IWM_CSR_INT_PERIODIC_ENA);
        }

        if (__predict_false(r1 & ~handled))
                IWM_DPRINTF(sc, IWM_DEBUG_INTR,
                    "%s: unhandled interrupts: %x\n", __func__, r1);
        rv = 1;

 out_ena:
        iwm_restore_interrupts(sc);
 out:
        IWM_UNLOCK(sc);
        return;
}
5832
5833 /*
5834  * Autoconf glue-sniffing
5835  */
5836 #define PCI_VENDOR_INTEL                0x8086
5837 #define PCI_PRODUCT_INTEL_WL_3160_1     0x08b3
5838 #define PCI_PRODUCT_INTEL_WL_3160_2     0x08b4
5839 #define PCI_PRODUCT_INTEL_WL_3165_1     0x3165
5840 #define PCI_PRODUCT_INTEL_WL_3165_2     0x3166
5841 #define PCI_PRODUCT_INTEL_WL_7260_1     0x08b1
5842 #define PCI_PRODUCT_INTEL_WL_7260_2     0x08b2
5843 #define PCI_PRODUCT_INTEL_WL_7265_1     0x095a
5844 #define PCI_PRODUCT_INTEL_WL_7265_2     0x095b
5845 #define PCI_PRODUCT_INTEL_WL_8260_1     0x24f3
5846 #define PCI_PRODUCT_INTEL_WL_8260_2     0x24f4
5847 #define PCI_PRODUCT_INTEL_WL_8265_1     0x24fd
5848
5849 static const struct iwm_devices {
5850         uint16_t                device;
5851         const struct iwm_cfg    *cfg;
5852 } iwm_devices[] = {
5853         { PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
5854         { PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
5855         { PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
5856         { PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
5857         { PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
5858         { PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
5859         { PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
5860         { PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
5861         { PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
5862         { PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
5863         { PCI_PRODUCT_INTEL_WL_8265_1, &iwm8265_cfg },
5864 };
5865
5866 static int
5867 iwm_probe(device_t dev)
5868 {
5869         int i;
5870
5871         for (i = 0; i < nitems(iwm_devices); i++) {
5872                 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5873                     pci_get_device(dev) == iwm_devices[i].device) {
5874                         device_set_desc(dev, iwm_devices[i].cfg->name);
5875                         return (BUS_PROBE_DEFAULT);
5876                 }
5877         }
5878
5879         return (ENXIO);
5880 }
5881
5882 static int
5883 iwm_dev_check(device_t dev)
5884 {
5885         struct iwm_softc *sc;
5886         uint16_t devid;
5887         int i;
5888
5889         sc = device_get_softc(dev);
5890
5891         devid = pci_get_device(dev);
5892         for (i = 0; i < nitems(iwm_devices); i++) {
5893                 if (iwm_devices[i].device == devid) {
5894                         sc->cfg = iwm_devices[i].cfg;
5895                         return (0);
5896                 }
5897         }
5898         device_printf(dev, "unknown adapter type\n");
5899         return ENXIO;
5900 }
5901
5902 /* PCI registers */
5903 #define PCI_CFG_RETRY_TIMEOUT   0x041
5904
5905 static int
5906 iwm_pci_attach(device_t dev)
5907 {
5908         struct iwm_softc *sc;
5909         int count, error, rid;
5910         uint16_t reg;
5911
5912         sc = device_get_softc(dev);
5913
5914         /* We disable the RETRY_TIMEOUT register (0x41) to keep
5915          * PCI Tx retries from interfering with C3 CPU state */
5916         pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5917
5918         /* Enable bus-mastering and hardware bug workaround. */
5919         pci_enable_busmaster(dev);
5920         reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5921         /* if !MSI */
5922         if (reg & PCIM_STATUS_INTxSTATE) {
5923                 reg &= ~PCIM_STATUS_INTxSTATE;
5924         }
5925         pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5926
5927         rid = PCIR_BAR(0);
5928         sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5929             RF_ACTIVE);
5930         if (sc->sc_mem == NULL) {
5931                 device_printf(sc->sc_dev, "can't map mem space\n");
5932                 return (ENXIO);
5933         }
5934         sc->sc_st = rman_get_bustag(sc->sc_mem);
5935         sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5936
5937         /* Install interrupt handler. */
5938         count = 1;
5939         rid = 0;
5940         if (pci_alloc_msi(dev, &count) == 0)
5941                 rid = 1;
5942         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5943             (rid != 0 ? 0 : RF_SHAREABLE));
5944         if (sc->sc_irq == NULL) {
5945                 device_printf(dev, "can't map interrupt\n");
5946                         return (ENXIO);
5947         }
5948         error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5949             NULL, iwm_intr, sc, &sc->sc_ih);
5950         if (sc->sc_ih == NULL) {
5951                 device_printf(dev, "can't establish interrupt");
5952                         return (ENXIO);
5953         }
5954         sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5955
5956         return (0);
5957 }
5958
5959 static void
5960 iwm_pci_detach(device_t dev)
5961 {
5962         struct iwm_softc *sc = device_get_softc(dev);
5963
5964         if (sc->sc_irq != NULL) {
5965                 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5966                 bus_release_resource(dev, SYS_RES_IRQ,
5967                     rman_get_rid(sc->sc_irq), sc->sc_irq);
5968                 pci_release_msi(dev);
5969         }
5970         if (sc->sc_mem != NULL)
5971                 bus_release_resource(dev, SYS_RES_MEMORY,
5972                     rman_get_rid(sc->sc_mem), sc->sc_mem);
5973 }
5974
5975
5976
/*
 * Device attach.  Initializes locks/callouts/tasks, performs PCI and
 * hardware bring-up, allocates all DMA resources (firmware memory,
 * keep-warm page, ICT table, TX scheduler, TX/RX rings), seeds the
 * net80211 com structure, and schedules iwm_preinit() via a config
 * intrhook for the firmware-dependent part of initialization.
 *
 * Returns 0 on success; on any failure, tears everything down with
 * iwm_detach_local() and returns ENXIO.
 */
static int
iwm_attach(device_t dev)
{
        struct iwm_softc *sc = device_get_softc(dev);
        struct ieee80211com *ic = &sc->sc_ic;
        int error;
        int txq_i, i;

        sc->sc_dev = dev;
        sc->sc_attached = 1;
        IWM_LOCK_INIT(sc);
        mbufq_init(&sc->sc_snd, ifqmaxlen);
        callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
        callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
        TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);

        sc->sc_notif_wait = iwm_notification_wait_init(sc);
        if (sc->sc_notif_wait == NULL) {
                device_printf(dev, "failed to init notification wait struct\n");
                goto fail;
        }

        /* Init phy db */
        sc->sc_phy_db = iwm_phy_db_init(sc);
        if (!sc->sc_phy_db) {
                device_printf(dev, "Cannot init phy_db\n");
                goto fail;
        }

        /* PCI attach */
        error = iwm_pci_attach(dev);
        if (error != 0)
                goto fail;

        /* No synchronous command response outstanding yet. */
        sc->sc_wantresp = -1;

        /* Check device type */
        error = iwm_dev_check(dev);
        if (error != 0)
                goto fail;

        sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
        /*
         * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
         * changed, and now the revision step also includes bit 0-1 (no more
         * "dash" value). To keep hw_rev backwards compatible - we'll store it
         * in the old format.
         */
        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
                sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
                                (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

        if (iwm_prepare_card_hw(sc) != 0) {
                device_printf(dev, "could not initialize hardware\n");
                goto fail;
        }

        if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
                int ret;
                uint32_t hw_step;

                /*
                 * In order to recognize C step the driver should read the
                 * chip version id located at the AUX bus MISC address.
                 */
                IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
                            IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
                DELAY(2);

                /* Wait (up to 25ms) for the MAC clock to come up. */
                ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
                                   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
                                   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
                                   25000);
                if (!ret) {
                        device_printf(sc->sc_dev,
                            "Failed to wake up the nic\n");
                        goto fail;
                }

                if (iwm_nic_lock(sc)) {
                        hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
                        hw_step |= IWM_ENABLE_WFPM;
                        iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
                        hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
                        hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
                        if (hw_step == 0x3)
                                sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
                                                (IWM_SILICON_C_STEP << 2);
                        iwm_nic_unlock(sc);
                } else {
                        device_printf(sc->sc_dev, "Failed to lock the nic\n");
                        goto fail;
                }
        }

        /* special-case 7265D, it has the same PCI IDs. */
        if (sc->cfg == &iwm7265_cfg &&
            (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
                sc->cfg = &iwm7265d_cfg;
        }

        /* Allocate DMA memory for firmware transfers. */
        if ((error = iwm_alloc_fwmem(sc)) != 0) {
                device_printf(dev, "could not allocate memory for firmware\n");
                goto fail;
        }

        /* Allocate "Keep Warm" page. */
        if ((error = iwm_alloc_kw(sc)) != 0) {
                device_printf(dev, "could not allocate keep warm page\n");
                goto fail;
        }

        /* We use ICT interrupts */
        if ((error = iwm_alloc_ict(sc)) != 0) {
                device_printf(dev, "could not allocate ICT table\n");
                goto fail;
        }

        /* Allocate TX scheduler "rings". */
        if ((error = iwm_alloc_sched(sc)) != 0) {
                device_printf(dev, "could not allocate TX scheduler rings\n");
                goto fail;
        }

        /* Allocate TX rings */
        for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
                if ((error = iwm_alloc_tx_ring(sc,
                    &sc->txq[txq_i], txq_i)) != 0) {
                        device_printf(dev,
                            "could not allocate TX ring %d\n",
                            txq_i);
                        goto fail;
                }
        }

        /* Allocate RX ring. */
        if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
                device_printf(dev, "could not allocate RX ring\n");
                goto fail;
        }

        /* Clear pending interrupts. */
        IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

        ic->ic_softc = sc;
        ic->ic_name = device_get_nameunit(sc->sc_dev);
        ic->ic_phytype = IEEE80211_T_OFDM;      /* not only, but not used */
        ic->ic_opmode = IEEE80211_M_STA;        /* default to BSS mode */

        /* Set device capabilities. */
        ic->ic_caps =
            IEEE80211_C_STA |
            IEEE80211_C_WPA |           /* WPA/RSN */
            IEEE80211_C_WME |
            IEEE80211_C_PMGT |
            IEEE80211_C_SHSLOT |        /* short slot time supported */
            IEEE80211_C_SHPREAMBLE      /* short preamble supported */
//          IEEE80211_C_BGSCAN          /* capable of bg scanning */
            ;
        /* Advertise full-offload scanning */
        ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
        for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
                sc->sc_phyctxt[i].id = i;
                sc->sc_phyctxt[i].color = 0;
                sc->sc_phyctxt[i].ref = 0;
                sc->sc_phyctxt[i].channel = NULL;
        }

        /* Default noise floor */
        sc->sc_noise = -96;

        /* Max RSSI */
        sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;

        /* Defer firmware-dependent init until interrupts are enabled. */
        sc->sc_preinit_hook.ich_func = iwm_preinit;
        sc->sc_preinit_hook.ich_arg = sc;
        if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
                device_printf(dev, "config_intrhook_establish failed\n");
                goto fail;
        }

#ifdef IWM_DEBUG
        SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
            CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

        IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
            "<-%s\n", __func__);

        return 0;

        /* Free allocated memory if something failed during attachment. */
fail:
        iwm_detach_local(sc, 0);

        return ENXIO;
}
6176
6177 static int
6178 iwm_is_valid_ether_addr(uint8_t *addr)
6179 {
6180         char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6181
6182         if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6183                 return (FALSE);
6184
6185         return (TRUE);
6186 }
6187
6188 static int
6189 iwm_wme_update(struct ieee80211com *ic)
6190 {
6191 #define IWM_EXP2(x)     ((1 << (x)) - 1)        /* CWmin = 2^ECWmin - 1 */
6192         struct iwm_softc *sc = ic->ic_softc;
6193         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6194         struct iwm_vap *ivp = IWM_VAP(vap);
6195         struct iwm_node *in;
6196         struct wmeParams tmp[WME_NUM_AC];
6197         int aci, error;
6198
6199         if (vap == NULL)
6200                 return (0);
6201
6202         IEEE80211_LOCK(ic);
6203         for (aci = 0; aci < WME_NUM_AC; aci++)
6204                 tmp[aci] = ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
6205         IEEE80211_UNLOCK(ic);
6206
6207         IWM_LOCK(sc);
6208         for (aci = 0; aci < WME_NUM_AC; aci++) {
6209                 const struct wmeParams *ac = &tmp[aci];
6210                 ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6211                 ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6212                 ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6213                 ivp->queue_params[aci].edca_txop =
6214                     IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6215         }
6216         ivp->have_wme = TRUE;
6217         if (ivp->is_uploaded && vap->iv_bss != NULL) {
6218                 in = IWM_NODE(vap->iv_bss);
6219                 if (in->in_assoc) {
6220                         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
6221                                 device_printf(sc->sc_dev,
6222                                     "%s: failed to update MAC\n", __func__);
6223                         }
6224                 }
6225         }
6226         IWM_UNLOCK(sc);
6227
6228         return (0);
6229 #undef IWM_EXP2
6230 }
6231
/*
 * Deferred second stage of attach, run from a config_intrhook (see
 * sc_preinit_hook): bring the hardware up, run the init firmware once
 * to obtain NVM data, then complete the net80211 attachment based on
 * what the NVM reports.  On failure the entire attachment is torn
 * down via iwm_detach_local().
 */
static void
iwm_preinit(void *arg)
{
	struct iwm_softc *sc = arg;
	device_t dev = sc->sc_dev;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s\n", __func__);

	IWM_LOCK(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		IWM_UNLOCK(sc);
		goto fail;
	}

	/*
	 * Run the init firmware to populate NVM data, then stop the
	 * device again; it is fully (re)initialized later in iwm_init().
	 */
	error = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error) {
		IWM_UNLOCK(sc);
		goto fail;
	}
	device_printf(dev,
	    "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));

	/* not all hardware can do 5GHz band */
	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
	IWM_UNLOCK(sc);

	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/*
	 * At this point we've committed - if we fail to do setup,
	 * we now also have to tear down the net80211 state.
	 */
	ieee80211_ifattach(ic);
	/* Install the driver's net80211 method vectors. */
	ic->ic_vap_create = iwm_vap_create;
	ic->ic_vap_delete = iwm_vap_delete;
	ic->ic_raw_xmit = iwm_raw_xmit;
	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_scan_start = iwm_scan_start;
	ic->ic_scan_end = iwm_scan_end;
	ic->ic_update_mcast = iwm_update_mcast;
	ic->ic_getradiocaps = iwm_init_channel_map;
	ic->ic_set_channel = iwm_set_channel;
	ic->ic_scan_curchan = iwm_scan_curchan;
	ic->ic_scan_mindwell = iwm_scan_mindwell;
	ic->ic_wme.wme_update = iwm_wme_update;
	ic->ic_parent = iwm_parent;
	ic->ic_transmit = iwm_transmit;
	iwm_radiotap_attach(sc);
	if (bootverbose)
		ieee80211_announce(ic);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);
	/* The intrhook has served its purpose; drop it on both paths. */
	config_intrhook_disestablish(&sc->sc_preinit_hook);

	return;
fail:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
	iwm_detach_local(sc, 0);
}
6302
6303 /*
6304  * Attach the interface to 802.11 radiotap.
6305  */
6306 static void
6307 iwm_radiotap_attach(struct iwm_softc *sc)
6308 {
6309         struct ieee80211com *ic = &sc->sc_ic;
6310
6311         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6312             "->%s begin\n", __func__);
6313         ieee80211_radiotap_attach(ic,
6314             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6315                 IWM_TX_RADIOTAP_PRESENT,
6316             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6317                 IWM_RX_RADIOTAP_PRESENT);
6318         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6319             "->%s end\n", __func__);
6320 }
6321
6322 static struct ieee80211vap *
6323 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6324     enum ieee80211_opmode opmode, int flags,
6325     const uint8_t bssid[IEEE80211_ADDR_LEN],
6326     const uint8_t mac[IEEE80211_ADDR_LEN])
6327 {
6328         struct iwm_vap *ivp;
6329         struct ieee80211vap *vap;
6330
6331         if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6332                 return NULL;
6333         ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6334         vap = &ivp->iv_vap;
6335         ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6336         vap->iv_bmissthreshold = 10;            /* override default */
6337         /* Override with driver methods. */
6338         ivp->iv_newstate = vap->iv_newstate;
6339         vap->iv_newstate = iwm_newstate;
6340
6341         ivp->id = IWM_DEFAULT_MACID;
6342         ivp->color = IWM_DEFAULT_COLOR;
6343
6344         ivp->have_wme = FALSE;
6345
6346         ieee80211_ratectl_init(vap);
6347         /* Complete setup. */
6348         ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6349             mac);
6350         ic->ic_opmode = opmode;
6351
6352         return vap;
6353 }
6354
6355 static void
6356 iwm_vap_delete(struct ieee80211vap *vap)
6357 {
6358         struct iwm_vap *ivp = IWM_VAP(vap);
6359
6360         ieee80211_ratectl_deinit(vap);
6361         ieee80211_vap_detach(vap);
6362         free(ivp, M_80211_VAP);
6363 }
6364
6365 static void
6366 iwm_xmit_queue_drain(struct iwm_softc *sc)
6367 {
6368         struct mbuf *m;
6369         struct ieee80211_node *ni;
6370
6371         while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
6372                 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
6373                 ieee80211_free_node(ni);
6374                 m_freem(m);
6375         }
6376 }
6377
6378 static void
6379 iwm_scan_start(struct ieee80211com *ic)
6380 {
6381         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6382         struct iwm_softc *sc = ic->ic_softc;
6383         int error;
6384
6385         IWM_LOCK(sc);
6386         if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6387                 /* This should not be possible */
6388                 device_printf(sc->sc_dev,
6389                     "%s: Previous scan not completed yet\n", __func__);
6390         }
6391         if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6392                 error = iwm_mvm_umac_scan(sc);
6393         else
6394                 error = iwm_mvm_lmac_scan(sc);
6395         if (error != 0) {
6396                 device_printf(sc->sc_dev, "could not initiate scan\n");
6397                 IWM_UNLOCK(sc);
6398                 ieee80211_cancel_scan(vap);
6399         } else {
6400                 sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6401                 iwm_led_blink_start(sc);
6402                 IWM_UNLOCK(sc);
6403         }
6404 }
6405
/*
 * net80211 scan-end callback: stop the scan LED blink (re-enabling
 * the steady LED if the VAP is in RUN state) and, if a scan is still
 * marked running, tell the firmware to stop it and wait for
 * completion.
 */
static void
iwm_scan_end(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;

	IWM_LOCK(sc);
	iwm_led_blink_stop(sc);
	if (vap->iv_state == IEEE80211_S_RUN)
		iwm_mvm_led_enable(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/*
		 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
		 * taskqueue.
		 */
		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
		iwm_mvm_scan_stop_wait(sc);
	}
	IWM_UNLOCK(sc);

	/*
	 * Make sure we don't race, if sc_es_task is still enqueued here.
	 * This is to make sure that it won't call ieee80211_scan_done
	 * when we have already started the next scan.
	 */
	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
}
6434
/*
 * net80211 multicast-filter update callback; intentionally a no-op,
 * this driver does not reprogram multicast filters.
 */
static void
iwm_update_mcast(struct ieee80211com *ic)
{
}
6439
/* net80211 set-channel callback; intentionally a no-op for this driver. */
static void
iwm_set_channel(struct ieee80211com *ic)
{
}
6444
/* net80211 per-channel scan callback; intentionally a no-op here. */
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
6449
/* net80211 scan minimum-dwell callback; intentionally a no-op here. */
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
6455
/*
 * (Re)initialization task: serialize against other stop/init sequences
 * via the IWM_FLAG_BUSY flag (sleep/wakeup on &sc->sc_flags), then stop
 * the device and bring it back up if any interface is still running.
 */
void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;

	IWM_LOCK(sc);
	/* Wait until no other thread holds BUSY. */
	while (sc->sc_flags & IWM_FLAG_BUSY)
		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
	sc->sc_flags |= IWM_FLAG_BUSY;
	iwm_stop(sc);
	if (sc->sc_ic.ic_nrunning > 0)
		iwm_init(sc);
	sc->sc_flags &= ~IWM_FLAG_BUSY;
	wakeup(&sc->sc_flags);	/* release any waiters blocked on BUSY */
	IWM_UNLOCK(sc);
}
6472
6473 static int
6474 iwm_resume(device_t dev)
6475 {
6476         struct iwm_softc *sc = device_get_softc(dev);
6477         int do_reinit = 0;
6478
6479         /*
6480          * We disable the RETRY_TIMEOUT register (0x41) to keep
6481          * PCI Tx retries from interfering with C3 CPU state.
6482          */
6483         pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6484         iwm_init_task(device_get_softc(dev));
6485
6486         IWM_LOCK(sc);
6487         if (sc->sc_flags & IWM_FLAG_SCANNING) {
6488                 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6489                 do_reinit = 1;
6490         }
6491         IWM_UNLOCK(sc);
6492
6493         if (do_reinit)
6494                 ieee80211_resume_all(&sc->sc_ic);
6495
6496         return 0;
6497 }
6498
6499 static int
6500 iwm_suspend(device_t dev)
6501 {
6502         int do_stop = 0;
6503         struct iwm_softc *sc = device_get_softc(dev);
6504
6505         do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6506
6507         ieee80211_suspend_all(&sc->sc_ic);
6508
6509         if (do_stop) {
6510                 IWM_LOCK(sc);
6511                 iwm_stop(sc);
6512                 sc->sc_flags |= IWM_FLAG_SCANNING;
6513                 IWM_UNLOCK(sc);
6514         }
6515
6516         return (0);
6517 }
6518
/*
 * Common teardown path, shared by device_detach and the attach/preinit
 * failure paths.  'do_net80211' selects whether net80211 state was
 * attached and must be torn down as well (0 on early attach failure).
 * The sc_attached flag makes repeated invocations no-ops, so the
 * failure paths and a later detach cannot double-free.
 */
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	device_t dev = sc->sc_dev;
	int i;

	if (!sc->sc_attached)
		return 0;
	sc->sc_attached = 0;

	if (do_net80211)
		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);

	/* Quiesce timers and the hardware before freeing anything. */
	callout_drain(&sc->sc_led_blink_to);
	callout_drain(&sc->sc_watchdog_to);
	iwm_stop_device(sc);
	if (do_net80211) {
		IWM_LOCK(sc);
		iwm_xmit_queue_drain(sc);
		IWM_UNLOCK(sc);
		ieee80211_ifdetach(&sc->sc_ic);
	}

	iwm_phy_db_free(sc->sc_phy_db);
	sc->sc_phy_db = NULL;

	iwm_free_nvm_data(sc->nvm_data);

	/* Free descriptor rings */
	iwm_free_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwm_free_tx_ring(sc, &sc->txq[i]);

	/* Free firmware */
	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/* Free scheduler */
	iwm_dma_contig_free(&sc->sched_dma);
	iwm_dma_contig_free(&sc->ict_dma);
	iwm_dma_contig_free(&sc->kw_dma);
	iwm_dma_contig_free(&sc->fw_dma);

	iwm_free_fw_paging(sc);

	/* Finished with the hardware - detach things */
	iwm_pci_detach(dev);

	if (sc->sc_notif_wait != NULL) {
		iwm_notification_wait_free(sc->sc_notif_wait);
		sc->sc_notif_wait = NULL;
	}

	/* Last: nothing below may take the driver lock any more. */
	IWM_LOCK_DESTROY(sc);

	return (0);
}
6577
6578 static int
6579 iwm_detach(device_t dev)
6580 {
6581         struct iwm_softc *sc = device_get_softc(dev);
6582
6583         return (iwm_detach_local(sc, 1));
6584 }
6585
/* newbus device method table for the iwm PCI driver. */
static device_method_t iwm_pci_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         iwm_probe),
        DEVMETHOD(device_attach,        iwm_attach),
        DEVMETHOD(device_detach,        iwm_detach),
        DEVMETHOD(device_suspend,       iwm_suspend),
        DEVMETHOD(device_resume,        iwm_resume),

        DEVMETHOD_END
};
6596
/* Driver description: name, method table, and per-device softc size. */
static driver_t iwm_pci_driver = {
        "iwm",
        iwm_pci_methods,
        sizeof (struct iwm_softc)
};
6602
static devclass_t iwm_devclass;

/* Register the driver on the PCI bus and declare module dependencies. */
DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
MODULE_DEPEND(iwm, firmware, 1, 1, 1);  /* firmware(9) image loading */
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);      /* net80211 stack */