1 /*      $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $     */
2
3 /*
4  * Copyright (c) 2014 genua mbh <info@genua.de>
5  * Copyright (c) 2014 Fixup Software Ltd.
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19
20 /*-
21  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22  * which were used as the reference documentation for this implementation.
23  *
24  * Driver version we are currently based off of is
25  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26  *
27  ***********************************************************************
28  *
29  * This file is provided under a dual BSD/GPLv2 license.  When using or
30  * redistributing this file, you may do so under either license.
31  *
32  * GPL LICENSE SUMMARY
33  *
34  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35  *
36  * This program is free software; you can redistribute it and/or modify
37  * it under the terms of version 2 of the GNU General Public License as
38  * published by the Free Software Foundation.
39  *
40  * This program is distributed in the hope that it will be useful, but
41  * WITHOUT ANY WARRANTY; without even the implied warranty of
42  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43  * General Public License for more details.
44  *
45  * You should have received a copy of the GNU General Public License
46  * along with this program; if not, write to the Free Software
47  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48  * USA
49  *
50  * The full GNU General Public License is included in this distribution
51  * in the file called COPYING.
52  *
53  * Contact Information:
54  *  Intel Linux Wireless <ilw@linux.intel.com>
55  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56  *
57  *
58  * BSD LICENSE
59  *
60  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61  * All rights reserved.
62  *
63  * Redistribution and use in source and binary forms, with or without
64  * modification, are permitted provided that the following conditions
65  * are met:
66  *
67  *  * Redistributions of source code must retain the above copyright
68  *    notice, this list of conditions and the following disclaimer.
69  *  * Redistributions in binary form must reproduce the above copyright
70  *    notice, this list of conditions and the following disclaimer in
71  *    the documentation and/or other materials provided with the
72  *    distribution.
73  *  * Neither the name Intel Corporation nor the names of its
74  *    contributors may be used to endorse or promote products derived
75  *    from this software without specific prior written permission.
76  *
77  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88  */
89
90 /*-
91  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92  *
93  * Permission to use, copy, modify, and distribute this software for any
94  * purpose with or without fee is hereby granted, provided that the above
95  * copyright notice and this permission notice appear in all copies.
96  *
97  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104  */
105 #include <sys/cdefs.h>
106 __FBSDID("$FreeBSD$");
107
108 #include "opt_wlan.h"
109
110 #include <sys/param.h>
111 #include <sys/bus.h>
112 #include <sys/conf.h>
113 #include <sys/endian.h>
114 #include <sys/firmware.h>
115 #include <sys/kernel.h>
116 #include <sys/malloc.h>
117 #include <sys/mbuf.h>
118 #include <sys/mutex.h>
119 #include <sys/module.h>
120 #include <sys/proc.h>
121 #include <sys/rman.h>
122 #include <sys/socket.h>
123 #include <sys/sockio.h>
124 #include <sys/sysctl.h>
125 #include <sys/linker.h>
126
127 #include <machine/bus.h>
128 #include <machine/endian.h>
129 #include <machine/resource.h>
130
131 #include <dev/pci/pcivar.h>
132 #include <dev/pci/pcireg.h>
133
134 #include <net/bpf.h>
135
136 #include <net/if.h>
137 #include <net/if_var.h>
138 #include <net/if_arp.h>
139 #include <net/if_dl.h>
140 #include <net/if_media.h>
141 #include <net/if_types.h>
142
143 #include <netinet/in.h>
144 #include <netinet/in_systm.h>
145 #include <netinet/if_ether.h>
146 #include <netinet/ip.h>
147
148 #include <net80211/ieee80211_var.h>
149 #include <net80211/ieee80211_regdomain.h>
150 #include <net80211/ieee80211_ratectl.h>
151 #include <net80211/ieee80211_radiotap.h>
152
153 #include <dev/iwm/if_iwmreg.h>
154 #include <dev/iwm/if_iwmvar.h>
155 #include <dev/iwm/if_iwm_debug.h>
156 #include <dev/iwm/if_iwm_notif_wait.h>
157 #include <dev/iwm/if_iwm_util.h>
158 #include <dev/iwm/if_iwm_binding.h>
159 #include <dev/iwm/if_iwm_phy_db.h>
160 #include <dev/iwm/if_iwm_mac_ctxt.h>
161 #include <dev/iwm/if_iwm_phy_ctxt.h>
162 #include <dev/iwm/if_iwm_time_event.h>
163 #include <dev/iwm/if_iwm_power.h>
164 #include <dev/iwm/if_iwm_scan.h>
165
166 #include <dev/iwm/if_iwm_pcie_trans.h>
167 #include <dev/iwm/if_iwm_led.h>
168
169 #define IWM_NVM_HW_SECTION_NUM_FAMILY_7000      0
170 #define IWM_NVM_HW_SECTION_NUM_FAMILY_8000      10
171
172 /* lower blocks contain EEPROM image and calibration data */
173 #define IWM_OTP_LOW_IMAGE_SIZE_FAMILY_7000      (16 * 512 * sizeof(uint16_t)) /* 16 KB */
174 #define IWM_OTP_LOW_IMAGE_SIZE_FAMILY_8000      (32 * 512 * sizeof(uint16_t)) /* 32 KB */
175
176 #define IWM7260_FW      "iwm7260fw"
177 #define IWM3160_FW      "iwm3160fw"
178 #define IWM7265_FW      "iwm7265fw"
179 #define IWM7265D_FW     "iwm7265Dfw"
180 #define IWM8000_FW      "iwm8000Cfw"
181
182 #define IWM_DEVICE_7000_COMMON                                          \
183         .device_family = IWM_DEVICE_FAMILY_7000,                        \
184         .eeprom_size = IWM_OTP_LOW_IMAGE_SIZE_FAMILY_7000,              \
185         .nvm_hw_section_num = IWM_NVM_HW_SECTION_NUM_FAMILY_7000
186
187 const struct iwm_cfg iwm7260_cfg = {
188         .fw_name = IWM7260_FW,
189         IWM_DEVICE_7000_COMMON,
190         .host_interrupt_operation_mode = 1,
191 };
192
193 const struct iwm_cfg iwm3160_cfg = {
194         .fw_name = IWM3160_FW,
195         IWM_DEVICE_7000_COMMON,
196         .host_interrupt_operation_mode = 1,
197 };
198
199 const struct iwm_cfg iwm3165_cfg = {
200         /* XXX IWM7265D_FW doesn't seem to work properly yet */
201         .fw_name = IWM7265_FW,
202         IWM_DEVICE_7000_COMMON,
203         .host_interrupt_operation_mode = 0,
204 };
205
206 const struct iwm_cfg iwm7265_cfg = {
207         .fw_name = IWM7265_FW,
208         IWM_DEVICE_7000_COMMON,
209         .host_interrupt_operation_mode = 0,
210 };
211
212 const struct iwm_cfg iwm7265d_cfg = {
213         /* XXX IWM7265D_FW doesn't seem to work properly yet */
214         .fw_name = IWM7265_FW,
215         IWM_DEVICE_7000_COMMON,
216         .host_interrupt_operation_mode = 0,
217 };
218
219 #define IWM_DEVICE_8000_COMMON                                          \
220         .device_family = IWM_DEVICE_FAMILY_8000,                        \
221         .eeprom_size = IWM_OTP_LOW_IMAGE_SIZE_FAMILY_8000,              \
222         .nvm_hw_section_num = IWM_NVM_HW_SECTION_NUM_FAMILY_8000
223
224 const struct iwm_cfg iwm8260_cfg = {
225         .fw_name = IWM8000_FW,
226         IWM_DEVICE_8000_COMMON,
227         .host_interrupt_operation_mode = 0,
228 };
229
230 const uint8_t iwm_nvm_channels[] = {
231         /* 2.4 GHz */
232         1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
233         /* 5 GHz */
234         36, 40, 44, 48, 52, 56, 60, 64,
235         100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
236         149, 153, 157, 161, 165
237 };
238 _Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
239     "IWM_NUM_CHANNELS is too small");
240
241 const uint8_t iwm_nvm_channels_8000[] = {
242         /* 2.4 GHz */
243         1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
244         /* 5 GHz */
245         36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
246         96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
247         149, 153, 157, 161, 165, 169, 173, 177, 181
248 };
249 _Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
250     "IWM_NUM_CHANNELS_8000 is too small");
251
252 #define IWM_NUM_2GHZ_CHANNELS   14
253 #define IWM_N_HW_ADDR_MASK      0xF
254
255 /*
256  * XXX For now, there's simply a fixed set of rate table entries
257  * that are populated.
258  */
259 const struct iwm_rate {
260         uint8_t rate;
261         uint8_t plcp;
262 } iwm_rates[] = {
263         {   2,  IWM_RATE_1M_PLCP  },
264         {   4,  IWM_RATE_2M_PLCP  },
265         {  11,  IWM_RATE_5M_PLCP  },
266         {  22,  IWM_RATE_11M_PLCP },
267         {  12,  IWM_RATE_6M_PLCP  },
268         {  18,  IWM_RATE_9M_PLCP  },
269         {  24,  IWM_RATE_12M_PLCP },
270         {  36,  IWM_RATE_18M_PLCP },
271         {  48,  IWM_RATE_24M_PLCP },
272         {  72,  IWM_RATE_36M_PLCP },
273         {  96,  IWM_RATE_48M_PLCP },
274         { 108,  IWM_RATE_54M_PLCP },
275 };
276 #define IWM_RIDX_CCK    0
277 #define IWM_RIDX_OFDM   4
278 #define IWM_RIDX_MAX    (nitems(iwm_rates)-1)
279 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
280 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
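
/*
 * The .rate values above are in 500 kb/s units (2 = 1 Mb/s, 22 = 11 Mb/s,
 * 108 = 54 Mb/s), matching the net80211 rate codes.  As an illustrative
 * sketch (not part of the driver), mapping a net80211 rate code to its
 * PLCP value is just a linear scan of this table:
 *
 *        for (i = 0; i <= IWM_RIDX_MAX; i++)
 *                if (iwm_rates[i].rate == (rate & IEEE80211_RATE_VAL))
 *                        return iwm_rates[i].plcp;
 *
 * iwm_tx_fill_cmd() below indexes this table to pick the PLCP value for
 * the rate chosen for a frame.
 */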
281
282 struct iwm_nvm_section {
283         uint16_t length;
284         uint8_t *data;
285 };
286
287 static int      iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
288 static int      iwm_firmware_store_section(struct iwm_softc *,
289                                            enum iwm_ucode_type,
290                                            const uint8_t *, size_t);
291 static int      iwm_set_default_calib(struct iwm_softc *, const void *);
292 static void     iwm_fw_info_free(struct iwm_fw_info *);
293 static int      iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
294 static void     iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
295 static int      iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
296                                      bus_size_t, bus_size_t);
297 static void     iwm_dma_contig_free(struct iwm_dma_info *);
298 static int      iwm_alloc_fwmem(struct iwm_softc *);
299 static int      iwm_alloc_sched(struct iwm_softc *);
300 static int      iwm_alloc_kw(struct iwm_softc *);
301 static int      iwm_alloc_ict(struct iwm_softc *);
302 static int      iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
303 static void     iwm_disable_rx_dma(struct iwm_softc *);
304 static void     iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
305 static void     iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
306 static int      iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
307                                   int);
308 static void     iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
309 static void     iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
310 static void     iwm_enable_interrupts(struct iwm_softc *);
311 static void     iwm_restore_interrupts(struct iwm_softc *);
312 static void     iwm_disable_interrupts(struct iwm_softc *);
313 static void     iwm_ict_reset(struct iwm_softc *);
314 static int      iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
315 static void     iwm_stop_device(struct iwm_softc *);
316 static void     iwm_mvm_nic_config(struct iwm_softc *);
317 static int      iwm_nic_rx_init(struct iwm_softc *);
318 static int      iwm_nic_tx_init(struct iwm_softc *);
319 static int      iwm_nic_init(struct iwm_softc *);
320 static int      iwm_enable_txq(struct iwm_softc *, int, int, int);
321 static int      iwm_post_alive(struct iwm_softc *);
322 static int      iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
323                                    uint16_t, uint8_t *, uint16_t *);
324 static int      iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
325                                      uint16_t *, uint32_t);
326 static uint32_t iwm_eeprom_channel_flags(uint16_t);
327 static void     iwm_add_channel_band(struct iwm_softc *,
328                     struct ieee80211_channel[], int, int *, int, size_t,
329                     const uint8_t[]);
330 static void     iwm_init_channel_map(struct ieee80211com *, int, int *,
331                     struct ieee80211_channel[]);
332 static struct iwm_nvm_data *
333         iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
334                            const uint16_t *, const uint16_t *,
335                            const uint16_t *, const uint16_t *,
336                            const uint16_t *);
337 static void     iwm_free_nvm_data(struct iwm_nvm_data *);
338 static void     iwm_set_hw_address_family_8000(struct iwm_softc *,
339                                                struct iwm_nvm_data *,
340                                                const uint16_t *,
341                                                const uint16_t *);
342 static int      iwm_get_sku(const struct iwm_softc *, const uint16_t *,
343                             const uint16_t *);
344 static int      iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
345 static int      iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
346                                   const uint16_t *);
347 static int      iwm_get_n_hw_addrs(const struct iwm_softc *,
348                                    const uint16_t *);
349 static void     iwm_set_radio_cfg(const struct iwm_softc *,
350                                   struct iwm_nvm_data *, uint32_t);
351 static struct iwm_nvm_data *
352         iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
353 static int      iwm_nvm_init(struct iwm_softc *);
354 static int      iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
355                                        const uint8_t *, uint32_t);
356 static int      iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
357                                         const uint8_t *, uint32_t);
358 static int      iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
359 static int      iwm_load_cpu_sections_8000(struct iwm_softc *,
360                                            struct iwm_fw_sects *, int , int *);
361 static int      iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
362 static int      iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
363 static int      iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
364 static int      iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
365 static int      iwm_send_phy_cfg_cmd(struct iwm_softc *);
366 static int      iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
367                                               enum iwm_ucode_type);
368 static int      iwm_run_init_mvm_ucode(struct iwm_softc *, int);
369 static int      iwm_rx_addbuf(struct iwm_softc *, int, int);
370 static int      iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
371 static int      iwm_mvm_get_signal_strength(struct iwm_softc *,
372                                             struct iwm_rx_phy_info *);
373 static void     iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
374                                       struct iwm_rx_packet *,
375                                       struct iwm_rx_data *);
376 static int      iwm_get_noise(struct iwm_softc *sc,
377                     const struct iwm_mvm_statistics_rx_non_phy *);
378 static void     iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
379                                    struct iwm_rx_data *);
380 static int      iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
381                                          struct iwm_rx_packet *,
382                                          struct iwm_node *);
383 static void     iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
384                                   struct iwm_rx_data *);
385 static void     iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
386 #if 0
387 static void     iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
388                                  uint16_t);
389 #endif
390 static const struct iwm_rate *
391         iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
392                         struct mbuf *, struct iwm_tx_cmd *);
393 static int      iwm_tx(struct iwm_softc *, struct mbuf *,
394                        struct ieee80211_node *, int);
395 static int      iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
396                              const struct ieee80211_bpf_params *);
397 static int      iwm_mvm_flush_tx_path(struct iwm_softc *sc,
398                                       uint32_t tfd_msk, uint32_t flags);
399 static int      iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
400                                                 struct iwm_mvm_add_sta_cmd_v7 *,
401                                                 int *);
402 static int      iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
403                                        int);
404 static int      iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
405 static int      iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
406 static int      iwm_mvm_add_int_sta_common(struct iwm_softc *,
407                                            struct iwm_int_sta *,
408                                            const uint8_t *, uint16_t, uint16_t);
409 static int      iwm_mvm_add_aux_sta(struct iwm_softc *);
410 static int      iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
411 static int      iwm_auth(struct ieee80211vap *, struct iwm_softc *);
412 static int      iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
413 static int      iwm_release(struct iwm_softc *, struct iwm_node *);
414 static struct ieee80211_node *
415                 iwm_node_alloc(struct ieee80211vap *,
416                                const uint8_t[IEEE80211_ADDR_LEN]);
417 static void     iwm_setrates(struct iwm_softc *, struct iwm_node *);
418 static int      iwm_media_change(struct ifnet *);
419 static int      iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
420 static void     iwm_endscan_cb(void *, int);
421 static void     iwm_mvm_fill_sf_command(struct iwm_softc *,
422                                         struct iwm_sf_cfg_cmd *,
423                                         struct ieee80211_node *);
424 static int      iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
425 static int      iwm_send_bt_init_conf(struct iwm_softc *);
426 static int      iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
427 static void     iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
428 static int      iwm_init_hw(struct iwm_softc *);
429 static void     iwm_init(struct iwm_softc *);
430 static void     iwm_start(struct iwm_softc *);
431 static void     iwm_stop(struct iwm_softc *);
432 static void     iwm_watchdog(void *);
433 static void     iwm_parent(struct ieee80211com *);
434 #ifdef IWM_DEBUG
435 static const char *
436                 iwm_desc_lookup(uint32_t);
437 static void     iwm_nic_error(struct iwm_softc *);
438 static void     iwm_nic_umac_error(struct iwm_softc *);
439 #endif
440 static void     iwm_notif_intr(struct iwm_softc *);
441 static void     iwm_intr(void *);
442 static int      iwm_attach(device_t);
443 static int      iwm_is_valid_ether_addr(uint8_t *);
444 static void     iwm_preinit(void *);
445 static int      iwm_detach_local(struct iwm_softc *sc, int);
446 static void     iwm_init_task(void *);
447 static void     iwm_radiotap_attach(struct iwm_softc *);
448 static struct ieee80211vap *
449                 iwm_vap_create(struct ieee80211com *,
450                                const char [IFNAMSIZ], int,
451                                enum ieee80211_opmode, int,
452                                const uint8_t [IEEE80211_ADDR_LEN],
453                                const uint8_t [IEEE80211_ADDR_LEN]);
454 static void     iwm_vap_delete(struct ieee80211vap *);
455 static void     iwm_scan_start(struct ieee80211com *);
456 static void     iwm_scan_end(struct ieee80211com *);
457 static void     iwm_update_mcast(struct ieee80211com *);
458 static void     iwm_set_channel(struct ieee80211com *);
459 static void     iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
460 static void     iwm_scan_mindwell(struct ieee80211_scan_state *);
461 static int      iwm_detach(device_t);
462
463 /*
464  * Firmware parser.
465  */
466
467 static int
468 iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
469 {
470         const struct iwm_fw_cscheme_list *l = (const void *)data;
471
472         if (dlen < sizeof(*l) ||
473             dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
474                 return EINVAL;
475
476         /* we don't actually store anything for now, always use s/w crypto */
477
478         return 0;
479 }
480
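/*
 * Each firmware-section TLV payload handed to iwm_firmware_store_section()
 * is laid out as follows (little endian), which is what the function parses
 * below:
 *
 *        +-----------------------+---------------------------------+
 *        | load offset (32 bits) | section data (dlen - 4 bytes)   |
 *        +-----------------------+---------------------------------+
 *
 * Only a pointer and length are recorded here; the data itself stays in the
 * firmware image until it is uploaded to the device (see
 * iwm_firmware_load_sect()).
 */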
481 static int
482 iwm_firmware_store_section(struct iwm_softc *sc,
483     enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
484 {
485         struct iwm_fw_sects *fws;
486         struct iwm_fw_onesect *fwone;
487
488         if (type >= IWM_UCODE_TYPE_MAX)
489                 return EINVAL;
490         if (dlen < sizeof(uint32_t))
491                 return EINVAL;
492
493         fws = &sc->sc_fw.fw_sects[type];
494         if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
495                 return EINVAL;
496
497         fwone = &fws->fw_sect[fws->fw_count];
498
499         /* first 32 bits are the device load offset */
500         memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
501
502         /* rest is data */
503         fwone->fws_data = data + sizeof(uint32_t);
504         fwone->fws_len = dlen - sizeof(uint32_t);
505
506         fws->fw_count++;
507
508         return 0;
509 }
510
511 #define IWM_DEFAULT_SCAN_CHANNELS 40
512
513 /* iwlwifi: iwl-drv.c */
514 struct iwm_tlv_calib_data {
515         uint32_t ucode_type;
516         struct iwm_tlv_calib_ctrl calib;
517 } __packed;
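
/*
 * This is the payload of an IWM_UCODE_TLV_DEF_CALIB record: the default
 * calibration control for one ucode type.  flow_trigger and event_trigger
 * are (as far as this driver is concerned) opaque bitmasks selecting which
 * calibrations the firmware should run; iwm_set_default_calib() below
 * stashes them in sc->sc_default_calib[], and they are later used when the
 * PHY configuration command is built (see iwm_send_phy_cfg_cmd()).
 */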
518
519 static int
520 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
521 {
522         const struct iwm_tlv_calib_data *def_calib = data;
523         uint32_t ucode_type = le32toh(def_calib->ucode_type);
524
525         if (ucode_type >= IWM_UCODE_TYPE_MAX) {
526                 device_printf(sc->sc_dev,
527                     "Wrong ucode_type %u for default "
528                     "calibration.\n", ucode_type);
529                 return EINVAL;
530         }
531
532         sc->sc_default_calib[ucode_type].flow_trigger =
533             def_calib->calib.flow_trigger;
534         sc->sc_default_calib[ucode_type].event_trigger =
535             def_calib->calib.event_trigger;
536
537         return 0;
538 }
539
540 static void
541 iwm_fw_info_free(struct iwm_fw_info *fw)
542 {
543         firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
544         fw->fw_fp = NULL;
545         /* don't touch fw->fw_status */
546         memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
547 }
548
549 static int
550 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
551 {
552         struct iwm_fw_info *fw = &sc->sc_fw;
553         const struct iwm_tlv_ucode_header *uhdr;
554         struct iwm_ucode_tlv tlv;
555         enum iwm_ucode_tlv_type tlv_type;
556         const struct firmware *fwp;
557         const uint8_t *data;
558         uint32_t usniffer_img;
559         uint32_t paging_mem_size;
560         int error = 0;
561         size_t len;
562
563         if (fw->fw_status == IWM_FW_STATUS_DONE &&
564             ucode_type != IWM_UCODE_INIT)
565                 return 0;
566
567         while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
568                 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
569         fw->fw_status = IWM_FW_STATUS_INPROGRESS;
570
571         if (fw->fw_fp != NULL)
572                 iwm_fw_info_free(fw);
573
574         /*
575          * Load firmware into driver memory.
576          * fw_fp will be set.
577          */
578         IWM_UNLOCK(sc);
579         fwp = firmware_get(sc->cfg->fw_name);
580         IWM_LOCK(sc);
581         if (fwp == NULL) {
582                 device_printf(sc->sc_dev,
583                     "could not read firmware %s\n", sc->cfg->fw_name);
584                 error = ENOENT;
585                 goto out;
586         }
587         fw->fw_fp = fwp;
588
589         /* (Re-)Initialize default values. */
590         sc->sc_capaflags = 0;
591         sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
592         memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
593         memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
594
595         /*
596          * Parse firmware contents
597          */
598
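        /*
         * Layout of the TLV firmware image, as consumed by the loop below:
         * a struct iwm_tlv_ucode_header up front (its first 32-bit word
         * must be zero and its magic must be IWM_TLV_UCODE_MAGIC; it also
         * carries the packed ucode version), followed by a sequence of TLV
         * records.  Each record is a struct iwm_ucode_tlv giving a 32-bit
         * type and length, then the payload itself, padded to a 4-byte
         * boundary.
         */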
599         uhdr = (const void *)fw->fw_fp->data;
600         if (*(const uint32_t *)fw->fw_fp->data != 0
601             || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
602                 device_printf(sc->sc_dev, "invalid firmware %s\n",
603                     sc->cfg->fw_name);
604                 error = EINVAL;
605                 goto out;
606         }
607
608         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
609             IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
610             IWM_UCODE_MINOR(le32toh(uhdr->ver)),
611             IWM_UCODE_API(le32toh(uhdr->ver)));
612         data = uhdr->data;
613         len = fw->fw_fp->datasize - sizeof(*uhdr);
614
615         while (len >= sizeof(tlv)) {
616                 size_t tlv_len;
617                 const void *tlv_data;
618
619                 memcpy(&tlv, data, sizeof(tlv));
620                 tlv_len = le32toh(tlv.length);
621                 tlv_type = le32toh(tlv.type);
622
623                 len -= sizeof(tlv);
624                 data += sizeof(tlv);
625                 tlv_data = data;
626
627                 if (len < tlv_len) {
628                         device_printf(sc->sc_dev,
629                             "firmware too short: %zu bytes\n",
630                             len);
631                         error = EINVAL;
632                         goto parse_out;
633                 }
634
635                 switch ((int)tlv_type) {
636                 case IWM_UCODE_TLV_PROBE_MAX_LEN:
637                         if (tlv_len < sizeof(uint32_t)) {
638                                 device_printf(sc->sc_dev,
639                                     "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
640                                     __func__,
641                                     (int) tlv_len);
642                                 error = EINVAL;
643                                 goto parse_out;
644                         }
645                         sc->sc_capa_max_probe_len
646                             = le32toh(*(const uint32_t *)tlv_data);
647                         /* limit it to something sensible */
648                         if (sc->sc_capa_max_probe_len >
649                             IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
650                                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
651                                     "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
652                                     "ridiculous\n", __func__);
653                                 error = EINVAL;
654                                 goto parse_out;
655                         }
656                         break;
657                 case IWM_UCODE_TLV_PAN:
658                         if (tlv_len) {
659                                 device_printf(sc->sc_dev,
660                                     "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
661                                     __func__,
662                                     (int) tlv_len);
663                                 error = EINVAL;
664                                 goto parse_out;
665                         }
666                         sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
667                         break;
668                 case IWM_UCODE_TLV_FLAGS:
669                         if (tlv_len < sizeof(uint32_t)) {
670                                 device_printf(sc->sc_dev,
671                                     "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
672                                     __func__,
673                                     (int) tlv_len);
674                                 error = EINVAL;
675                                 goto parse_out;
676                         }
677                         /*
678                          * Apparently there can be many flags, but Linux driver
679                          * parses only the first one, and so do we.
680                          *
681                          * XXX: why does this override IWM_UCODE_TLV_PAN?
682                          * Intentional or a bug?  Observations from
683                          * current firmware file:
684                          *  1) TLV_PAN is parsed first
685                          *  2) TLV_FLAGS contains TLV_FLAGS_PAN
686                          * ==> this resets TLV_PAN to itself... hnnnk
687                          */
688                         sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
689                         break;
690                 case IWM_UCODE_TLV_CSCHEME:
691                         if ((error = iwm_store_cscheme(sc,
692                             tlv_data, tlv_len)) != 0) {
693                                 device_printf(sc->sc_dev,
694                                     "%s: iwm_store_cscheme(): returned %d\n",
695                                     __func__,
696                                     error);
697                                 goto parse_out;
698                         }
699                         break;
700                 case IWM_UCODE_TLV_NUM_OF_CPU: {
701                         uint32_t num_cpu;
702                         if (tlv_len != sizeof(uint32_t)) {
703                                 device_printf(sc->sc_dev,
704                                     "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
705                                     __func__,
706                                     (int) tlv_len);
707                                 error = EINVAL;
708                                 goto parse_out;
709                         }
710                         num_cpu = le32toh(*(const uint32_t *)tlv_data);
711                         if (num_cpu < 1 || num_cpu > 2) {
712                                 device_printf(sc->sc_dev,
713                                     "%s: Driver supports only 1 or 2 CPUs\n",
714                                     __func__);
715                                 error = EINVAL;
716                                 goto parse_out;
717                         }
718                         break;
719                 }
720                 case IWM_UCODE_TLV_SEC_RT:
721                         if ((error = iwm_firmware_store_section(sc,
722                             IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
723                                 device_printf(sc->sc_dev,
724                                     "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
725                                     __func__,
726                                     error);
727                                 goto parse_out;
728                         }
729                         break;
730                 case IWM_UCODE_TLV_SEC_INIT:
731                         if ((error = iwm_firmware_store_section(sc,
732                             IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
733                                 device_printf(sc->sc_dev,
734                                     "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
735                                     __func__,
736                                     error);
737                                 goto parse_out;
738                         }
739                         break;
740                 case IWM_UCODE_TLV_SEC_WOWLAN:
741                         if ((error = iwm_firmware_store_section(sc,
742                             IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
743                                 device_printf(sc->sc_dev,
744                                     "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
745                                     __func__,
746                                     error);
747                                 goto parse_out;
748                         }
749                         break;
750                 case IWM_UCODE_TLV_DEF_CALIB:
751                         if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
752                                 device_printf(sc->sc_dev,
753                                     "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%d) != sizeof(iwm_tlv_calib_data) (%d)\n",
754                                     __func__,
755                                     (int) tlv_len,
756                                     (int) sizeof(struct iwm_tlv_calib_data));
757                                 error = EINVAL;
758                                 goto parse_out;
759                         }
760                         if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
761                                 device_printf(sc->sc_dev,
762                                     "%s: iwm_set_default_calib() failed: %d\n",
763                                     __func__,
764                                     error);
765                                 goto parse_out;
766                         }
767                         break;
768                 case IWM_UCODE_TLV_PHY_SKU:
769                         if (tlv_len != sizeof(uint32_t)) {
770                                 error = EINVAL;
771                                 device_printf(sc->sc_dev,
772                                     "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) != sizeof(uint32_t)\n",
773                                     __func__,
774                                     (int) tlv_len);
775                                 goto parse_out;
776                         }
777                         sc->sc_fw.phy_config =
778                             le32toh(*(const uint32_t *)tlv_data);
779                         sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
780                                                   IWM_FW_PHY_CFG_TX_CHAIN) >>
781                                                   IWM_FW_PHY_CFG_TX_CHAIN_POS;
782                         sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
783                                                   IWM_FW_PHY_CFG_RX_CHAIN) >>
784                                                   IWM_FW_PHY_CFG_RX_CHAIN_POS;
785                         break;
786
787                 case IWM_UCODE_TLV_API_CHANGES_SET: {
788                         const struct iwm_ucode_api *api;
789                         if (tlv_len != sizeof(*api)) {
790                                 error = EINVAL;
791                                 goto parse_out;
792                         }
793                         api = (const struct iwm_ucode_api *)tlv_data;
794                         /* Flags may exceed 32 bits in future firmware. */
795                         if (le32toh(api->api_index) > 0) {
796                                 device_printf(sc->sc_dev,
797                                     "unsupported API index %d\n",
798                                     le32toh(api->api_index));
799                                 goto parse_out;
800                         }
801                         sc->sc_ucode_api = le32toh(api->api_flags);
802                         break;
803                 }
804
805                 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
806                         const struct iwm_ucode_capa *capa;
807                         int idx, i;
808                         if (tlv_len != sizeof(*capa)) {
809                                 error = EINVAL;
810                                 goto parse_out;
811                         }
812                         capa = (const struct iwm_ucode_capa *)tlv_data;
813                         idx = le32toh(capa->api_index);
814                         if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
815                                 device_printf(sc->sc_dev,
816                                     "unsupported API index %d\n", idx);
817                                 goto parse_out;
818                         }
819                         for (i = 0; i < 32; i++) {
820                                 if ((le32toh(capa->api_capa) & (1U << i)) == 0)
821                                         continue;
822                                 setbit(sc->sc_enabled_capa, i + (32 * idx));
823                         }
824                         break;
825                 }
826
827                 case 48: /* undocumented TLV */
828                 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
829                 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
830                         /* ignore, not used by current driver */
831                         break;
832
833                 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
834                         if ((error = iwm_firmware_store_section(sc,
835                             IWM_UCODE_REGULAR_USNIFFER, tlv_data,
836                             tlv_len)) != 0)
837                                 goto parse_out;
838                         break;
839
840                 case IWM_UCODE_TLV_PAGING:
841                         if (tlv_len != sizeof(uint32_t)) {
842                                 error = EINVAL;
843                                 goto parse_out;
844                         }
845                         paging_mem_size = le32toh(*(const uint32_t *)tlv_data);
846
847                         IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
848                             "%s: Paging: paging enabled (size = %u bytes)\n",
849                             __func__, paging_mem_size);
850                         if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
851                                 device_printf(sc->sc_dev,
852                                         "%s: Paging: driver supports up to %u bytes for paging image\n",
853                                         __func__, IWM_MAX_PAGING_IMAGE_SIZE);
854                                 error = EINVAL;
855                                 goto out;
856                         }
857                         if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
858                                 device_printf(sc->sc_dev,
859                                     "%s: Paging: image isn't multiple %u\n",
860                                     __func__, IWM_FW_PAGING_SIZE);
861                                 error = EINVAL;
862                                 goto out;
863                         }
864
865                         sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
866                             paging_mem_size;
867                         usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
868                         sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
869                             paging_mem_size;
870                         break;
871
872                 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
873                         if (tlv_len != sizeof(uint32_t)) {
874                                 error = EINVAL;
875                                 goto parse_out;
876                         }
877                         sc->sc_capa_n_scan_channels =
878                           le32toh(*(const uint32_t *)tlv_data);
879                         break;
880
881                 case IWM_UCODE_TLV_FW_VERSION:
882                         if (tlv_len != sizeof(uint32_t) * 3) {
883                                 error = EINVAL;
884                                 goto parse_out;
885                         }
886                         snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
887                             "%d.%d.%d",
888                             le32toh(((const uint32_t *)tlv_data)[0]),
889                             le32toh(((const uint32_t *)tlv_data)[1]),
890                             le32toh(((const uint32_t *)tlv_data)[2]));
891                         break;
892
893                 default:
894                         device_printf(sc->sc_dev,
895                             "%s: unknown firmware section %d, abort\n",
896                             __func__, tlv_type);
897                         error = EINVAL;
898                         goto parse_out;
899                 }
900
901                 len -= roundup(tlv_len, 4);
902                 data += roundup(tlv_len, 4);
903         }
904
905         KASSERT(error == 0, ("unhandled error"));
906
907  parse_out:
908         if (error) {
909                 device_printf(sc->sc_dev, "firmware parse error %d, "
910                     "section type %d\n", error, tlv_type);
911         }
912
913         if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
914                 device_printf(sc->sc_dev,
915                     "device uses unsupported power ops\n");
916                 error = ENOTSUP;
917         }
918
919  out:
920         if (error) {
921                 fw->fw_status = IWM_FW_STATUS_NONE;
922                 if (fw->fw_fp != NULL)
923                         iwm_fw_info_free(fw);
924         } else
925                 fw->fw_status = IWM_FW_STATUS_DONE;
926         wakeup(&sc->sc_fw);
927
928         return error;
929 }
930
931 /*
932  * DMA resource routines
933  */
934
935 static void
936 iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
937 {
938         if (error != 0)
939                 return;
940         KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
941         *(bus_addr_t *)arg = segs[0].ds_addr;
942 }
943
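/*
 * iwm_dma_contig_alloc() allocates a single physically contiguous, zeroed,
 * coherent DMA segment below 4 GB and stores its bus address in dma->paddr
 * via the iwm_dma_map_addr() callback above.  A sketch of typical usage
 * (mirroring iwm_alloc_kw() below):
 *
 *        error = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
 *        if (error != 0)
 *                return error;
 *        ...
 *        iwm_dma_contig_free(&sc->kw_dma);
 */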
944 static int
945 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
946     bus_size_t size, bus_size_t alignment)
947 {
948         int error;
949
950         dma->tag = NULL;
951         dma->map = NULL;
952         dma->size = size;
953         dma->vaddr = NULL;
954
955         error = bus_dma_tag_create(tag, alignment,
956             0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
957             1, size, 0, NULL, NULL, &dma->tag);
958         if (error != 0)
959                 goto fail;
960
961         error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
962             BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
963         if (error != 0)
964                 goto fail;
965
966         error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
967             iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
968         if (error != 0) {
969                 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
970                 dma->vaddr = NULL;
971                 goto fail;
972         }
973
974         bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
975
976         return 0;
977
978 fail:
979         iwm_dma_contig_free(dma);
980
981         return error;
982 }
983
984 static void
985 iwm_dma_contig_free(struct iwm_dma_info *dma)
986 {
987         if (dma->vaddr != NULL) {
988                 bus_dmamap_sync(dma->tag, dma->map,
989                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
990                 bus_dmamap_unload(dma->tag, dma->map);
991                 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
992                 dma->vaddr = NULL;
993         }
994         if (dma->tag != NULL) {
995                 bus_dma_tag_destroy(dma->tag);
996                 dma->tag = NULL;
997         }
998 }
999
1000 /* fwmem is used to load firmware onto the card */
1001 static int
1002 iwm_alloc_fwmem(struct iwm_softc *sc)
1003 {
1004         /* Must be aligned on a 16-byte boundary. */
1005         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
1006             sc->sc_fwdmasegsz, 16);
1007 }
1008
1009 /* tx scheduler rings.  not used? */
1010 static int
1011 iwm_alloc_sched(struct iwm_softc *sc)
1012 {
1013         /* TX scheduler rings must be aligned on a 1KB boundary. */
1014         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
1015             nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
1016 }
1017
1018 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
1019 static int
1020 iwm_alloc_kw(struct iwm_softc *sc)
1021 {
1022         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
1023 }
1024
1025 /* interrupt cause table */
1026 static int
1027 iwm_alloc_ict(struct iwm_softc *sc)
1028 {
1029         return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
1030             IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
1031 }
1032
1033 static int
1034 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1035 {
1036         bus_size_t size;
1037         int i, error;
1038
1039         ring->cur = 0;
1040
1041         /* Allocate RX descriptors (256-byte aligned). */
1042         size = IWM_RX_RING_COUNT * sizeof(uint32_t);
1043         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1044         if (error != 0) {
1045                 device_printf(sc->sc_dev,
1046                     "could not allocate RX ring DMA memory\n");
1047                 goto fail;
1048         }
1049         ring->desc = ring->desc_dma.vaddr;
1050
1051         /* Allocate RX status area (16-byte aligned). */
1052         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1053             sizeof(*ring->stat), 16);
1054         if (error != 0) {
1055                 device_printf(sc->sc_dev,
1056                     "could not allocate RX status DMA memory\n");
1057                 goto fail;
1058         }
1059         ring->stat = ring->stat_dma.vaddr;
1060
1061         /* Create RX buffer DMA tag. */
1062         error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1063             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1064             IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
1065         if (error != 0) {
1066                 device_printf(sc->sc_dev,
1067                     "%s: could not create RX buf DMA tag, error %d\n",
1068                     __func__, error);
1069                 goto fail;
1070         }
1071
1072         /* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
1073         error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
1074         if (error != 0) {
1075                 device_printf(sc->sc_dev,
1076                     "%s: could not create RX buf DMA map, error %d\n",
1077                     __func__, error);
1078                 goto fail;
1079         }
1080         /*
1081          * Allocate and map RX buffers.
1082          */
1083         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1084                 struct iwm_rx_data *data = &ring->data[i];
1085                 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1086                 if (error != 0) {
1087                         device_printf(sc->sc_dev,
1088                             "%s: could not create RX buf DMA map, error %d\n",
1089                             __func__, error);
1090                         goto fail;
1091                 }
1092                 data->m = NULL;
1093
1094                 if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
1095                         goto fail;
1096                 }
1097         }
1098         return 0;
1099
1100 fail:   iwm_free_rx_ring(sc, ring);
1101         return error;
1102 }
1103
1104 static void
1105 iwm_disable_rx_dma(struct iwm_softc *sc)
1106 {
1107         /* XXX conditional nic locks are stupid */
1108         /* XXX print out if we can't lock the NIC? */
1109         if (iwm_nic_lock(sc)) {
1110                 /* XXX handle if RX stop doesn't finish? */
1111                 (void) iwm_pcie_rx_stop(sc);
1112                 iwm_nic_unlock(sc);
1113         }
1114 }
1115
1116 static void
1117 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1118 {
1119         /* Reset the ring state */
1120         ring->cur = 0;
1121
1122         /*
1123          * The hw rx ring index in shared memory must also be cleared,
1124          * otherwise the discrepancy can cause reprocessing chaos.
1125          */
1126         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1127 }
1128
1129 static void
1130 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1131 {
1132         int i;
1133
1134         iwm_dma_contig_free(&ring->desc_dma);
1135         iwm_dma_contig_free(&ring->stat_dma);
1136
1137         for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1138                 struct iwm_rx_data *data = &ring->data[i];
1139
1140                 if (data->m != NULL) {
1141                         bus_dmamap_sync(ring->data_dmat, data->map,
1142                             BUS_DMASYNC_POSTREAD);
1143                         bus_dmamap_unload(ring->data_dmat, data->map);
1144                         m_freem(data->m);
1145                         data->m = NULL;
1146                 }
1147                 if (data->map != NULL) {
1148                         bus_dmamap_destroy(ring->data_dmat, data->map);
1149                         data->map = NULL;
1150                 }
1151         }
1152         if (ring->spare_map != NULL) {
1153                 bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1154                 ring->spare_map = NULL;
1155         }
1156         if (ring->data_dmat != NULL) {
1157                 bus_dma_tag_destroy(ring->data_dmat);
1158                 ring->data_dmat = NULL;
1159         }
1160 }
1161
1162 static int
1163 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1164 {
1165         bus_addr_t paddr;
1166         bus_size_t size;
1167         size_t maxsize;
1168         int nsegments;
1169         int i, error;
1170
1171         ring->qid = qid;
1172         ring->queued = 0;
1173         ring->cur = 0;
1174
1175         /* Allocate TX descriptors (256-byte aligned). */
1176         size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1177         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1178         if (error != 0) {
1179                 device_printf(sc->sc_dev,
1180                     "could not allocate TX ring DMA memory\n");
1181                 goto fail;
1182         }
1183         ring->desc = ring->desc_dma.vaddr;
1184
1185         /*
1186          * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
1187          * to allocate command space for the other rings.
1188          */
1189         if (qid > IWM_MVM_CMD_QUEUE)
1190                 return 0;
1191
1192         size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1193         error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1194         if (error != 0) {
1195                 device_printf(sc->sc_dev,
1196                     "could not allocate TX cmd DMA memory\n");
1197                 goto fail;
1198         }
1199         ring->cmd = ring->cmd_dma.vaddr;
1200
1201         /* FW commands may require more mapped space than packets. */
1202         if (qid == IWM_MVM_CMD_QUEUE) {
1203                 maxsize = IWM_RBUF_SIZE;
1204                 nsegments = 1;
1205         } else {
1206                 maxsize = MCLBYTES;
1207                 nsegments = IWM_MAX_SCATTER - 2;
1208         }
1209
1210         error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1211             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
1212             nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
1213         if (error != 0) {
1214                 device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
1215                 goto fail;
1216         }
1217
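        /*
         * Precompute, for every slot in the ring, the bus address of its
         * command buffer and of the scratch area inside the TX command:
         * slot i's command lives at cmd_dma.paddr + i * sizeof(struct
         * iwm_device_cmd), and scratch_paddr points at the scratch field
         * that follows the command header within it.
         */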
1218         paddr = ring->cmd_dma.paddr;
1219         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1220                 struct iwm_tx_data *data = &ring->data[i];
1221
1222                 data->cmd_paddr = paddr;
1223                 data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1224                     + offsetof(struct iwm_tx_cmd, scratch);
1225                 paddr += sizeof(struct iwm_device_cmd);
1226
1227                 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1228                 if (error != 0) {
1229                         device_printf(sc->sc_dev,
1230                             "could not create TX buf DMA map\n");
1231                         goto fail;
1232                 }
1233         }
1234         KASSERT(paddr == ring->cmd_dma.paddr + size,
1235             ("invalid physical address"));
1236         return 0;
1237
1238 fail:   iwm_free_tx_ring(sc, ring);
1239         return error;
1240 }
1241
1242 static void
1243 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1244 {
1245         int i;
1246
1247         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1248                 struct iwm_tx_data *data = &ring->data[i];
1249
1250                 if (data->m != NULL) {
1251                         bus_dmamap_sync(ring->data_dmat, data->map,
1252                             BUS_DMASYNC_POSTWRITE);
1253                         bus_dmamap_unload(ring->data_dmat, data->map);
1254                         m_freem(data->m);
1255                         data->m = NULL;
1256                 }
1257         }
1258         /* Clear TX descriptors. */
1259         memset(ring->desc, 0, ring->desc_dma.size);
1260         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1261             BUS_DMASYNC_PREWRITE);
1262         sc->qfullmsk &= ~(1 << ring->qid);
1263         ring->queued = 0;
1264         ring->cur = 0;
1265 }
1266
1267 static void
1268 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1269 {
1270         int i;
1271
1272         iwm_dma_contig_free(&ring->desc_dma);
1273         iwm_dma_contig_free(&ring->cmd_dma);
1274
1275         for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1276                 struct iwm_tx_data *data = &ring->data[i];
1277
1278                 if (data->m != NULL) {
1279                         bus_dmamap_sync(ring->data_dmat, data->map,
1280                             BUS_DMASYNC_POSTWRITE);
1281                         bus_dmamap_unload(ring->data_dmat, data->map);
1282                         m_freem(data->m);
1283                         data->m = NULL;
1284                 }
1285                 if (data->map != NULL) {
1286                         bus_dmamap_destroy(ring->data_dmat, data->map);
1287                         data->map = NULL;
1288                 }
1289         }
1290         if (ring->data_dmat != NULL) {
1291                 bus_dma_tag_destroy(ring->data_dmat);
1292                 ring->data_dmat = NULL;
1293         }
1294 }
1295
1296 /*
1297  * High-level hardware frobbing routines
1298  */
1299
1300 static void
1301 iwm_enable_interrupts(struct iwm_softc *sc)
1302 {
1303         sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1304         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1305 }
1306
1307 static void
1308 iwm_restore_interrupts(struct iwm_softc *sc)
1309 {
1310         IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1311 }
1312
1313 static void
1314 iwm_disable_interrupts(struct iwm_softc *sc)
1315 {
1316         /* disable interrupts */
1317         IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1318
1319         /* acknowledge all interrupts */
1320         IWM_WRITE(sc, IWM_CSR_INT, ~0);
1321         IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1322 }
1323
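/*
 * ICT ("interrupt cause table", in iwlwifi terms) support: rather than
 * having the driver read interrupt causes out of CSR registers, the
 * device writes them into a small DMA table in host memory.
 * iwm_ict_reset() below zeroes that table, programs its physical
 * address into IWM_CSR_DRAM_INT_TBL_REG, flips the driver into ICT mode
 * via IWM_FLAG_USE_ICT and then re-enables interrupts.
 */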
1324 static void
1325 iwm_ict_reset(struct iwm_softc *sc)
1326 {
1327         iwm_disable_interrupts(sc);
1328
1329         /* Reset ICT table. */
1330         memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1331         sc->ict_cur = 0;
1332
1333         /* Set physical address of ICT table (4KB aligned). */
1334         IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1335             IWM_CSR_DRAM_INT_TBL_ENABLE
1336             | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1337             | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1338             | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1339
1340         /* Switch to ICT interrupt mode in driver. */
1341         sc->sc_flags |= IWM_FLAG_USE_ICT;
1342
1343         /* Re-enable interrupts. */
1344         IWM_WRITE(sc, IWM_CSR_INT, ~0);
1345         iwm_enable_interrupts(sc);
1346 }
1347
1348 /* iwlwifi pcie/trans.c */
1349
1350 /*
1351  * Since this hard-resets the device, it's time to actually
1352  * mark the first vap (if any) as having no MAC context.
1353  * It's annoying, but since the driver may be stopped and
1354  * started while active (thanks, OpenBSD port!) we have to
1355  * track this correctly.
1356  */
1357 static void
1358 iwm_stop_device(struct iwm_softc *sc)
1359 {
1360         struct ieee80211com *ic = &sc->sc_ic;
1361         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1362         int chnl, qid;
1363         uint32_t mask = 0;
1364
1365         /* tell the device to stop sending interrupts */
1366         iwm_disable_interrupts(sc);
1367
1368         /*
1369          * FreeBSD-local: mark the first vap as not-uploaded,
1370          * so the next transition through auth/assoc
1371          * will correctly populate the MAC context.
1372          */
1373         if (vap) {
1374                 struct iwm_vap *iv = IWM_VAP(vap);
1375                 iv->is_uploaded = 0;
1376         }
1377
1378         /* Device going down; stop using the ICT table. */
1379         sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1380
1381         /* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
1382
1383         iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1384
1385         if (iwm_nic_lock(sc)) {
1386                 /* Stop each Tx DMA channel */
1387                 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1388                         IWM_WRITE(sc,
1389                             IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1390                         mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1391                 }
1392
1393                 /* Wait for DMA channels to be idle */
1394                 if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1395                     5000)) {
1396                         device_printf(sc->sc_dev,
1397                             "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1398                             IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1399                 }
1400                 iwm_nic_unlock(sc);
1401         }
1402         iwm_disable_rx_dma(sc);
1403
1404         /* Stop RX ring. */
1405         iwm_reset_rx_ring(sc, &sc->rxq);
1406
1407         /* Reset all TX rings. */
1408         for (qid = 0; qid < nitems(sc->txq); qid++)
1409                 iwm_reset_tx_ring(sc, &sc->txq[qid]);
1410
1411         /*
1412          * Power-down device's busmaster DMA clocks
1413          */
1414         iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1415         DELAY(5);
1416
1417         /* Make sure (redundant) we've released our request to stay awake */
1418         IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1419             IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1420
1421         /* Stop the device, and put it in low power state */
1422         iwm_apm_stop(sc);
1423
1424         /* Upon stop, the APM issues an interrupt if HW RF kill is set.
1425          * Clear the interrupt again here.
1426          */
1427         iwm_disable_interrupts(sc);
1428         /* stop and reset the on-board processor */
1429         IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1430
1431         /*
1432          * Even if we stop the HW, we still want the RF kill
1433          * interrupt
1434          */
1435         iwm_enable_rfkill_int(sc);
1436         iwm_check_rfkill(sc);
1437 }
1438
1439 /* iwlwifi: mvm/ops.c */
1440 static void
1441 iwm_mvm_nic_config(struct iwm_softc *sc)
1442 {
1443         uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1444         uint32_t reg_val = 0;
1445         uint32_t phy_config = iwm_mvm_get_phy_config(sc);
1446
1447         radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1448             IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1449         radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1450             IWM_FW_PHY_CFG_RADIO_STEP_POS;
1451         radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1452             IWM_FW_PHY_CFG_RADIO_DASH_POS;
1453
1454         /* SKU control */
1455         reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1456             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1457         reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1458             IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1459
1460         /* radio configuration */
1461         reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1462         reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1463         reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1464
1465         IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1466
1467         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1468             "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1469             radio_cfg_step, radio_cfg_dash);
1470
1471         /*
1472          * W/A : NIC is stuck in a reset state after Early PCIe power off
1473          * (PCIe power is lost before PERST# is asserted), causing ME FW
1474          * to lose ownership and not be able to obtain it back.
1475          */
1476         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1477                 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1478                     IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1479                     ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1480         }
1481 }
1482
1483 static int
1484 iwm_nic_rx_init(struct iwm_softc *sc)
1485 {
1486         if (!iwm_nic_lock(sc))
1487                 return EBUSY;
1488
1489         /*
1490          * Initialize RX ring.  This is from the iwn driver.
1491          */
1492         memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1493
1494         /* stop DMA */
1495         iwm_disable_rx_dma(sc);
1496         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1497         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1498         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1499         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1500
1501         /* Set physical address of RX ring (256-byte aligned). */
1502         IWM_WRITE(sc,
1503             IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1504
1505         /* Set physical address of RX status (16-byte aligned). */
1506         IWM_WRITE(sc,
1507             IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1508
1509         /* Enable RX. */
1510         IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1511             IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
1512             IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |  /* HW bug */
1513             IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
1514             IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK        |
1515             (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1516             IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
1517             IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1518
1519         IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1520
1521         /* W/A for interrupt coalescing bug in 7260 and 3160 */
1522         if (sc->cfg->host_interrupt_operation_mode)
1523                 IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1524
1525         /*
1526          * Thus sayeth el jefe (iwlwifi) via a comment:
1527          *
1528          * This value should initially be 0 (before preparing any
1529          * RBs), should be 8 after preparing the first 8 RBs (for example)
1530          */
1531         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1532
1533         iwm_nic_unlock(sc);
1534
1535         return 0;
1536 }
1537
1538 static int
1539 iwm_nic_tx_init(struct iwm_softc *sc)
1540 {
1541         int qid;
1542
1543         if (!iwm_nic_lock(sc))
1544                 return EBUSY;
1545
1546         /* Deactivate TX scheduler. */
1547         iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1548
1549         /* Set physical address of "keep warm" page (16-byte aligned). */
1550         IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1551
1552         /* Initialize TX rings. */
1553         for (qid = 0; qid < nitems(sc->txq); qid++) {
1554                 struct iwm_tx_ring *txq = &sc->txq[qid];
1555
1556                 /* Set physical address of TX ring (256-byte aligned). */
1557                 IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1558                     txq->desc_dma.paddr >> 8);
1559                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1560                     "%s: loading ring %d descriptors (%p) at %lx\n",
1561                     __func__,
1562                     qid, txq->desc,
1563                     (unsigned long) (txq->desc_dma.paddr >> 8));
1564         }
1565
1566         iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1567
1568         iwm_nic_unlock(sc);
1569
1570         return 0;
1571 }
1572
1573 static int
1574 iwm_nic_init(struct iwm_softc *sc)
1575 {
1576         int error;
1577
1578         iwm_apm_init(sc);
1579         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1580                 iwm_set_pwr(sc);
1581
1582         iwm_mvm_nic_config(sc);
1583
1584         if ((error = iwm_nic_rx_init(sc)) != 0)
1585                 return error;
1586
1587         /*
1588          * Ditto for TX, from iwn
1589          */
1590         if ((error = iwm_nic_tx_init(sc)) != 0)
1591                 return error;
1592
1593         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1594             "%s: shadow registers enabled\n", __func__);
1595         IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1596
1597         return 0;
1598 }
1599
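/*
 * Access-category to TX-FIFO mapping, mirroring the equivalent table in
 * iwlwifi: indexed by an access category number, it yields the hardware
 * TX FIFO that the corresponding queue should use.
 */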
1600 const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
1601         IWM_MVM_TX_FIFO_VO,
1602         IWM_MVM_TX_FIFO_VI,
1603         IWM_MVM_TX_FIFO_BE,
1604         IWM_MVM_TX_FIFO_BK,
1605 };
1606
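/*
 * Activate a TX queue in the scheduler.  The command queue is set up
 * directly by poking the scheduler's PRPH registers and SRAM context,
 * while regular data queues are configured by sending an
 * IWM_SCD_QUEUE_CFG command to the firmware instead.
 */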
1607 static int
1608 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1609 {
1610         if (!iwm_nic_lock(sc)) {
1611                 device_printf(sc->sc_dev,
1612                     "%s: cannot enable txq %d\n",
1613                     __func__,
1614                     qid);
1615                 return EBUSY;
1616         }
1617
1618         IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1619
1620         if (qid == IWM_MVM_CMD_QUEUE) {
1621                 /* deactivate before configuration */
1622                 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1623                     (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1624                     | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1625
1626                 iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1627
1628                 iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1629
1630                 iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1631                 /* Set scheduler window size and frame limit. */
1632                 iwm_write_mem32(sc,
1633                     sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1634                     sizeof(uint32_t),
1635                     ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1636                     IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1637                     ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1638                     IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1639
1640                 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1641                     (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1642                     (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1643                     (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1644                     IWM_SCD_QUEUE_STTS_REG_MSK);
1645         } else {
1646                 struct iwm_scd_txq_cfg_cmd cmd;
1647                 int error;
1648
1649                 iwm_nic_unlock(sc);
1650
1651                 memset(&cmd, 0, sizeof(cmd));
1652                 cmd.scd_queue = qid;
1653                 cmd.enable = 1;
1654                 cmd.sta_id = sta_id;
1655                 cmd.tx_fifo = fifo;
1656                 cmd.aggregate = 0;
1657                 cmd.window = IWM_FRAME_LIMIT;
1658
1659                 error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1660                     sizeof(cmd), &cmd);
1661                 if (error) {
1662                         device_printf(sc->sc_dev,
1663                             "cannot enable txq %d\n", qid);
1664                         return error;
1665                 }
1666
1667                 if (!iwm_nic_lock(sc))
1668                         return EBUSY;
1669         }
1670
1671         iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1672             iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1673
1674         iwm_nic_unlock(sc);
1675
1676         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1677             __func__, qid, fifo);
1678
1679         return 0;
1680 }
1681
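/*
 * Post-"alive" setup: once the firmware has reported alive, re-arm the
 * ICT table, clear the TX scheduler context area in SRAM, point the
 * scheduler at its DRAM base, enable the command queue and turn on the
 * FH TX DMA channels.
 */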
1682 static int
1683 iwm_post_alive(struct iwm_softc *sc)
1684 {
1685         int nwords;
1686         int error, chnl;
1687         uint32_t base;
1688
1689         if (!iwm_nic_lock(sc))
1690                 return EBUSY;
1691
1692         base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1693         if (sc->sched_base != base) {
1694                 device_printf(sc->sc_dev,
1695                     "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1696                     __func__, sc->sched_base, base);
1697         }
1698
1699         iwm_ict_reset(sc);
1700
1701         /* Clear TX scheduler state in SRAM. */
1702         nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1703             IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
1704             / sizeof(uint32_t);
1705         error = iwm_write_mem(sc,
1706             sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1707             NULL, nwords);
1708         if (error)
1709                 goto out;
1710
1711         /* Set physical address of TX scheduler rings (1KB aligned). */
1712         iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1713
1714         iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1715
1716         iwm_nic_unlock(sc);
1717
1718         /* enable command channel */
1719         error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
1720         if (error)
1721                 return error;
1722
1723         if (!iwm_nic_lock(sc))
1724                 return EBUSY;
1725
1726         iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1727
1728         /* Enable DMA channels. */
1729         for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1730                 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1731                     IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1732                     IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1733         }
1734
1735         IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1736             IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1737
1738         /* Enable L1-Active */
1739         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
1740                 iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1741                     IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1742         }
1743
1744  out:
1745         iwm_nic_unlock(sc);
1746         return error;
1747 }
1748
1749 /*
1750  * NVM read access and content parsing.  We do not support
1751  * external NVM or writing NVM.
1752  * iwlwifi/mvm/nvm.c
1753  */
1754
1755 /* Default NVM size to read */
1756 #define IWM_NVM_DEFAULT_CHUNK_SIZE      (2*1024)
1757
1758 #define IWM_NVM_WRITE_OPCODE 1
1759 #define IWM_NVM_READ_OPCODE 0
1760
1761 /* load nvm chunk response */
1762 enum {
1763         IWM_READ_NVM_CHUNK_SUCCEED = 0,
1764         IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1765 };
1766
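/*
 * Read one chunk of an NVM section via the firmware: send an
 * IWM_NVM_ACCESS_CMD with the section type, offset and length, then
 * copy the returned bytes into the caller's buffer at that offset.
 * The caller uses a short (or zero-length) response to detect the end
 * of a section.
 */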
1767 static int
1768 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1769         uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1770 {
1771         struct iwm_nvm_access_cmd nvm_access_cmd = {
1772                 .offset = htole16(offset),
1773                 .length = htole16(length),
1774                 .type = htole16(section),
1775                 .op_code = IWM_NVM_READ_OPCODE,
1776         };
1777         struct iwm_nvm_access_resp *nvm_resp;
1778         struct iwm_rx_packet *pkt;
1779         struct iwm_host_cmd cmd = {
1780                 .id = IWM_NVM_ACCESS_CMD,
1781                 .flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1782                 .data = { &nvm_access_cmd, },
1783         };
1784         int ret, bytes_read, offset_read;
1785         uint8_t *resp_data;
1786
1787         cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1788
1789         ret = iwm_send_cmd(sc, &cmd);
1790         if (ret) {
1791                 device_printf(sc->sc_dev,
1792                     "Could not send NVM_ACCESS command (error=%d)\n", ret);
1793                 return ret;
1794         }
1795
1796         pkt = cmd.resp_pkt;
1797
1798         /* Extract NVM response */
1799         nvm_resp = (void *)pkt->data;
1800         ret = le16toh(nvm_resp->status);
1801         bytes_read = le16toh(nvm_resp->length);
1802         offset_read = le16toh(nvm_resp->offset);
1803         resp_data = nvm_resp->data;
1804         if (ret) {
1805                 if ((offset != 0) &&
1806                     (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1807                         /*
1808                          * Meaning of NOT_VALID_ADDRESS: the driver tried to
1809                          * read a chunk from an address that is a multiple of
1810                          * 2K and got an error because that address is empty.
1811                          * Meaning of (offset != 0): the driver already read
1812                          * valid data from another chunk, so this case is not
1813                          * an error.
1814                          */
1815                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1816                                     "NVM access command failed at offset 0x%x since that section size is a multiple of 2K\n",
1817                                     offset);
1818                         *len = 0;
1819                         ret = 0;
1820                 } else {
1821                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1822                                     "NVM access command failed with status %d\n", ret);
1823                         ret = EIO;
1824                 }
1825                 goto exit;
1826         }
1827
1828         if (offset_read != offset) {
1829                 device_printf(sc->sc_dev,
1830                     "NVM ACCESS response with invalid offset %d\n",
1831                     offset_read);
1832                 ret = EINVAL;
1833                 goto exit;
1834         }
1835
1836         if (bytes_read > length) {
1837                 device_printf(sc->sc_dev,
1838                     "NVM ACCESS response with too much data "
1839                     "(%d bytes requested, %d bytes received)\n",
1840                     length, bytes_read);
1841                 ret = EINVAL;
1842                 goto exit;
1843         }
1844
1845         /* Write data to NVM */
1846         memcpy(data + offset, resp_data, bytes_read);
1847         *len = bytes_read;
1848
1849  exit:
1850         iwm_free_resp(sc, &cmd);
1851         return ret;
1852 }
1853
1854 /*
1855  * Reads an NVM section completely.
1856  * NICs prior to the 7000 family don't have a real NVM, but just read
1857  * section 0, which is the EEPROM. Because EEPROM reads are not limited
1858  * by the uCode, we need to manually check in this case that we don't
1859  * overflow and try to read more than the EEPROM size.
1860  * For 7000-family NICs, we supply the maximal size we can read, and
1861  * the uCode fills the response with as much data as it can without
1862  * overflowing, so no check is needed.
1863  */
1864 static int
1865 iwm_nvm_read_section(struct iwm_softc *sc,
1866         uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1867 {
1868         uint16_t seglen, length, offset = 0;
1869         int ret;
1870
1871         /* Set nvm section read length */
1872         length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1873
1874         seglen = length;
1875
1876         /* Read the NVM until exhausted (reading less than requested) */
1877         while (seglen == length) {
1878                 /* Check no memory assumptions fail and cause an overflow */
1879                 if ((size_read + offset + length) >
1880                     sc->cfg->eeprom_size) {
1881                         device_printf(sc->sc_dev,
1882                             "EEPROM size is too small for NVM\n");
1883                         return ENOBUFS;
1884                 }
1885
1886                 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1887                 if (ret) {
1888                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1889                                     "Cannot read NVM from section %d offset %d, length %d\n",
1890                                     section, offset, length);
1891                         return ret;
1892                 }
1893                 offset += seglen;
1894         }
1895
1896         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1897                     "NVM section %d read completed\n", section);
1898         *len = offset;
1899         return 0;
1900 }
1901
1902 /*
1903  * BEGIN IWM_NVM_PARSE
1904  */
1905
1906 /* iwlwifi/iwl-nvm-parse.c */
1907
1908 /* NVM offsets (in words) definitions */
1909 enum iwm_nvm_offsets {
1910         /* NVM HW-Section offset (in words) definitions */
1911         IWM_HW_ADDR = 0x15,
1912
1913 /* NVM SW-Section offset (in words) definitions */
1914         IWM_NVM_SW_SECTION = 0x1C0,
1915         IWM_NVM_VERSION = 0,
1916         IWM_RADIO_CFG = 1,
1917         IWM_SKU = 2,
1918         IWM_N_HW_ADDRS = 3,
1919         IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1920
1921 /* NVM calibration section offset (in words) definitions */
1922         IWM_NVM_CALIB_SECTION = 0x2B8,
1923         IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1924 };
1925
1926 enum iwm_8000_nvm_offsets {
1927         /* NVM HW-Section offset (in words) definitions */
1928         IWM_HW_ADDR0_WFPM_8000 = 0x12,
1929         IWM_HW_ADDR1_WFPM_8000 = 0x16,
1930         IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1931         IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1932         IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1933
1934         /* NVM SW-Section offset (in words) definitions */
1935         IWM_NVM_SW_SECTION_8000 = 0x1C0,
1936         IWM_NVM_VERSION_8000 = 0,
1937         IWM_RADIO_CFG_8000 = 0,
1938         IWM_SKU_8000 = 2,
1939         IWM_N_HW_ADDRS_8000 = 3,
1940
1941         /* NVM REGULATORY -Section offset (in words) definitions */
1942         IWM_NVM_CHANNELS_8000 = 0,
1943         IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1944         IWM_NVM_LAR_OFFSET_8000 = 0x507,
1945         IWM_NVM_LAR_ENABLED_8000 = 0x7,
1946
1947         /* NVM calibration section offset (in words) definitions */
1948         IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1949         IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1950 };
1951
1952 /* SKU Capabilities (actual values from NVM definition) */
1953 enum nvm_sku_bits {
1954         IWM_NVM_SKU_CAP_BAND_24GHZ      = (1 << 0),
1955         IWM_NVM_SKU_CAP_BAND_52GHZ      = (1 << 1),
1956         IWM_NVM_SKU_CAP_11N_ENABLE      = (1 << 2),
1957         IWM_NVM_SKU_CAP_11AC_ENABLE     = (1 << 3),
1958 };
1959
1960 /* radio config bits (actual values from NVM definition) */
1961 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1962 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1963 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1964 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1965 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1966 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
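/*
 * Illustrative example of the pre-8000 layout above: a radio_cfg word
 * of 0x2102 decodes to dash 2, step 0, type 0, pnum 0, valid TX antenna
 * mask 0x1 and valid RX antenna mask 0x2.
 */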
1967
1968 #define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)       (x & 0xF)
1969 #define IWM_NVM_RF_CFG_DASH_MSK_8000(x)         ((x >> 4) & 0xF)
1970 #define IWM_NVM_RF_CFG_STEP_MSK_8000(x)         ((x >> 8) & 0xF)
1971 #define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)         ((x >> 12) & 0xFFF)
1972 #define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)       ((x >> 24) & 0xF)
1973 #define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)       ((x >> 28) & 0xF)
1974
1975 #define DEFAULT_MAX_TX_POWER 16
1976
1977 /**
1978  * enum iwm_nvm_channel_flags - channel flags in NVM
1979  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1980  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1981  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1982  * @IWM_NVM_CHANNEL_RADAR: radar detection required
1983  * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1984  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1985  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1986  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1987  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1988  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1989  */
1990 enum iwm_nvm_channel_flags {
1991         IWM_NVM_CHANNEL_VALID = (1 << 0),
1992         IWM_NVM_CHANNEL_IBSS = (1 << 1),
1993         IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1994         IWM_NVM_CHANNEL_RADAR = (1 << 4),
1995         IWM_NVM_CHANNEL_DFS = (1 << 7),
1996         IWM_NVM_CHANNEL_WIDE = (1 << 8),
1997         IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1998         IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1999         IWM_NVM_CHANNEL_160MHZ = (1 << 11),
2000 };
2001
2002 /*
2003  * Translate EEPROM flags to net80211.
2004  */
2005 static uint32_t
2006 iwm_eeprom_channel_flags(uint16_t ch_flags)
2007 {
2008         uint32_t nflags;
2009
2010         nflags = 0;
2011         if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
2012                 nflags |= IEEE80211_CHAN_PASSIVE;
2013         if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
2014                 nflags |= IEEE80211_CHAN_NOADHOC;
2015         if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
2016                 nflags |= IEEE80211_CHAN_DFS;
2017                 /* Just in case. */
2018                 nflags |= IEEE80211_CHAN_NOADHOC;
2019         }
2020
2021         return (nflags);
2022 }
2023
2024 static void
2025 iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
2026     int maxchans, int *nchans, int ch_idx, size_t ch_num,
2027     const uint8_t bands[])
2028 {
2029         const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
2030         uint32_t nflags;
2031         uint16_t ch_flags;
2032         uint8_t ieee;
2033         int error;
2034
2035         for (; ch_idx < ch_num; ch_idx++) {
2036                 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2037                 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2038                         ieee = iwm_nvm_channels[ch_idx];
2039                 else
2040                         ieee = iwm_nvm_channels_8000[ch_idx];
2041
2042                 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2043                         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2044                             "Ch. %d Flags %x [%sGHz] - No traffic\n",
2045                             ieee, ch_flags,
2046                             (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2047                             "5.2" : "2.4");
2048                         continue;
2049                 }
2050
2051                 nflags = iwm_eeprom_channel_flags(ch_flags);
2052                 error = ieee80211_add_channel(chans, maxchans, nchans,
2053                     ieee, 0, 0, nflags, bands);
2054                 if (error != 0)
2055                         break;
2056
2057                 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2058                     "Ch. %d Flags %x [%sGHz] - Added\n",
2059                     ieee, ch_flags,
2060                     (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2061                     "5.2" : "2.4");
2062         }
2063 }
2064
2065 static void
2066 iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2067     struct ieee80211_channel chans[])
2068 {
2069         struct iwm_softc *sc = ic->ic_softc;
2070         struct iwm_nvm_data *data = sc->nvm_data;
2071         uint8_t bands[IEEE80211_MODE_BYTES];
2072         size_t ch_num;
2073
2074         memset(bands, 0, sizeof(bands));
2075         /* 1-13: 11b/g channels. */
2076         setbit(bands, IEEE80211_MODE_11B);
2077         setbit(bands, IEEE80211_MODE_11G);
2078         iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2079             IWM_NUM_2GHZ_CHANNELS - 1, bands);
2080
2081         /* 14: 11b channel only. */
2082         clrbit(bands, IEEE80211_MODE_11G);
2083         iwm_add_channel_band(sc, chans, maxchans, nchans,
2084             IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2085
2086         if (data->sku_cap_band_52GHz_enable) {
2087                 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2088                         ch_num = nitems(iwm_nvm_channels);
2089                 else
2090                         ch_num = nitems(iwm_nvm_channels_8000);
2091                 memset(bands, 0, sizeof(bands));
2092                 setbit(bands, IEEE80211_MODE_11A);
2093                 iwm_add_channel_band(sc, chans, maxchans, nchans,
2094                     IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2095         }
2096 }
2097
2098 static void
2099 iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2100         const uint16_t *mac_override, const uint16_t *nvm_hw)
2101 {
2102         const uint8_t *hw_addr;
2103
2104         if (mac_override) {
2105                 static const uint8_t reserved_mac[] = {
2106                         0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2107                 };
2108
2109                 hw_addr = (const uint8_t *)(mac_override +
2110                                  IWM_MAC_ADDRESS_OVERRIDE_8000);
2111
2112                 /*
2113                  * Store the MAC address from MAO section.
2114                  * No byte swapping is required in the MAO section.
2115                  */
2116                 IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2117
2118                 /*
2119                  * Force the use of the OTP MAC address in case of reserved MAC
2120                  * address in the NVM, or if address is given but invalid.
2121                  */
2122                 if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2123                     !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2124                     iwm_is_valid_ether_addr(data->hw_addr) &&
2125                     !IEEE80211_IS_MULTICAST(data->hw_addr))
2126                         return;
2127
2128                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2129                     "%s: mac address from nvm override section invalid\n",
2130                     __func__);
2131         }
2132
2133         if (nvm_hw) {
2134                 /* read the mac address from WFMP registers */
2135                 uint32_t mac_addr0 =
2136                     htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2137                 uint32_t mac_addr1 =
2138                     htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2139
2140                 hw_addr = (const uint8_t *)&mac_addr0;
2141                 data->hw_addr[0] = hw_addr[3];
2142                 data->hw_addr[1] = hw_addr[2];
2143                 data->hw_addr[2] = hw_addr[1];
2144                 data->hw_addr[3] = hw_addr[0];
2145
2146                 hw_addr = (const uint8_t *)&mac_addr1;
2147                 data->hw_addr[4] = hw_addr[1];
2148                 data->hw_addr[5] = hw_addr[0];
2149
2150                 return;
2151         }
2152
2153         device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2154         memset(data->hw_addr, 0, sizeof(data->hw_addr));
2155 }
2156
2157 static int
2158 iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2159             const uint16_t *phy_sku)
2160 {
2161         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2162                 return le16_to_cpup(nvm_sw + IWM_SKU);
2163
2164         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2165 }
2166
2167 static int
2168 iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2169 {
2170         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2171                 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2172         else
2173                 return le32_to_cpup((const uint32_t *)(nvm_sw +
2174                                                 IWM_NVM_VERSION_8000));
2175 }
2176
2177 static int
2178 iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2179                   const uint16_t *phy_sku)
2180 {
2181         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2182                 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2183
2184         return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2185 }
2186
2187 static int
2188 iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2189 {
2190         int n_hw_addr;
2191
2192         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2193                 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2194
2195         n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2196
2197         return n_hw_addr & IWM_N_HW_ADDR_MASK;
2198 }
2199
2200 static void
2201 iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2202                   uint32_t radio_cfg)
2203 {
2204         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2205                 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2206                 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2207                 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2208                 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2209                 return;
2210         }
2211
2212         /* set the radio configuration for family 8000 */
2213         data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2214         data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2215         data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2216         data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2217         data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2218         data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2219 }
2220
2221 static int
2222 iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2223                    const uint16_t *nvm_hw, const uint16_t *mac_override)
2224 {
2225 #ifdef notyet /* for FAMILY 9000 */
2226         if (cfg->mac_addr_from_csr) {
2227                 iwm_set_hw_address_from_csr(sc, data);
2228         } else
2229 #endif
2230         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2231                 const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2232
2233                 /* The byte order is little endian 16 bit, meaning 214365 */
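                /* (e.g. raw NVM bytes 11 22 33 44 55 66 give MAC 22:11:44:33:66:55) */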
2234                 data->hw_addr[0] = hw_addr[1];
2235                 data->hw_addr[1] = hw_addr[0];
2236                 data->hw_addr[2] = hw_addr[3];
2237                 data->hw_addr[3] = hw_addr[2];
2238                 data->hw_addr[4] = hw_addr[5];
2239                 data->hw_addr[5] = hw_addr[4];
2240         } else {
2241                 iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2242         }
2243
2244         if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2245                 device_printf(sc->sc_dev, "no valid mac address was found\n");
2246                 return EINVAL;
2247         }
2248
2249         return 0;
2250 }
2251
2252 static struct iwm_nvm_data *
2253 iwm_parse_nvm_data(struct iwm_softc *sc,
2254                    const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2255                    const uint16_t *nvm_calib, const uint16_t *mac_override,
2256                    const uint16_t *phy_sku, const uint16_t *regulatory)
2257 {
2258         struct iwm_nvm_data *data;
2259         uint32_t sku, radio_cfg;
2260
2261         if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2262                 data = malloc(sizeof(*data) +
2263                     IWM_NUM_CHANNELS * sizeof(uint16_t),
2264                     M_DEVBUF, M_NOWAIT | M_ZERO);
2265         } else {
2266                 data = malloc(sizeof(*data) +
2267                     IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2268                     M_DEVBUF, M_NOWAIT | M_ZERO);
2269         }
2270         if (!data)
2271                 return NULL;
2272
2273         data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2274
2275         radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2276         iwm_set_radio_cfg(sc, data, radio_cfg);
2277
2278         sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2279         data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2280         data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2281         data->sku_cap_11n_enable = 0;
2282
2283         data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2284
2285         /* If no valid mac address was found - bail out */
2286         if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2287                 free(data, M_DEVBUF);
2288                 return NULL;
2289         }
2290
2291         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2292                 memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2293                     IWM_NUM_CHANNELS * sizeof(uint16_t));
2294         } else {
2295                 memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2296                     IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2297         }
2298
2299         return data;
2300 }
2301
2302 static void
2303 iwm_free_nvm_data(struct iwm_nvm_data *data)
2304 {
2305         if (data != NULL)
2306                 free(data, M_DEVBUF);
2307 }
2308
2309 static struct iwm_nvm_data *
2310 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2311 {
2312         const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2313
2314         /* Checking for required sections */
2315         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2316                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2317                     !sections[sc->cfg->nvm_hw_section_num].data) {
2318                         device_printf(sc->sc_dev,
2319                             "Can't parse empty OTP/NVM sections\n");
2320                         return NULL;
2321                 }
2322         } else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2323                 /* SW and REGULATORY sections are mandatory */
2324                 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2325                     !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2326                         device_printf(sc->sc_dev,
2327                             "Can't parse empty OTP/NVM sections\n");
2328                         return NULL;
2329                 }
2330                 /* MAC_OVERRIDE or at least HW section must exist */
2331                 if (!sections[sc->cfg->nvm_hw_section_num].data &&
2332                     !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2333                         device_printf(sc->sc_dev,
2334                             "Can't parse mac_address, empty sections\n");
2335                         return NULL;
2336                 }
2337
2338                 /* PHY_SKU section is mandatory in B0 */
2339                 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2340                         device_printf(sc->sc_dev,
2341                             "Can't parse phy_sku in B0, empty sections\n");
2342                         return NULL;
2343                 }
2344         } else {
2345                 panic("unknown device family %d\n", sc->cfg->device_family);
2346         }
2347
2348         hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2349         sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2350         calib = (const uint16_t *)
2351             sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2352         regulatory = (const uint16_t *)
2353             sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2354         mac_override = (const uint16_t *)
2355             sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2356         phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2357
2358         return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2359             phy_sku, regulatory);
2360 }
2361
2362 static int
2363 iwm_nvm_init(struct iwm_softc *sc)
2364 {
2365         struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2366         int i, ret, section;
2367         uint32_t size_read = 0;
2368         uint8_t *nvm_buffer, *temp;
2369         uint16_t len;
2370
2371         memset(nvm_sections, 0, sizeof(nvm_sections));
2372
2373         if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2374                 return EINVAL;
2375
2376         /* load NVM values from nic */
2377         /* Read From FW NVM */
2378         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2379
2380         nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2381         if (!nvm_buffer)
2382                 return ENOMEM;
2383         for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2384                 /* we override the constness for initial read */
2385                 ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2386                                            &len, size_read);
2387                 if (ret)
2388                         continue;
2389                 size_read += len;
2390                 temp = malloc(len, M_DEVBUF, M_NOWAIT);
2391                 if (!temp) {
2392                         ret = ENOMEM;
2393                         break;
2394                 }
2395                 memcpy(temp, nvm_buffer, len);
2396
2397                 nvm_sections[section].data = temp;
2398                 nvm_sections[section].length = len;
2399         }
2400         if (!size_read)
2401                 device_printf(sc->sc_dev, "OTP is blank\n");
2402         free(nvm_buffer, M_DEVBUF);
2403
2404         sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2405         if (!sc->nvm_data)
2406                 return EINVAL;
2407         IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2408                     "nvm version = %x\n", sc->nvm_data->nvm_version);
2409
2410         for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2411                 if (nvm_sections[i].data != NULL)
2412                         free(nvm_sections[i].data, M_DEVBUF);
2413         }
2414
2415         return 0;
2416 }
2417
2418 /*
2419  * Firmware loading gunk.  This is kind of a weird hybrid between the
2420  * iwn driver and the Linux iwlwifi driver.
2421  */
2422
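/*
 * Load one firmware section into device memory.  The section is split
 * into chunks of at most IWM_FH_MEM_TB_MAX_LENGTH bytes; each chunk is
 * bounced through the pre-allocated fw_dma buffer and DMA'd to
 * dst_addr + offset by iwm_firmware_load_chunk() below.
 */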
2423 static int
2424 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
2425         const uint8_t *section, uint32_t byte_cnt)
2426 {
2427         int error = EINVAL;
2428         uint32_t chunk_sz, offset;
2429
2430         chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
2431
2432         for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
2433                 uint32_t addr, len;
2434                 const uint8_t *data;
2435
2436                 addr = dst_addr + offset;
2437                 len = MIN(chunk_sz, byte_cnt - offset);
2438                 data = section + offset;
2439
2440                 error = iwm_firmware_load_chunk(sc, addr, data, len);
2441                 if (error)
2442                         break;
2443         }
2444
2445         return error;
2446 }
2447
2448 static int
2449 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2450         const uint8_t *chunk, uint32_t byte_cnt)
2451 {
2452         struct iwm_dma_info *dma = &sc->fw_dma;
2453         int error;
2454
2455         /* Copy firmware chunk into pre-allocated DMA-safe memory. */
2456         memcpy(dma->vaddr, chunk, byte_cnt);
2457         bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2458
2459         if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2460             dst_addr <= IWM_FW_MEM_EXTENDED_END) {
2461                 iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2462                     IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2463         }
2464
2465         sc->sc_fw_chunk_done = 0;
2466
2467         if (!iwm_nic_lock(sc))
2468                 return EBUSY;
2469
2470         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2471             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2472         IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2473             dst_addr);
2474         IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2475             dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2476         IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2477             (iwm_get_dma_hi_addr(dma->paddr)
2478               << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2479         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2480             1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2481             1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2482             IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2483         IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2484             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2485             IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2486             IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2487
2488         iwm_nic_unlock(sc);
2489
2490         /* wait 1s for this segment to load */
2491         while (!sc->sc_fw_chunk_done)
2492                 if ((error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz)) != 0)
2493                         break;
2494
2495         if (!sc->sc_fw_chunk_done) {
2496                 device_printf(sc->sc_dev,
2497                     "fw chunk addr 0x%x len %d failed to load\n",
2498                     dst_addr, byte_cnt);
2499         }
2500
2501         if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2502             dst_addr <= IWM_FW_MEM_EXTENDED_END && iwm_nic_lock(sc)) {
2503                 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2504                     IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2505                 iwm_nic_unlock(sc);
2506         }
2507
2508         return error;
2509 }
2510
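/*
 * Family-8000 images carry firmware sections for two on-board CPUs.
 * The IWM_CPU1_CPU2_SEPARATOR_SECTION and IWM_PAGING_SEPARATOR_SECTION
 * pseudo-offsets mark where one group of sections ends.  After each
 * section is loaded the ucode is notified through the
 * IWM_FH_UCODE_LOAD_STATUS bitmap, shifted by 16 bits for CPU2.
 */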
2511 int
2512 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
2513     int cpu, int *first_ucode_section)
2514 {
2515         int shift_param;
2516         int i, error = 0, sec_num = 0x1;
2517         uint32_t val, last_read_idx = 0;
2518         const void *data;
2519         uint32_t dlen;
2520         uint32_t offset;
2521
2522         if (cpu == 1) {
2523                 shift_param = 0;
2524                 *first_ucode_section = 0;
2525         } else {
2526                 shift_param = 16;
2527                 (*first_ucode_section)++;
2528         }
2529
2530         for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2531                 last_read_idx = i;
2532                 data = fws->fw_sect[i].fws_data;
2533                 dlen = fws->fw_sect[i].fws_len;
2534                 offset = fws->fw_sect[i].fws_devoff;
2535
2536                 /*
2537                  * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the CPU1
2538                  * sections from the CPU2 sections.
2539                  * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged
2540                  * CPU2 sections from the CPU2 paging sections.
2541                  */
2542                 if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2543                     offset == IWM_PAGING_SEPARATOR_SECTION)
2544                         break;
2545
2546                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2547                     "LOAD FIRMWARE chunk %d offset 0x%x len %d for cpu %d\n",
2548                     i, offset, dlen, cpu);
2549
2550                 if (dlen > sc->sc_fwdmasegsz) {
2551                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2552                             "chunk %d too large (%d bytes)\n", i, dlen);
2553                         error = EFBIG;
2554                 } else {
2555                         error = iwm_firmware_load_sect(sc, offset, data, dlen);
2556                 }
2557                 if (error) {
2558                         device_printf(sc->sc_dev,
2559                             "could not load firmware chunk %d (error %d)\n",
2560                             i, error);
2561                         return error;
2562                 }
2563
2564                 /* Notify the ucode of the loaded section number and status */
2565                 if (iwm_nic_lock(sc)) {
2566                         val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2567                         val = val | (sec_num << shift_param);
2568                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2569                         sec_num = (sec_num << 1) | 0x1;
2570                         iwm_nic_unlock(sc);
2571
2572                         /*
2573                          * The firmware won't load correctly without this delay.
2574                          */
2575                         DELAY(8000);
2576                 }
2577         }
2578
2579         *first_ucode_section = last_read_idx;
2580
2581         if (iwm_nic_lock(sc)) {
2582                 if (cpu == 1)
2583                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2584                 else
2585                         IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2586                 iwm_nic_unlock(sc);
2587         }
2588
2589         return 0;
2590 }
2591
2592 int
2593 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2594 {
2595         struct iwm_fw_sects *fws;
2596         int error = 0;
2597         int first_ucode_section;
2598
2599         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "loading ucode type %d\n",
2600             ucode_type);
2601
2602         fws = &sc->sc_fw.fw_sects[ucode_type];
2603
2604         /* configure the ucode to be ready to get the secured image */
2605         /* release CPU reset */
2606         iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
2607
2608         /* load to FW the binary Secured sections of CPU1 */
2609         error = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
2610         if (error)
2611                 return error;
2612
2613         /* load to FW the binary sections of CPU2 */
2614         return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
2615 }
2616
2617 static int
2618 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2619 {
2620         struct iwm_fw_sects *fws;
2621         int error, i;
2622         const void *data;
2623         uint32_t dlen;
2624         uint32_t offset;
2625
2626         sc->sc_uc.uc_intr = 0;
2627
2628         fws = &sc->sc_fw.fw_sects[ucode_type];
2629         for (i = 0; i < fws->fw_count; i++) {
2630                 data = fws->fw_sect[i].fws_data;
2631                 dlen = fws->fw_sect[i].fws_len;
2632                 offset = fws->fw_sect[i].fws_devoff;
2633                 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2634                     "LOAD FIRMWARE type %d offset %u len %d\n",
2635                     ucode_type, offset, dlen);
2636                 if (dlen > sc->sc_fwdmasegsz) {
2637                         IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
2638                             "chunk %d too large (%d bytes)\n", i, dlen);
2639                         error = EFBIG;
2640                 } else {
2641                         error = iwm_firmware_load_sect(sc, offset, data, dlen);
2642                 }
2643                 if (error) {
2644                         device_printf(sc->sc_dev,
2645                             "could not load firmware chunk %u of %u "
2646                             "(error=%d)\n", i, fws->fw_count, error);
2647                         return error;
2648                 }
2649         }
2650
2651         IWM_WRITE(sc, IWM_CSR_RESET, 0);
2652
2653         return 0;
2654 }
2655
2656 static int
2657 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2658 {
2659         int error, w;
2660
2661         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2662                 error = iwm_load_firmware_8000(sc, ucode_type);
2663         else
2664                 error = iwm_load_firmware_7000(sc, ucode_type);
2665         if (error)
2666                 return error;
2667
2668         /* wait for the firmware to load */
2669         for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
2670                 error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
2671         }
2672         if (error || !sc->sc_uc.uc_ok) {
2673                 device_printf(sc->sc_dev, "could not load firmware\n");
2674                 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2675                         device_printf(sc->sc_dev, "cpu1 status: 0x%x\n",
2676                             iwm_read_prph(sc, IWM_SB_CPU_1_STATUS));
2677                         device_printf(sc->sc_dev, "cpu2 status: 0x%x\n",
2678                             iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
2679                 }
2680         }
2681
2682         /*
2683          * Give the firmware some time to initialize.
2684          * Accessing it too early causes errors.
2685          */
2686         msleep(&w, &sc->sc_mtx, 0, "iwmfwinit", hz);
2687
2688         return error;
2689 }
2690
2691 /* iwlwifi: pcie/trans.c */
2692 static int
2693 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2694 {
2695         int error;
2696
2697         IWM_WRITE(sc, IWM_CSR_INT, ~0);
2698
2699         if ((error = iwm_nic_init(sc)) != 0) {
2700                 device_printf(sc->sc_dev, "unable to init nic\n");
2701                 return error;
2702         }
2703
2704         /* make sure rfkill handshake bits are cleared */
2705         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2706         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2707             IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2708
2709         /* clear (again), then enable host interrupts */
2710         IWM_WRITE(sc, IWM_CSR_INT, ~0);
2711         iwm_enable_interrupts(sc);
2712
2713         /* really make sure rfkill handshake bits are cleared */
2714         /* maybe we should write a few times more?  just to make sure */
2715         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2716         IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2717
2718         /* Load the given image to the HW */
2719         return iwm_load_firmware(sc, ucode_type);
2720 }
2721
2722 static int
2723 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2724 {
2725         struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2726                 .valid = htole32(valid_tx_ant),
2727         };
2728
2729         return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2730             IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2731 }
2732
2733 /* iwlwifi: mvm/fw.c */
2734 static int
2735 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2736 {
2737         struct iwm_phy_cfg_cmd phy_cfg_cmd;
2738         enum iwm_ucode_type ucode_type = sc->sc_uc_current;
2739
2740         /* Set parameters */
2741         phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
2742         phy_cfg_cmd.calib_control.event_trigger =
2743             sc->sc_default_calib[ucode_type].event_trigger;
2744         phy_cfg_cmd.calib_control.flow_trigger =
2745             sc->sc_default_calib[ucode_type].flow_trigger;
2746
2747         IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2748             "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2749         return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2750             sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2751 }
2752
2753 static int
2754 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2755         enum iwm_ucode_type ucode_type)
2756 {
2757         enum iwm_ucode_type old_type = sc->sc_uc_current;
2758         int error;
2759
2760         if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2761                 device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
2762                         error);
2763                 return error;
2764         }
2765
2766         sc->sc_uc_current = ucode_type;
2767         error = iwm_start_fw(sc, ucode_type);
2768         if (error) {
2769                 device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2770                 sc->sc_uc_current = old_type;
2771                 return error;
2772         }
2773
2774         error = iwm_post_alive(sc);
2775         if (error) {
2776                 device_printf(sc->sc_dev, "iwm_fw_alive: failed %d\n", error);
2777         }
2778         return error;
2779 }
2780
2781 /*
2782  * mvm misc bits
2783  */
2784
2785 /*
2786  * follows iwlwifi/fw.c
2787  */
2788 static int
2789 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2790 {
2791         int error;
2792
2793         /* do not operate with rfkill switch turned on */
2794         if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2795                 device_printf(sc->sc_dev,
2796                     "radio is disabled by hardware switch\n");
2797                 return EPERM;
2798         }
2799
2800         sc->sc_init_complete = 0;
2801         if ((error = iwm_mvm_load_ucode_wait_alive(sc,
2802             IWM_UCODE_INIT)) != 0) {
2803                 device_printf(sc->sc_dev, "failed to load init firmware\n");
2804                 return error;
2805         }
2806
2807         if (justnvm) {
2808                 if ((error = iwm_nvm_init(sc)) != 0) {
2809                         device_printf(sc->sc_dev, "failed to read nvm\n");
2810                         return error;
2811                 }
2812                 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
2813
2814                 return 0;
2815         }
2816
2817         if ((error = iwm_send_bt_init_conf(sc)) != 0) {
2818                 device_printf(sc->sc_dev,
2819                     "failed to send bt coex configuration: %d\n", error);
2820                 return error;
2821         }
2822
2823         /* Init Smart FIFO. */
2824         error = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
2825         if (error != 0)
2826                 return error;
2827
2828 #if 0
2829         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2830             "%s: phy_txant=0x%08x, nvm_valid_tx_ant=0x%02x, valid=0x%02x\n",
2831             __func__,
2832             ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
2833               >> IWM_FW_PHY_CFG_TX_CHAIN_POS),
2834             sc->nvm_data->valid_tx_ant,
2835             iwm_fw_valid_tx_ant(sc));
2836 #endif
2837
2838         /* Send TX valid antennas before triggering calibrations */
2839         error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
2840         if (error != 0) {
2841                 device_printf(sc->sc_dev,
2842                     "failed to send antennas before calibration: %d\n", error);
2843                 return error;
2844         }
2845
2846         /*
2847          * Send the phy configuration command to the init uCode
2848          * to start the 16.0 uCode init image's internal calibrations.
2849          */
2850         if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
2851                 device_printf(sc->sc_dev,
2852                     "%s: failed to run internal calibration: %d\n",
2853                     __func__, error);
2854                 return error;
2855         }
2856
2857         /*
2858          * Nothing to do but wait for the init complete notification
2859          * from the firmware
2860          */
2861         while (!sc->sc_init_complete) {
2862                 error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
2863                                  0, "iwminit", 2*hz);
2864                 if (error) {
2865                         device_printf(sc->sc_dev,
2866                             "init complete failed: %d\n", error);
2867                         break;
2868                 }
2869         }
2870
2871         IWM_DPRINTF(sc, IWM_DEBUG_RESET, "init %scomplete\n",
2872             sc->sc_init_complete ? "" : "not ");
2873
2874         return error;
2875 }
2876
2877 /*
2878  * receive side
2879  */
2880
2881 /* (re)stock rx ring, called at init-time and at runtime */
2882 static int
2883 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
2884 {
2885         struct iwm_rx_ring *ring = &sc->rxq;
2886         struct iwm_rx_data *data = &ring->data[idx];
2887         struct mbuf *m;
2888         bus_dmamap_t dmamap = NULL;
2889         bus_dma_segment_t seg;
2890         int nsegs, error;
2891
2892         m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
2893         if (m == NULL)
2894                 return ENOBUFS;
2895
2896         m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2897         error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
2898             &seg, &nsegs, BUS_DMA_NOWAIT);
2899         if (error != 0) {
2900                 device_printf(sc->sc_dev,
2901                     "%s: can't map mbuf, error %d\n", __func__, error);
2902                 goto fail;
2903         }
2904
2905         if (data->m != NULL)
2906                 bus_dmamap_unload(ring->data_dmat, data->map);
2907
2908         /* Swap ring->spare_map with data->map */
2909         dmamap = data->map;
2910         data->map = ring->spare_map;
2911         ring->spare_map = dmamap;
2912
2913         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
2914         data->m = m;
2915
2916         /* Update RX descriptor. */
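             /*
              * The descriptor holds the buffer's bus address shifted right by
              * 8 bits, so the address must be 256-byte aligned (checked below).
              */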
2917         KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
2918         ring->desc[idx] = htole32(seg.ds_addr >> 8);
2919         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2920             BUS_DMASYNC_PREWRITE);
2921
2922         return 0;
2923 fail:
2924         m_freem(m);
2925         return error;
2926 }
2927
2928 /* iwlwifi: mvm/rx.c */
2929 #define IWM_RSSI_OFFSET 50
2930 static int
2931 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2932 {
2933         int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
2934         uint32_t agc_a, agc_b;
2935         uint32_t val;
2936
2937         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
2938         agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
2939         agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
2940
2941         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
2942         rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
2943         rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
2944
2945         /*
2946          * dBm = rssi dB - agc dB - constant.
2947          * Higher AGC (higher radio gain) means lower signal.
2948          */
2949         rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
2950         rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
2951         max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
2952
2953         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2954             "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
2955             rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
2956
2957         return max_rssi_dbm;
2958 }
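
     /*
      * Worked example of the conversion above (illustrative only; the raw
      * values are made up, not taken from real hardware): with
      * IWM_RSSI_OFFSET == 50, a raw RSSI of 65 and an AGC of 20 give
      * 65 - 50 - 20 = -5 dBm.  A minimal standalone sketch of the same
      * arithmetic:
      */
     #if 0
     static int
     iwm_rssi_to_dbm_example(int rssi_raw, int agc)
     {
             /* dBm = raw RSSI - fixed offset - AGC gain */
             return rssi_raw - IWM_RSSI_OFFSET - agc;
     }
     #endif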
2959
2960 /* iwlwifi: mvm/rx.c */
2961 /*
2962  * iwm_mvm_get_signal_strength - use the new rx PHY INFO API.
2963  * The firmware reports values as positive numbers, so they are negated
2964  * to obtain dBm.  Account for missing antennas by replacing 0 values
2965  * with -256 dBm: practically zero power and an infeasible 8-bit value.
2966  */
2967 static int
2968 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
2969 {
2970         int energy_a, energy_b, energy_c, max_energy;
2971         uint32_t val;
2972
2973         val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
2974         energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
2975             IWM_RX_INFO_ENERGY_ANT_A_POS;
2976         energy_a = energy_a ? -energy_a : -256;
2977         energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
2978             IWM_RX_INFO_ENERGY_ANT_B_POS;
2979         energy_b = energy_b ? -energy_b : -256;
2980         energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
2981             IWM_RX_INFO_ENERGY_ANT_C_POS;
2982         energy_c = energy_c ? -energy_c : -256;
2983         max_energy = MAX(energy_a, energy_b);
2984         max_energy = MAX(max_energy, energy_c);
2985
2986         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
2987             "energy In A %d B %d C %d , and max %d\n",
2988             energy_a, energy_b, energy_c, max_energy);
2989
2990         return max_energy;
2991 }
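
     /*
      * Example with made-up readings: energies of 35, 0 and 40 become
      * -35, -256 and -40 dBm after the negation/substitution above, so the
      * reported maximum is -35 dBm (antenna A).
      */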
2992
2993 static void
2994 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
2995         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
2996 {
2997         struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
2998
2999         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3000         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3001
3002         memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3003 }
3004
3005 /*
3006  * Retrieve the average noise (in dBm) among receivers.
3007  */
3008 static int
3009 iwm_get_noise(struct iwm_softc *sc,
3010     const struct iwm_mvm_statistics_rx_non_phy *stats)
3011 {
3012         int i, total, nbant, noise;
3013
3014         total = nbant = noise = 0;
3015         for (i = 0; i < 3; i++) {
3016                 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3017                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3018                     __func__,
3019                     i,
3020                     noise);
3021
3022                 if (noise) {
3023                         total += noise;
3024                         nbant++;
3025                 }
3026         }
3027
3028         IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3029             __func__, nbant, total);
3030 #if 0
3031         /* There should be at least one antenna but check anyway. */
3032         return (nbant == 0) ? -127 : (total / nbant) - 107;
3033 #else
3034         /* For now, just hard-code it to -96 to be safe */
3035         return (-96);
3036 #endif
3037 }
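
     /*
      * Example for the disabled average above (made-up values): beacon
      * silence readings of 30, 32 and 0 give nbant = 2 and total = 62,
      * so the estimate would be 62 / 2 - 107 = -76 dBm.
      */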
3038
3039 /*
3040  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3041  *
3042  * Handles the actual data of the Rx packet from the fw
3043  */
3044 static void
3045 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
3046         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3047 {
3048         struct ieee80211com *ic = &sc->sc_ic;
3049         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3050         struct ieee80211_frame *wh;
3051         struct ieee80211_node *ni;
3052         struct ieee80211_rx_stats rxs;
3053         struct mbuf *m;
3054         struct iwm_rx_phy_info *phy_info;
3055         struct iwm_rx_mpdu_res_start *rx_res;
3056         uint32_t len;
3057         uint32_t rx_pkt_status;
3058         int rssi;
3059
3060         bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3061
3062         phy_info = &sc->sc_last_phy_info;
3063         rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3064         wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3065         len = le16toh(rx_res->byte_count);
3066         rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3067
3068         m = data->m;
3069         m->m_data = pkt->data + sizeof(*rx_res);
3070         m->m_pkthdr.len = m->m_len = len;
3071
3072         if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3073                 device_printf(sc->sc_dev,
3074                     "dsp size out of range [0,20]: %d\n",
3075                     phy_info->cfg_phy_cnt);
3076                 goto fail;
3077         }
3078
3079         if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3080             !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3081                 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3082                     "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3083                 goto fail;
3084         }
3085
3086         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
3087                 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3088         } else {
3089                 rssi = iwm_mvm_calc_rssi(sc, phy_info);
3090         }
3091
3092         /* Note: RSSI is absolute (i.e. a negative dBm value) */
3093         if (rssi < IWM_MIN_DBM)
3094                 rssi = IWM_MIN_DBM;
3095         else if (rssi > IWM_MAX_DBM)
3096                 rssi = IWM_MAX_DBM;
3097
3098         /* Map it to relative value */
3099         rssi = rssi - sc->sc_noise;
3100
3101         /* replenish ring for the buffer we're going to feed to the sharks */
3102         if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3103                 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3104                     __func__);
3105                 goto fail;
3106         }
3107
3108         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3109             "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3110
3111         ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3112
3113         IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3114             "%s: phy_info: channel=%d, flags=0x%08x\n",
3115             __func__,
3116             le16toh(phy_info->channel),
3117             le16toh(phy_info->phy_flags));
3118
3119         /*
3120          * Populate an RX state struct with the provided information.
3121          */
3122         bzero(&rxs, sizeof(rxs));
3123         rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3124         rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3125         rxs.c_ieee = le16toh(phy_info->channel);
3126         if (phy_info->phy_flags & htole16(IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3127                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3128         } else {
3129                 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3130         }
3131
3132         /* rssi is in half-dB units */
3133         rxs.c_rssi = rssi * 2;
3134         rxs.c_nf = sc->sc_noise;
3135         if (ieee80211_add_rx_params(m, &rxs) == 0) {
3136                 if (ni)
3137                         ieee80211_free_node(ni);
3138                 goto fail;
3139         }
3140
3141         if (ieee80211_radiotap_active_vap(vap)) {
3142                 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3143
3144                 tap->wr_flags = 0;
3145                 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3146                         tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3147                 tap->wr_chan_freq = htole16(rxs.c_freq);
3148                 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3149                 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3150                 tap->wr_dbm_antsignal = (int8_t)rssi;
3151                 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3152                 tap->wr_tsft = phy_info->system_timestamp;
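                     /*
                      * phy_info->rate carries the PLCP rate code reported by
                      * the firmware; map it to the radiotap rate field, which
                      * is in units of 500 kb/s.
                      */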
3153                 switch (phy_info->rate) {
3154                 /* CCK rates. */
3155                 case  10: tap->wr_rate =   2; break;
3156                 case  20: tap->wr_rate =   4; break;
3157                 case  55: tap->wr_rate =  11; break;
3158                 case 110: tap->wr_rate =  22; break;
3159                 /* OFDM rates. */
3160                 case 0xd: tap->wr_rate =  12; break;
3161                 case 0xf: tap->wr_rate =  18; break;
3162                 case 0x5: tap->wr_rate =  24; break;
3163                 case 0x7: tap->wr_rate =  36; break;
3164                 case 0x9: tap->wr_rate =  48; break;
3165                 case 0xb: tap->wr_rate =  72; break;
3166                 case 0x1: tap->wr_rate =  96; break;
3167                 case 0x3: tap->wr_rate = 108; break;
3168                 /* Unknown rate: should not happen. */
3169                 default:  tap->wr_rate =   0;
3170                 }
3171         }
3172
3173         IWM_UNLOCK(sc);
3174         if (ni != NULL) {
3175                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3176                 ieee80211_input_mimo(ni, m);
3177                 ieee80211_free_node(ni);
3178         } else {
3179                 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3180                 ieee80211_input_mimo_all(ic, m);
3181         }
3182         IWM_LOCK(sc);
3183
3184         return;
3185
3186 fail:
3187         counter_u64_add(ic->ic_ierrors, 1);
3188 }
3189
3190 static int
3191 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3192         struct iwm_node *in)
3193 {
3194         struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3195         struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
3196         struct ieee80211_node *ni = &in->in_ni;
3197         int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3198
3199         KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3200
3201         /* Update rate control statistics. */
3202         IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3203             __func__,
3204             (int) le16toh(tx_resp->status.status),
3205             (int) le16toh(tx_resp->status.sequence),
3206             tx_resp->frame_count,
3207             tx_resp->bt_kill_count,
3208             tx_resp->failure_rts,
3209             tx_resp->failure_frame,
3210             le32toh(tx_resp->initial_rate),
3211             (int) le16toh(tx_resp->wireless_media_time));
3212
3213         txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
3214                      IEEE80211_RATECTL_STATUS_LONG_RETRY;
3215         txs->short_retries = tx_resp->failure_rts;
3216         txs->long_retries = tx_resp->failure_frame;
3217         if (status != IWM_TX_STATUS_SUCCESS &&
3218             status != IWM_TX_STATUS_DIRECT_DONE) {
3219                 switch (status) {
3220                 case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
3221                         txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
3222                         break;
3223                 case IWM_TX_STATUS_FAIL_LONG_LIMIT:
3224                         txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
3225                         break;
3226                 case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
3227                         txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
3228                         break;
3229                 default:
3230                         txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
3231                         break;
3232                 }
3233         } else {
3234                 txs->status = IEEE80211_RATECTL_TX_SUCCESS;
3235         }
3236         ieee80211_ratectl_tx_complete(ni, txs);
3237
3238         return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
3239 }
3240
3241 static void
3242 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
3243         struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3244 {
3245         struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3246         int idx = cmd_hdr->idx;
3247         int qid = cmd_hdr->qid;
3248         struct iwm_tx_ring *ring = &sc->txq[qid];
3249         struct iwm_tx_data *txd = &ring->data[idx];
3250         struct iwm_node *in = txd->in;
3251         struct mbuf *m = txd->m;
3252         int status;
3253
3254         KASSERT(txd->done == 0, ("txd not done"));
3255         KASSERT(txd->in != NULL, ("txd without node"));
3256         KASSERT(txd->m != NULL, ("txd without mbuf"));
3257
3258         bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3259
3260         sc->sc_tx_timer = 0;
3261
3262         status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3263
3264         /* Unmap and free mbuf. */
3265         bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3266         bus_dmamap_unload(ring->data_dmat, txd->map);
3267
3268         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3269             "free txd %p, in %p\n", txd, txd->in);
3270         txd->done = 1;
3271         txd->m = NULL;
3272         txd->in = NULL;
3273
3274         ieee80211_tx_complete(&in->in_ni, m, status);
3275
3276         if (--ring->queued < IWM_TX_RING_LOMARK) {
3277                 sc->qfullmsk &= ~(1 << ring->qid);
3278                 if (sc->qfullmsk == 0) {
3279                         /*
3280                          * Well, we're in interrupt context, but then again
3281                          * I guess net80211 does all sorts of stunts in
3282                          * interrupt context, so maybe this is no biggie.
3283                          */
3284                         iwm_start(sc);
3285                 }
3286         }
3287 }
3288
3289 /*
3290  * transmit side
3291  */
3292
3293 /*
3294  * Process a "command done" firmware notification.  This is where we wake up
3295  * processes waiting for a synchronous command completion.
3296  * (Adapted from if_iwn.)
3297  */
3298 static void
3299 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3300 {
3301         struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3302         struct iwm_tx_data *data;
3303
3304         if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3305                 return; /* Not a command ack. */
3306         }
3307
3308         /* XXX wide commands? */
3309         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3310             "cmd notification type 0x%x qid %d idx %d\n",
3311             pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3312
3313         data = &ring->data[pkt->hdr.idx];
3314
3315         /* If the command was mapped in an mbuf, free it. */
3316         if (data->m != NULL) {
3317                 bus_dmamap_sync(ring->data_dmat, data->map,
3318                     BUS_DMASYNC_POSTWRITE);
3319                 bus_dmamap_unload(ring->data_dmat, data->map);
3320                 m_freem(data->m);
3321                 data->m = NULL;
3322         }
3323         wakeup(&ring->desc[pkt->hdr.idx]);
3324 }
3325
3326 #if 0
3327 /*
3328  * necessary only for block ack mode
3329  */
3330 void
3331 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3332         uint16_t len)
3333 {
3334         struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3335         uint16_t w_val;
3336
3337         scd_bc_tbl = sc->sched_dma.vaddr;
3338
3339         len += 8; /* magic numbers came naturally from paris */
3340         if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
3341                 len = roundup(len, 4) / 4;
3342
3343         w_val = htole16(sta_id << 12 | len);
3344
3345         /* Update TX scheduler. */
3346         scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3347         bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3348             BUS_DMASYNC_PREWRITE);
3349
3350         /* I really wonder what this is ?!? */
3351         if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3352                 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3353                 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3354                     BUS_DMASYNC_PREWRITE);
3355         }
3356 }
3357 #endif
3358
3359 /*
3360  * Take an 802.11 (non-11n) rate and find the matching entry in the
3361  * node's rate table.  Return the index into in_ridx[].
3362  *
3363  * The caller then uses that index back into in_ridx[] to figure out
3364  * the rate index that was programmed into the firmware for this
3365  * given node.
3366  */
3367 static int
3368 iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3369     uint8_t rate)
3370 {
3371         int i;
3372         uint8_t r;
3373
3374         for (i = 0; i < nitems(in->in_ridx); i++) {
3375                 r = iwm_rates[in->in_ridx[i]].rate;
3376                 if (rate == r)
3377                         return (i);
3378         }
3379
3380         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3381             "%s: couldn't find an entry for rate=%d\n",
3382             __func__,
3383             rate);
3384
3385         /* XXX Return the first */
3386         /* XXX TODO: have it return the /lowest/ */
3387         return (0);
3388 }
3389
3390 static int
3391 iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3392 {
3393         int i;
3394
3395         for (i = 0; i < nitems(iwm_rates); i++) {
3396                 if (iwm_rates[i].rate == rate)
3397                         return (i);
3398         }
3399         /* XXX error? */
3400         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3401             "%s: couldn't find an entry for rate=%d\n",
3402             __func__,
3403             rate);
3404         return (0);
3405 }
3406
3407 /*
3408  * Fill in the rate-related information for a transmit command.
3409  */
3410 static const struct iwm_rate *
3411 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3412         struct mbuf *m, struct iwm_tx_cmd *tx)
3413 {
3414         struct ieee80211_node *ni = &in->in_ni;
3415         struct ieee80211_frame *wh;
3416         const struct ieee80211_txparam *tp = ni->ni_txparms;
3417         const struct iwm_rate *rinfo;
3418         int type;
3419         int ridx, rate_flags;
3420
3421         wh = mtod(m, struct ieee80211_frame *);
3422         type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3423
3424         tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3425         tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3426
3427         if (type == IEEE80211_FC0_TYPE_MGT) {
3428                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3429                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3430                     "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3431         } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3432                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3433                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3434                     "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3435         } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3436                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3437                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3438                     "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3439         } else if (m->m_flags & M_EAPOL) {
3440                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3441                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3442                     "%s: EAPOL\n", __func__);
3443         } else if (type == IEEE80211_FC0_TYPE_DATA) {
3444                 int i;
3445
3446                 /* for data frames, use RS table */
3447                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3448                 /* XXX pass pktlen */
3449                 (void) ieee80211_ratectl_rate(ni, NULL, 0);
3450                 i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3451                 ridx = in->in_ridx[i];
3452
3453                 /* This is the index into the programmed table */
3454                 tx->initial_rate_index = i;
3455                 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3456
3457                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3458                     "%s: start with i=%d, txrate %d\n",
3459                     __func__, i, iwm_rates[ridx].rate);
3460         } else {
3461                 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3462                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DEFAULT (%d)\n",
3463                     __func__, tp->mgmtrate);
3464         }
3465
3466         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3467             "%s: frame type=%d txrate %d\n",
3468                 __func__, type, iwm_rates[ridx].rate);
3469
3470         rinfo = &iwm_rates[ridx];
3471
3472         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3473             __func__, ridx,
3474             rinfo->rate,
3475             !! (IWM_RIDX_IS_CCK(ridx))
3476             );
3477
3478         /* XXX TODO: hard-coded TX antenna? */
3479         rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3480         if (IWM_RIDX_IS_CCK(ridx))
3481                 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3482         tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3483
3484         return rinfo;
3485 }
3486
3487 #define TB0_SIZE 16
3488 static int
3489 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3490 {
3491         struct ieee80211com *ic = &sc->sc_ic;
3492         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3493         struct iwm_node *in = IWM_NODE(ni);
3494         struct iwm_tx_ring *ring;
3495         struct iwm_tx_data *data;
3496         struct iwm_tfd *desc;
3497         struct iwm_device_cmd *cmd;
3498         struct iwm_tx_cmd *tx;
3499         struct ieee80211_frame *wh;
3500         struct ieee80211_key *k = NULL;
3501         struct mbuf *m1;
3502         const struct iwm_rate *rinfo;
3503         uint32_t flags;
3504         u_int hdrlen;
3505         bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3506         int nsegs;
3507         uint8_t tid, type;
3508         int i, totlen, error, pad;
3509
3510         wh = mtod(m, struct ieee80211_frame *);
3511         hdrlen = ieee80211_anyhdrsize(wh);
3512         type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3513         tid = 0;
3514         ring = &sc->txq[ac];
3515         desc = &ring->desc[ring->cur];
3516         memset(desc, 0, sizeof(*desc));
3517         data = &ring->data[ring->cur];
3518
3519         /* Fill out iwm_tx_cmd to send to the firmware */
3520         cmd = &ring->cmd[ring->cur];
3521         cmd->hdr.code = IWM_TX_CMD;
3522         cmd->hdr.flags = 0;
3523         cmd->hdr.qid = ring->qid;
3524         cmd->hdr.idx = ring->cur;
3525
3526         tx = (void *)cmd->data;
3527         memset(tx, 0, sizeof(*tx));
3528
3529         rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
3530
3531         /* Encrypt the frame if need be. */
3532         if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3533                 /* Retrieve key for TX and do software encryption. */
3534                 k = ieee80211_crypto_encap(ni, m);
3535                 if (k == NULL) {
3536                         m_freem(m);
3537                         return (ENOBUFS);
3538                 }
3539                 /* 802.11 header may have moved. */
3540                 wh = mtod(m, struct ieee80211_frame *);
3541         }
3542
3543         if (ieee80211_radiotap_active_vap(vap)) {
3544                 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3545
3546                 tap->wt_flags = 0;
3547                 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3548                 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3549                 tap->wt_rate = rinfo->rate;
3550                 if (k != NULL)
3551                         tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3552                 ieee80211_radiotap_tx(vap, m);
3553         }
3554
3555
3556         totlen = m->m_pkthdr.len;
3557
3558         flags = 0;
3559         if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3560                 flags |= IWM_TX_CMD_FLG_ACK;
3561         }
3562
3563         if (type == IEEE80211_FC0_TYPE_DATA
3564             && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3565             && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3566                 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3567         }
3568
3569         if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3570             type != IEEE80211_FC0_TYPE_DATA)
3571                 tx->sta_id = sc->sc_aux_sta.sta_id;
3572         else
3573                 tx->sta_id = IWM_STATION_ID;
3574
3575         if (type == IEEE80211_FC0_TYPE_MGT) {
3576                 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3577
3578                 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3579                     subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3580                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3581                 } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3582                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3583                 } else {
3584                         tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3585                 }
3586         } else {
3587                 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3588         }
3589
3590         if (hdrlen & 3) {
3591                 /* First segment length must be a multiple of 4. */
3592                 flags |= IWM_TX_CMD_FLG_MH_PAD;
3593                 pad = 4 - (hdrlen & 3);
3594         } else
3595                 pad = 0;
3596
3597         tx->driver_txop = 0;
3598         tx->next_frame_len = 0;
3599
3600         tx->len = htole16(totlen);
3601         tx->tid_tspec = tid;
3602         tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3603
3604         /* Set physical address of "scratch area". */
3605         tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3606         tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3607
3608         /* Copy 802.11 header in TX command. */
3609         memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3610
3611         flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3612
3613         tx->sec_ctl = 0;
3614         tx->tx_flags |= htole32(flags);
3615
3616         /* Trim 802.11 header. */
3617         m_adj(m, hdrlen);
3618         error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3619             segs, &nsegs, BUS_DMA_NOWAIT);
3620         if (error != 0) {
3621                 if (error != EFBIG) {
3622                         device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3623                             error);
3624                         m_freem(m);
3625                         return error;
3626                 }
3627                 /* Too many DMA segments, linearize mbuf. */
3628                 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3629                 if (m1 == NULL) {
3630                         device_printf(sc->sc_dev,
3631                             "%s: could not defrag mbuf\n", __func__);
3632                         m_freem(m);
3633                         return (ENOBUFS);
3634                 }
3635                 m = m1;
3636
3637                 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3638                     segs, &nsegs, BUS_DMA_NOWAIT);
3639                 if (error != 0) {
3640                         device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3641                             error);
3642                         m_freem(m);
3643                         return error;
3644                 }
3645         }
3646         data->m = m;
3647         data->in = in;
3648         data->done = 0;
3649
3650         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3651             "sending txd %p, in %p\n", data, data->in);
3652         KASSERT(data->in != NULL, ("node is NULL"));
3653
3654         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3655             "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3656             ring->qid, ring->cur, totlen, nsegs,
3657             le32toh(tx->tx_flags),
3658             le32toh(tx->rate_n_flags),
3659             tx->initial_rate_index
3660             );
3661
3662         /* Fill TX descriptor. */
3663         desc->num_tbs = 2 + nsegs;
3664
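             /*
              * TB0 covers the first TB0_SIZE bytes of the command (the
              * iwm_cmd_header plus the start of the TX command), TB1 covers
              * the rest of the TX command and the copied 802.11 header (plus
              * any pad), and TB2..n point at the payload segments loaded above.
              */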
3665         desc->tbs[0].lo = htole32(data->cmd_paddr);
3666         desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3667             (TB0_SIZE << 4);
3668         desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3669         desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3670             ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3671               + hdrlen + pad - TB0_SIZE) << 4);
3672
3673         /* Other DMA segments are for data payload. */
3674         for (i = 0; i < nsegs; i++) {
3675                 seg = &segs[i];
3676                 desc->tbs[i+2].lo = htole32(seg->ds_addr);
3677                 desc->tbs[i+2].hi_n_len =
3678                     htole16(iwm_get_dma_hi_addr(seg->ds_addr)) |
3679                     ((seg->ds_len) << 4);
3680         }
3681
3682         bus_dmamap_sync(ring->data_dmat, data->map,
3683             BUS_DMASYNC_PREWRITE);
3684         bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3685             BUS_DMASYNC_PREWRITE);
3686         bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3687             BUS_DMASYNC_PREWRITE);
3688
3689 #if 0
3690         iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3691 #endif
3692
3693         /* Kick TX ring. */
3694         ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3695         IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3696
3697         /* Mark TX ring as full if we reach a certain threshold. */
3698         if (++ring->queued > IWM_TX_RING_HIMARK) {
3699                 sc->qfullmsk |= 1 << ring->qid;
3700         }
3701
3702         return 0;
3703 }
3704
3705 static int
3706 iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3707     const struct ieee80211_bpf_params *params)
3708 {
3709         struct ieee80211com *ic = ni->ni_ic;
3710         struct iwm_softc *sc = ic->ic_softc;
3711         int error = 0;
3712
3713         IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3714             "->%s begin\n", __func__);
3715
3716         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3717                 m_freem(m);
3718                 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3719                     "<-%s not RUNNING\n", __func__);
3720                 return (ENETDOWN);
3721         }
3722
3723         IWM_LOCK(sc);
3724         /* XXX fix this */
3725         if (params == NULL) {
3726                 error = iwm_tx(sc, m, ni, 0);
3727         } else {
3728                 error = iwm_tx(sc, m, ni, 0);
3729         }
3730         sc->sc_tx_timer = 5;
3731         IWM_UNLOCK(sc);
3732
3733         return (error);
3734 }
3735
3736 /*
3737  * mvm/tx.c
3738  */
3739
3740 /*
3741  * Note that there are transports that buffer frames before they reach
3742  * the firmware. This means that after flush_tx_path is called, the
3743  * queue might not be empty. The race-free way to handle this is to:
3744  * 1) set the station as draining
3745  * 2) flush the Tx path
3746  * 3) wait for the transport queues to be empty
3747  */
3748 int
3749 iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3750 {
3751         int ret;
3752         struct iwm_tx_path_flush_cmd flush_cmd = {
3753                 .queues_ctl = htole32(tfd_msk),
3754                 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3755         };
3756
3757         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3758             sizeof(flush_cmd), &flush_cmd);
3759         if (ret)
3760                 device_printf(sc->sc_dev,
3761                     "Flushing tx queue failed: %d\n", ret);
3762         return ret;
3763 }
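
     /*
      * A minimal sketch of the race-free drain sequence described above,
      * illustrative only: iwm_mvm_drain_sta() and
      * iwm_trans_wait_tx_queue_empty() are assumed names, sketched after the
      * iwlwifi pattern, and are not implemented in this driver.
      */
     #if 0
     static int
     iwm_mvm_drain_tx_example(struct iwm_softc *sc, struct iwm_node *in,
             uint32_t tfd_msk)
     {
             int error;

             /* 1) mark the station as draining so no new frames are queued */
             if ((error = iwm_mvm_drain_sta(sc, in, 1)) != 0)
                     return error;
             /* 2) flush whatever is already queued on the Tx path */
             if ((error = iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC)) != 0)
                     return error;
             /* 3) wait for the transport queues to actually drain */
             return iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
     }
     #endif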
3764
3765 /*
3766  * BEGIN mvm/sta.c
3767  */
3768
3769 static int
3770 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3771         struct iwm_mvm_add_sta_cmd_v7 *cmd, int *status)
3772 {
3773         return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
3774             cmd, status);
3775 }
3776
3777 /* send station add/update command to firmware */
3778 static int
3779 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3780 {
3781         struct iwm_mvm_add_sta_cmd_v7 add_sta_cmd;
3782         int ret;
3783         uint32_t status;
3784
3785         memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3786
3787         add_sta_cmd.sta_id = IWM_STATION_ID;
3788         add_sta_cmd.mac_id_n_color
3789             = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3790                 IWM_DEFAULT_COLOR));
3791         if (!update) {
3792                 int ac;
3793                 for (ac = 0; ac < WME_NUM_AC; ac++) {
3794                         add_sta_cmd.tfd_queue_msk |=
3795                             htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
3796                 }
3797                 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3798         }
3799         add_sta_cmd.add_modify = update ? 1 : 0;
3800         add_sta_cmd.station_flags_msk
3801             |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3802         add_sta_cmd.tid_disable_tx = htole16(0xffff);
3803         if (update)
3804                 add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
3805
3806         status = IWM_ADD_STA_SUCCESS;
3807         ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3808         if (ret)
3809                 return ret;
3810
3811         switch (status) {
3812         case IWM_ADD_STA_SUCCESS:
3813                 break;
3814         default:
3815                 ret = EIO;
3816                 device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3817                 break;
3818         }
3819
3820         return ret;
3821 }
3822
3823 static int
3824 iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
3825 {
3826         return iwm_mvm_sta_send_to_fw(sc, in, 0);
3827 }
3828
3829 static int
3830 iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
3831 {
3832         return iwm_mvm_sta_send_to_fw(sc, in, 1);
3833 }
3834
3835 static int
3836 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3837         const uint8_t *addr, uint16_t mac_id, uint16_t color)
3838 {
3839         struct iwm_mvm_add_sta_cmd_v7 cmd;
3840         int ret;
3841         uint32_t status;
3842
3843         memset(&cmd, 0, sizeof(cmd));
3844         cmd.sta_id = sta->sta_id;
3845         cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3846
3847         cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3848         cmd.tid_disable_tx = htole16(0xffff);
3849
3850         if (addr)
3851                 IEEE80211_ADDR_COPY(cmd.addr, addr);
3852
3853         ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3854         if (ret)
3855                 return ret;
3856
3857         switch (status) {
3858         case IWM_ADD_STA_SUCCESS:
3859                 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3860                     "%s: Internal station added.\n", __func__);
3861                 return 0;
3862         default:
3863                 device_printf(sc->sc_dev,
3864                     "%s: Add internal station failed, status=0x%x\n",
3865                     __func__, status);
3866                 ret = EIO;
3867                 break;
3868         }
3869         return ret;
3870 }
3871
3872 static int
3873 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3874 {
3875         int ret;
3876
3877         sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
3878         sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
3879
3880         ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
3881         if (ret)
3882                 return ret;
3883
3884         ret = iwm_mvm_add_int_sta_common(sc,
3885             &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
3886
3887         if (ret)
3888                 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
3889         return ret;
3890 }
3891
3892 /*
3893  * END mvm/sta.c
3894  */
3895
3896 /*
3897  * BEGIN mvm/quota.c
3898  */
3899
3900 static int
3901 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
3902 {
3903         struct iwm_time_quota_cmd cmd;
3904         int i, idx, ret, num_active_macs, quota, quota_rem;
3905         int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3906         int n_ifs[IWM_MAX_BINDINGS] = {0, };
3907         uint16_t id;
3908
3909         memset(&cmd, 0, sizeof(cmd));
3910
3911         /* currently, PHY ID == binding ID */
3912         if (in) {
3913                 id = in->in_phyctxt->id;
3914                 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3915                 colors[id] = in->in_phyctxt->color;
3916
3917                 if (1)
3918                         n_ifs[id] = 1;
3919         }
3920
3921         /*
3922          * The FW's scheduling session consists of
3923          * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3924          * equally between all the bindings that require quota
3925          */
3926         num_active_macs = 0;
3927         for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3928                 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3929                 num_active_macs += n_ifs[i];
3930         }
3931
3932         quota = 0;
3933         quota_rem = 0;
3934         if (num_active_macs) {
3935                 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3936                 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3937         }
3938
3939         for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3940                 if (colors[i] < 0)
3941                         continue;
3942
3943                 cmd.quotas[idx].id_and_color =
3944                         htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3945
3946                 if (n_ifs[i] <= 0) {
3947                         cmd.quotas[idx].quota = htole32(0);
3948                         cmd.quotas[idx].max_duration = htole32(0);
3949                 } else {
3950                         cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3951                         cmd.quotas[idx].max_duration = htole32(0);
3952                 }
3953                 idx++;
3954         }
3955
3956         /* Give the remainder of the session to the first binding */
3957         cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3958
3959         ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3960             sizeof(cmd), &cmd);
3961         if (ret)
3962                 device_printf(sc->sc_dev,
3963                     "%s: Failed to send quota: %d\n", __func__, ret);
3964         return ret;
3965 }
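
     /*
      * Example of the split above (the value of IWM_MVM_MAX_QUOTA is an
      * assumption here): with a budget of 128 fragments and three active
      * bindings, each binding gets 128 / 3 = 42 fragments and the remainder
      * of 2 is added to the first binding's quota.
      */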
3966
3967 /*
3968  * END mvm/quota.c
3969  */
3970
3971 /*
3972  * ieee80211 routines
3973  */
3974
3975 /*
3976  * Change to AUTH state in 80211 state machine.  Roughly matches what
3977  * Linux does in bss_info_changed().
3978  */
3979 static int
3980 iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3981 {
3982         struct ieee80211_node *ni;
3983         struct iwm_node *in;
3984         struct iwm_vap *iv = IWM_VAP(vap);
3985         uint32_t duration;
3986         int error;
3987
3988         /*
3989          * XXX I have a feeling that the vap node is being
3990          * freed from underneath us. Grr.
3991          */
3992         ni = ieee80211_ref_node(vap->iv_bss);
3993         in = IWM_NODE(ni);
3994         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3995             "%s: called; vap=%p, bss ni=%p\n",
3996             __func__,
3997             vap,
3998             ni);
3999
4000         in->in_assoc = 0;
4001
4002         error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
4003         if (error != 0)
4004                 return error;
4005
4006         error = iwm_allow_mcast(vap, sc);
4007         if (error) {
4008                 device_printf(sc->sc_dev,
4009                     "%s: failed to set multicast\n", __func__);
4010                 goto out;
4011         }
4012
4013         /*
4014          * This is where it deviates from what Linux does.
4015          *
4016          * Linux iwlwifi doesn't reset the NIC each time, nor does it
4017          * call ctxt_add() here.  Instead, it adds it during vap creation,
4018          * and always does a mac_ctx_changed().
4019          *
4020          * The OpenBSD port doesn't attempt to do that - it resets things
4021          * in odd states and does the add here.
4022          *
4023          * So, until the state handling is fixed (i.e., we never reset
4024          * the NIC except for a firmware failure, which should drag
4025          * the NIC back to IDLE, re-setup and re-add all the mac/phy
4026          * contexts that are required), let's do a dirty hack here.
4027          */
4028         if (iv->is_uploaded) {
4029                 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4030                         device_printf(sc->sc_dev,
4031                             "%s: failed to update MAC\n", __func__);
4032                         goto out;
4033                 }
4034                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4035                     in->in_ni.ni_chan, 1, 1)) != 0) {
4036                         device_printf(sc->sc_dev,
4037                             "%s: failed update phy ctxt\n", __func__);
4038                         goto out;
4039                 }
4040                 in->in_phyctxt = &sc->sc_phyctxt[0];
4041
4042                 if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
4043                         device_printf(sc->sc_dev,
4044                             "%s: binding update cmd\n", __func__);
4045                         goto out;
4046                 }
4047                 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4048                         device_printf(sc->sc_dev,
4049                             "%s: failed to update sta\n", __func__);
4050                         goto out;
4051                 }
4052         } else {
4053                 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4054                         device_printf(sc->sc_dev,
4055                             "%s: failed to add MAC\n", __func__);
4056                         goto out;
4057                 }
4058                 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4059                     in->in_ni.ni_chan, 1, 1)) != 0) {
4060                         device_printf(sc->sc_dev,
4061                             "%s: failed add phy ctxt!\n", __func__);
4062                         error = ETIMEDOUT;
4063                         goto out;
4064                 }
4065                 in->in_phyctxt = &sc->sc_phyctxt[0];
4066
4067                 if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
4068                         device_printf(sc->sc_dev,
4069                             "%s: binding add cmd\n", __func__);
4070                         goto out;
4071                 }
4072                 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4073                         device_printf(sc->sc_dev,
4074                             "%s: failed to add sta\n", __func__);
4075                         goto out;
4076                 }
4077         }
4078
4079         /*
4080          * Prevent the FW from wandering off channel during association
4081          * by "protecting" the session with a time event.
4082          */
4083         /* XXX duration is in units of TU, not ms */
4084         duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4085         iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
4086         DELAY(100);
4087
4088         error = 0;
4089 out:
4090         ieee80211_free_node(ni);
4091         return (error);
4092 }
4093
4094 static int
4095 iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
4096 {
4097         struct iwm_node *in = IWM_NODE(vap->iv_bss);
4098         int error;
4099
4100         if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4101                 device_printf(sc->sc_dev,
4102                     "%s: failed to update STA\n", __func__);
4103                 return error;
4104         }
4105
4106         in->in_assoc = 1;
4107         if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4108                 device_printf(sc->sc_dev,
4109                     "%s: failed to update MAC\n", __func__);
4110                 return error;
4111         }
4112
4113         return 0;
4114 }
4115
4116 static int
4117 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
4118 {
4119         uint32_t tfd_msk;
4120
4121         /*
4122          * Ok, so *technically* the proper set of calls for going
4123          * from RUN back to SCAN is:
4124          *
4125          * iwm_mvm_power_mac_disable(sc, in);
4126          * iwm_mvm_mac_ctxt_changed(sc, in);
4127          * iwm_mvm_rm_sta(sc, in);
4128          * iwm_mvm_update_quotas(sc, NULL);
4129          * iwm_mvm_mac_ctxt_changed(sc, in);
4130          * iwm_mvm_binding_remove_vif(sc, in);
4131          * iwm_mvm_mac_ctxt_remove(sc, in);
4132          *
4133          * However, that freezes the device no matter which permutations
4134          * and modifications are attempted.  Obviously, this driver is missing
4135          * something since it works in the Linux driver, but figuring out what
4136          * is missing is a little more complicated.  Now, since we're going
4137          * back to nothing anyway, we'll just do a complete device reset.
4138          * Up yours, device!
4139          */
4140         /*
4141          * Just using 0xf for the queues mask is fine as long as we only
4142          * get here from RUN state.
4143          */
4144         tfd_msk = 0xf;
4145         mbufq_drain(&sc->sc_snd);
4146         iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
4147         /*
4148          * We seem to get away with just synchronously sending the
4149          * IWM_TXPATH_FLUSH command.
4150          */
4151 //      iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
4152         iwm_stop_device(sc);
4153         iwm_init_hw(sc);
4154         if (in)
4155                 in->in_assoc = 0;
4156         return 0;
4157
4158 #if 0
4159         int error;
4160
4161         iwm_mvm_power_mac_disable(sc, in);
4162
4163         if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4164                 device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
4165                 return error;
4166         }
4167
4168         if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
4169                 device_printf(sc->sc_dev, "sta remove fail %d\n", error);
4170                 return error;
4171         }
4172         error = iwm_mvm_rm_sta(sc, in);
4173         in->in_assoc = 0;
4174         iwm_mvm_update_quotas(sc, NULL);
4175         if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4176                 device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
4177                 return error;
4178         }
4179         iwm_mvm_binding_remove_vif(sc, in);
4180
4181         iwm_mvm_mac_ctxt_remove(sc, in);
4182
4183         return error;
4184 #endif
4185 }
4186
4187 static struct ieee80211_node *
4188 iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4189 {
4190         return malloc(sizeof (struct iwm_node), M_80211_NODE,
4191             M_NOWAIT | M_ZERO);
4192 }
4193
4194 static void
4195 iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
4196 {
4197         struct ieee80211_node *ni = &in->in_ni;
4198         struct iwm_lq_cmd *lq = &in->in_lq;
4199         int nrates = ni->ni_rates.rs_nrates;
4200         int i, ridx, tab = 0;
4201 //      int txant = 0;
4202
4203         if (nrates > nitems(lq->rs_table)) {
4204                 device_printf(sc->sc_dev,
4205                     "%s: node supports %d rates, driver handles "
4206                     "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4207                 return;
4208         }
4209         if (nrates == 0) {
4210                 device_printf(sc->sc_dev,
4211                     "%s: node supports 0 rates, odd!\n", __func__);
4212                 return;
4213         }
4214
4215         /*
4216          * XXX .. and most of iwm_node is not initialised explicitly;
4217          * it's all just 0x0 passed to the firmware.
4218          */
4219
4220         /* first figure out which rates we should support */
4221         /* XXX TODO: this isn't 11n aware /at all/ */
4222         memset(&in->in_ridx, -1, sizeof(in->in_ridx));
4223         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4224             "%s: nrates=%d\n", __func__, nrates);
4225
4226         /*
4227          * Loop over nrates and populate in_ridx from the highest
4228          * rate to the lowest rate.  Remember, in_ridx[] has
4229          * IEEE80211_RATE_MAXSIZE entries!
4230          */
4231         for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
4232                 int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;
4233
4234                 /* Map 802.11 rate to HW rate index. */
4235                 for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
4236                         if (iwm_rates[ridx].rate == rate)
4237                                 break;
4238                 if (ridx > IWM_RIDX_MAX) {
4239                         device_printf(sc->sc_dev,
4240                             "%s: WARNING: device rate for %d not found!\n",
4241                             __func__, rate);
4242                 } else {
4243                         IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4244                             "%s: rate: i: %d, rate=%d, ridx=%d\n",
4245                             __func__,
4246                             i,
4247                             rate,
4248                             ridx);
4249                         in->in_ridx[i] = ridx;
4250                 }
4251         }
4252
4253         /* then construct a lq_cmd based on those */
4254         memset(lq, 0, sizeof(*lq));
4255         lq->sta_id = IWM_STATION_ID;
4256
4257         /* For HT, always enable RTS/CTS to avoid excessive retries. */
4258         if (ni->ni_flags & IEEE80211_NODE_HT)
4259                 lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4260
4261         /*
4262          * are these used? (we don't do SISO or MIMO)
4263          * need to set them to non-zero, though, or we get an error.
4264          */
4265         lq->single_stream_ant_msk = 1;
4266         lq->dual_stream_ant_msk = 1;
4267
4268         /*
4269          * Build the actual rate selection table.
4270          * The lowest bits are the rates.  Additionally,
4271          * CCK needs bit 9 to be set.  The rest of the bits
4272          * we add to the table select the tx antenna.
4273          * Note that we add the rates highest rate first
4274          * (opposite of ni_rates).
4275          */
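        /*
         * Rough sketch of a resulting entry, assuming the usual iwlwifi
         * PLCP values (e.g. 0xd for OFDM 6 Mbps) and antenna A (mask 0x1):
         *
         *      tab = 0xd | (0x1 << IWM_RATE_MCS_ANT_POS);
         *
         * A CCK rate such as 1 Mbps would additionally OR in
         * IWM_RATE_MCS_CCK_MSK, as done in the loop below.
         */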
4276         /*
4277          * XXX TODO: this should be looping over the min of nrates
4278          * and LQ_MAX_RETRY_NUM.  Sigh.
4279          */
4280         for (i = 0; i < nrates; i++) {
4281                 int nextant;
4282
4283 #if 0
4284                 if (txant == 0)
4285                         txant = iwm_mvm_get_valid_tx_ant(sc);
4286                 nextant = 1<<(ffs(txant)-1);
4287                 txant &= ~nextant;
4288 #else
4289                 nextant = iwm_mvm_get_valid_tx_ant(sc);
4290 #endif
4291                 /*
4292                  * Map the rate id into a rate index into
4293                  * our hardware table containing the
4294                  * configuration to use for this rate.
4295                  */
4296                 ridx = in->in_ridx[i];
4297                 tab = iwm_rates[ridx].plcp;
4298                 tab |= nextant << IWM_RATE_MCS_ANT_POS;
4299                 if (IWM_RIDX_IS_CCK(ridx))
4300                         tab |= IWM_RATE_MCS_CCK_MSK;
4301                 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4302                     "station rate i=%d, rate=%d, hw=%x\n",
4303                     i, iwm_rates[ridx].rate, tab);
4304                 lq->rs_table[i] = htole32(tab);
4305         }
4306         /* then fill the rest with the lowest possible rate */
4307         for (i = nrates; i < nitems(lq->rs_table); i++) {
4308                 KASSERT(tab != 0, ("invalid tab"));
4309                 lq->rs_table[i] = htole32(tab);
4310         }
4311 }
4312
4313 static int
4314 iwm_media_change(struct ifnet *ifp)
4315 {
4316         struct ieee80211vap *vap = ifp->if_softc;
4317         struct ieee80211com *ic = vap->iv_ic;
4318         struct iwm_softc *sc = ic->ic_softc;
4319         int error;
4320
4321         error = ieee80211_media_change(ifp);
4322         if (error != ENETRESET)
4323                 return error;
4324
4325         IWM_LOCK(sc);
4326         if (ic->ic_nrunning > 0) {
4327                 iwm_stop(sc);
4328                 iwm_init(sc);
4329         }
4330         IWM_UNLOCK(sc);
4331         return error;
4332 }
4333
4334
4335 static int
4336 iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4337 {
4338         struct iwm_vap *ivp = IWM_VAP(vap);
4339         struct ieee80211com *ic = vap->iv_ic;
4340         struct iwm_softc *sc = ic->ic_softc;
4341         struct iwm_node *in;
4342         int error;
4343
4344         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4345             "switching state %s -> %s\n",
4346             ieee80211_state_name[vap->iv_state],
4347             ieee80211_state_name[nstate]);
4348         IEEE80211_UNLOCK(ic);
4349         IWM_LOCK(sc);
4350
4351         if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
4352                 iwm_led_blink_stop(sc);
4353
4354         /* disable beacon filtering if we're hopping out of RUN */
4355         if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
4356                 iwm_mvm_disable_beacon_filter(sc);
4357
4358                 if ((in = IWM_NODE(vap->iv_bss)) != NULL)
4359                         in->in_assoc = 0;
4360
4361                 if (nstate == IEEE80211_S_INIT) {
4362                         IWM_UNLOCK(sc);
4363                         IEEE80211_LOCK(ic);
4364                         error = ivp->iv_newstate(vap, nstate, arg);
4365                         IEEE80211_UNLOCK(ic);
4366                         IWM_LOCK(sc);
4367                         iwm_release(sc, NULL);
4368                         IWM_UNLOCK(sc);
4369                         IEEE80211_LOCK(ic);
4370                         return error;
4371                 }
4372
4373                 /*
4374                  * It's impossible to directly go RUN->SCAN. If we iwm_release()
4375                  * above then the card will be completely reinitialized,
4376                  * so the driver must do everything necessary to bring the card
4377                  * from INIT to SCAN.
4378                  *
4379                  * Additionally, upon receiving deauth frame from AP,
4380                  * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
4381                  * state. This will also fail with this driver, so bring the FSM
4382                  * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
4383                  *
4384                  * XXX TODO: fix this for FreeBSD!
4385                  */
4386                 if (nstate == IEEE80211_S_SCAN ||
4387                     nstate == IEEE80211_S_AUTH ||
4388                     nstate == IEEE80211_S_ASSOC) {
4389                         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4390                             "Force transition to INIT; MGT=%d\n", arg);
4391                         IWM_UNLOCK(sc);
4392                         IEEE80211_LOCK(ic);
4393                         /* Always pass arg as -1 since we can't Tx right now. */
4394                         /*
4395                          * XXX arg is just ignored anyway when transitioning
4396                          *     to IEEE80211_S_INIT.
4397                          */
4398                         vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
4399                         IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4400                             "Going INIT->SCAN\n");
4401                         nstate = IEEE80211_S_SCAN;
4402                         IEEE80211_UNLOCK(ic);
4403                         IWM_LOCK(sc);
4404                 }
4405         }
4406
4407         switch (nstate) {
4408         case IEEE80211_S_INIT:
4409                 break;
4410
4411         case IEEE80211_S_AUTH:
4412                 if ((error = iwm_auth(vap, sc)) != 0) {
4413                         device_printf(sc->sc_dev,
4414                             "%s: could not move to auth state: %d\n",
4415                             __func__, error);
4416                         break;
4417                 }
4418                 break;
4419
4420         case IEEE80211_S_ASSOC:
4421                 if ((error = iwm_assoc(vap, sc)) != 0) {
4422                         device_printf(sc->sc_dev,
4423                             "%s: failed to associate: %d\n", __func__,
4424                             error);
4425                         break;
4426                 }
4427                 break;
4428
4429         case IEEE80211_S_RUN:
4430         {
4431                 struct iwm_host_cmd cmd = {
4432                         .id = IWM_LQ_CMD,
4433                         .len = { sizeof(in->in_lq), },
4434                         .flags = IWM_CMD_SYNC,
4435                 };
4436
4437                 /* Update the association state, now that we have it all */
4438                 /* (e.g. the association ID arrives at this point) */
4439                 error = iwm_assoc(vap, sc);
4440                 if (error != 0) {
4441                         device_printf(sc->sc_dev,
4442                             "%s: failed to update association state: %d\n",
4443                             __func__,
4444                             error);
4445                         break;
4446                 }
4447
4448                 in = IWM_NODE(vap->iv_bss);
4449                 iwm_mvm_power_mac_update_mode(sc, in);
4450                 iwm_mvm_enable_beacon_filter(sc, in);
4451                 iwm_mvm_update_quotas(sc, in);
4452                 iwm_setrates(sc, in);
4453
4454                 cmd.data[0] = &in->in_lq;
4455                 if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
4456                         device_printf(sc->sc_dev,
4457                             "%s: IWM_LQ_CMD failed\n", __func__);
4458                 }
4459
4460                 iwm_mvm_led_enable(sc);
4461                 break;
4462         }
4463
4464         default:
4465                 break;
4466         }
4467         IWM_UNLOCK(sc);
4468         IEEE80211_LOCK(ic);
4469
4470         return (ivp->iv_newstate(vap, nstate, arg));
4471 }
4472
4473 void
4474 iwm_endscan_cb(void *arg, int pending)
4475 {
4476         struct iwm_softc *sc = arg;
4477         struct ieee80211com *ic = &sc->sc_ic;
4478
4479         IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4480             "%s: scan ended\n",
4481             __func__);
4482
4483         ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4484 }
4485
4486 /*
4487  * Aging and idle timeouts for the different possible scenarios
4488  * in default configuration
4489  */
4490 static const uint32_t
4491 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4492         {
4493                 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
4494                 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
4495         },
4496         {
4497                 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
4498                 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
4499         },
4500         {
4501                 htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
4502                 htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
4503         },
4504         {
4505                 htole32(IWM_SF_BA_AGING_TIMER_DEF),
4506                 htole32(IWM_SF_BA_IDLE_TIMER_DEF)
4507         },
4508         {
4509                 htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
4510                 htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
4511         },
4512 };
4513
4514 /*
4515  * Aging and idle timeouts for the different possible scenarios
4516  * in single BSS MAC configuration.
4517  */
4518 static const uint32_t
4519 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4520         {
4521                 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
4522                 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
4523         },
4524         {
4525                 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
4526                 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
4527         },
4528         {
4529                 htole32(IWM_SF_MCAST_AGING_TIMER),
4530                 htole32(IWM_SF_MCAST_IDLE_TIMER)
4531         },
4532         {
4533                 htole32(IWM_SF_BA_AGING_TIMER),
4534                 htole32(IWM_SF_BA_IDLE_TIMER)
4535         },
4536         {
4537                 htole32(IWM_SF_TX_RE_AGING_TIMER),
4538                 htole32(IWM_SF_TX_RE_IDLE_TIMER)
4539         },
4540 };
4541
4542 static void
4543 iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
4544     struct ieee80211_node *ni)
4545 {
4546         int i, j, watermark;
4547
4548         sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
4549
4550         /*
4551          * If we are in the association flow, check the antenna configuration
4552          * capabilities of the AP station and choose the watermark accordingly.
4553          */
4554         if (ni) {
4555                 if (ni->ni_flags & IEEE80211_NODE_HT) {
4556 #ifdef notyet
4557                         if (ni->ni_rxmcs[2] != 0)
4558                                 watermark = IWM_SF_W_MARK_MIMO3;
4559                         else if (ni->ni_rxmcs[1] != 0)
4560                                 watermark = IWM_SF_W_MARK_MIMO2;
4561                         else
4562 #endif
4563                                 watermark = IWM_SF_W_MARK_SISO;
4564                 } else {
4565                         watermark = IWM_SF_W_MARK_LEGACY;
4566                 }
4567         /* default watermark value for unassociated mode. */
4568         } else {
4569                 watermark = IWM_SF_W_MARK_MIMO2;
4570         }
4571         sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
4572
4573         for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
4574                 for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
4575                         sf_cmd->long_delay_timeouts[i][j] =
4576                                         htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
4577                 }
4578         }
4579
4580         if (ni) {
4581                 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
4582                        sizeof(iwm_sf_full_timeout));
4583         } else {
4584                 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
4585                        sizeof(iwm_sf_full_timeout_def));
4586         }
4587 }
4588
4589 static int
4590 iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4591 {
4592         struct ieee80211com *ic = &sc->sc_ic;
4593         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4594         struct iwm_sf_cfg_cmd sf_cmd = {
4595                 .state = htole32(IWM_SF_FULL_ON),
4596         };
4597         int ret = 0;
4598
4599         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4600                 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4601
4602         switch (new_state) {
4603         case IWM_SF_UNINIT:
4604         case IWM_SF_INIT_OFF:
4605                 iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4606                 break;
4607         case IWM_SF_FULL_ON:
4608                 iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4609                 break;
4610         default:
4611                 IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4612                     "Invalid state: %d, not sending Smart Fifo cmd\n",
4613                     new_state);
4614                 return EINVAL;
4615         }
4616
4617         ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4618                                    sizeof(sf_cmd), &sf_cmd);
4619         return ret;
4620 }
4621
4622 static int
4623 iwm_send_bt_init_conf(struct iwm_softc *sc)
4624 {
4625         struct iwm_bt_coex_cmd bt_cmd;
4626
4627         bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4628         bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4629
4630         return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4631             &bt_cmd);
4632 }
4633
4634 static int
4635 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4636 {
4637         struct iwm_mcc_update_cmd mcc_cmd;
4638         struct iwm_host_cmd hcmd = {
4639                 .id = IWM_MCC_UPDATE_CMD,
4640                 .flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4641                 .data = { &mcc_cmd },
4642         };
4643         int ret;
4644 #ifdef IWM_DEBUG
4645         struct iwm_rx_packet *pkt;
4646         struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4647         struct iwm_mcc_update_resp *mcc_resp;
4648         int n_channels;
4649         uint16_t mcc;
4650 #endif
4651         int resp_v2 = isset(sc->sc_enabled_capa,
4652             IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4653
4654         memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4655         mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
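        /* e.g. the "ZZ" passed in from iwm_init_hw() encodes as 0x5a5a */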
4656         if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4657             isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
4658                 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4659         else
4660                 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4661
4662         if (resp_v2)
4663                 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4664         else
4665                 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4666
4667         IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4668             "send MCC update to FW with '%c%c' src = %d\n",
4669             alpha2[0], alpha2[1], mcc_cmd.source_id);
4670
4671         ret = iwm_send_cmd(sc, &hcmd);
4672         if (ret)
4673                 return ret;
4674
4675 #ifdef IWM_DEBUG
4676         pkt = hcmd.resp_pkt;
4677
4678         /* Extract MCC response */
4679         if (resp_v2) {
4680                 mcc_resp = (void *)pkt->data;
4681                 mcc = mcc_resp->mcc;
4682                 n_channels =  le32toh(mcc_resp->n_channels);
4683         } else {
4684                 mcc_resp_v1 = (void *)pkt->data;
4685                 mcc = mcc_resp_v1->mcc;
4686                 n_channels =  le32toh(mcc_resp_v1->n_channels);
4687         }
4688
4689         /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4690         if (mcc == 0)
4691                 mcc = 0x3030;  /* "00" - world */
4692
4693         IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4694             "regulatory domain '%c%c' (%d channels available)\n",
4695             mcc >> 8, mcc & 0xff, n_channels);
4696 #endif
4697         iwm_free_resp(sc, &hcmd);
4698
4699         return 0;
4700 }
4701
4702 static void
4703 iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4704 {
4705         struct iwm_host_cmd cmd = {
4706                 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4707                 .len = { sizeof(uint32_t), },
4708                 .data = { &backoff, },
4709         };
4710
4711         if (iwm_send_cmd(sc, &cmd) != 0) {
4712                 device_printf(sc->sc_dev,
4713                     "failed to change thermal tx backoff\n");
4714         }
4715 }
4716
4717 static int
4718 iwm_init_hw(struct iwm_softc *sc)
4719 {
4720         struct ieee80211com *ic = &sc->sc_ic;
4721         int error, i, ac;
4722
4723         if ((error = iwm_start_hw(sc)) != 0) {
4724                 printf("iwm_start_hw: failed %d\n", error);
4725                 return error;
4726         }
4727
4728         if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4729                 printf("iwm_run_init_mvm_ucode: failed %d\n", error);
4730                 return error;
4731         }
4732
4733         /*
4734          * We should stop and restart the HW since the INIT
4735          * image has just been loaded.
4736          */
4737         iwm_stop_device(sc);
4738         if ((error = iwm_start_hw(sc)) != 0) {
4739                 device_printf(sc->sc_dev, "could not initialize hardware\n");
4740                 return error;
4741         }
4742
4743         /* restart, this time with the regular firmware */
4744         error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4745         if (error) {
4746                 device_printf(sc->sc_dev, "could not load firmware\n");
4747                 goto error;
4748         }
4749
4750         if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4751                 device_printf(sc->sc_dev, "bt init conf failed\n");
4752                 goto error;
4753         }
4754
4755         error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
4756         if (error != 0) {
4757                 device_printf(sc->sc_dev, "antenna config failed\n");
4758                 goto error;
4759         }
4760
4761         /* Send phy db control command and then phy db calibration */
4762         if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4763                 goto error;
4764
4765         if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4766                 device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4767                 goto error;
4768         }
4769
4770         /* Add auxiliary station for scanning */
4771         if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4772                 device_printf(sc->sc_dev, "add_aux_sta failed\n");
4773                 goto error;
4774         }
4775
4776         for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4777                 /*
4778                  * The channel used here isn't relevant as it's
4779                  * going to be overwritten in the other flows.
4780                  * For now use the first channel we have.
4781                  */
4782                 if ((error = iwm_mvm_phy_ctxt_add(sc,
4783                     &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4784                         goto error;
4785         }
4786
4787         /* Initialize tx backoffs to the minimum. */
4788         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4789                 iwm_mvm_tt_tx_backoff(sc, 0);
4790
4791         error = iwm_mvm_power_update_device(sc);
4792         if (error)
4793                 goto error;
4794
4795         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
4796                 if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4797                         goto error;
4798         }
4799
4800         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4801                 if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4802                         goto error;
4803         }
4804
4805         /* Enable Tx queues. */
4806         for (ac = 0; ac < WME_NUM_AC; ac++) {
4807                 error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4808                     iwm_mvm_ac_to_tx_fifo[ac]);
4809                 if (error)
4810                         goto error;
4811         }
4812
4813         if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4814                 device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4815                 goto error;
4816         }
4817
4818         return 0;
4819
4820  error:
4821         iwm_stop_device(sc);
4822         return error;
4823 }
4824
4825 /* Allow multicast from our BSSID. */
4826 static int
4827 iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4828 {
4829         struct ieee80211_node *ni = vap->iv_bss;
4830         struct iwm_mcast_filter_cmd *cmd;
4831         size_t size;
4832         int error;
4833
4834         size = roundup(sizeof(*cmd), 4);
4835         cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4836         if (cmd == NULL)
4837                 return ENOMEM;
4838         cmd->filter_own = 1;
4839         cmd->port_id = 0;
4840         cmd->count = 0;
4841         cmd->pass_all = 1;
4842         IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4843
4844         error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4845             IWM_CMD_SYNC, size, cmd);
4846         free(cmd, M_DEVBUF);
4847
4848         return (error);
4849 }
4850
4851 /*
4852  * ifnet interfaces
4853  */
4854
4855 static void
4856 iwm_init(struct iwm_softc *sc)
4857 {
4858         int error;
4859
4860         if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4861                 return;
4862         }
4863         sc->sc_generation++;
4864         sc->sc_flags &= ~IWM_FLAG_STOPPED;
4865
4866         if ((error = iwm_init_hw(sc)) != 0) {
4867                 printf("iwm_init_hw failed %d\n", error);
4868                 iwm_stop(sc);
4869                 return;
4870         }
4871
4872         /*
4873          * Ok, firmware loaded and we are jogging
4874          */
4875         sc->sc_flags |= IWM_FLAG_HW_INITED;
4876         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4877 }
4878
4879 static int
4880 iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4881 {
4882         struct iwm_softc *sc;
4883         int error;
4884
4885         sc = ic->ic_softc;
4886
4887         IWM_LOCK(sc);
4888         if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4889                 IWM_UNLOCK(sc);
4890                 return (ENXIO);
4891         }
4892         error = mbufq_enqueue(&sc->sc_snd, m);
4893         if (error) {
4894                 IWM_UNLOCK(sc);
4895                 return (error);
4896         }
4897         iwm_start(sc);
4898         IWM_UNLOCK(sc);
4899         return (0);
4900 }
4901
4902 /*
4903  * Dequeue packets from sendq and call send.
4904  */
4905 static void
4906 iwm_start(struct iwm_softc *sc)
4907 {
4908         struct ieee80211_node *ni;
4909         struct mbuf *m;
4910         int ac = 0;
4911
4912         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4913         while (sc->qfullmsk == 0 &&
4914                 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4915                 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4916                 if (iwm_tx(sc, m, ni, ac) != 0) {
4917                         if_inc_counter(ni->ni_vap->iv_ifp,
4918                             IFCOUNTER_OERRORS, 1);
4919                         ieee80211_free_node(ni);
4920                         continue;
4921                 }
4922                 sc->sc_tx_timer = 15;
4923         }
4924         IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4925 }
4926
4927 static void
4928 iwm_stop(struct iwm_softc *sc)
4929 {
4930
4931         sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4932         sc->sc_flags |= IWM_FLAG_STOPPED;
4933         sc->sc_generation++;
4934         iwm_led_blink_stop(sc);
4935         sc->sc_tx_timer = 0;
4936         iwm_stop_device(sc);
4937 }
4938
4939 static void
4940 iwm_watchdog(void *arg)
4941 {
4942         struct iwm_softc *sc = arg;
4943         struct ieee80211com *ic = &sc->sc_ic;
4944
4945         if (sc->sc_tx_timer > 0) {
4946                 if (--sc->sc_tx_timer == 0) {
4947                         device_printf(sc->sc_dev, "device timeout\n");
4948 #ifdef IWM_DEBUG
4949                         iwm_nic_error(sc);
4950 #endif
4951                         ieee80211_restart_all(ic);
4952                         counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4953                         return;
4954                 }
4955         }
4956         callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4957 }
4958
4959 static void
4960 iwm_parent(struct ieee80211com *ic)
4961 {
4962         struct iwm_softc *sc = ic->ic_softc;
4963         int startall = 0;
4964
4965         IWM_LOCK(sc);
4966         if (ic->ic_nrunning > 0) {
4967                 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4968                         iwm_init(sc);
4969                         startall = 1;
4970                 }
4971         } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4972                 iwm_stop(sc);
4973         IWM_UNLOCK(sc);
4974         if (startall)
4975                 ieee80211_start_all(ic);
4976 }
4977
4978 /*
4979  * The interrupt side of things
4980  */
4981
4982 /*
4983  * error dumping routines are from iwlwifi/mvm/utils.c
4984  */
4985
4986 /*
4987  * Note: This structure is read from the device with IO accesses,
4988  * and the reading already does the endian conversion. As it is
4989  * read with uint32_t-sized accesses, any members with a different size
4990  * need to be ordered correctly though!
4991  */
4992 struct iwm_error_event_table {
4993         uint32_t valid;         /* (nonzero) valid, (0) log is empty */
4994         uint32_t error_id;              /* type of error */
4995         uint32_t trm_hw_status0;        /* TRM HW status */
4996         uint32_t trm_hw_status1;        /* TRM HW status */
4997         uint32_t blink2;                /* branch link */
4998         uint32_t ilink1;                /* interrupt link */
4999         uint32_t ilink2;                /* interrupt link */
5000         uint32_t data1;         /* error-specific data */
5001         uint32_t data2;         /* error-specific data */
5002         uint32_t data3;         /* error-specific data */
5003         uint32_t bcon_time;             /* beacon timer */
5004         uint32_t tsf_low;               /* network timestamp function timer */
5005         uint32_t tsf_hi;                /* network timestamp function timer */
5006         uint32_t gp1;           /* GP1 timer register */
5007         uint32_t gp2;           /* GP2 timer register */
5008         uint32_t fw_rev_type;   /* firmware revision type */
5009         uint32_t major;         /* uCode version major */
5010         uint32_t minor;         /* uCode version minor */
5011         uint32_t hw_ver;                /* HW Silicon version */
5012         uint32_t brd_ver;               /* HW board version */
5013         uint32_t log_pc;                /* log program counter */
5014         uint32_t frame_ptr;             /* frame pointer */
5015         uint32_t stack_ptr;             /* stack pointer */
5016         uint32_t hcmd;          /* last host command header */
5017         uint32_t isr0;          /* isr status register LMPM_NIC_ISR0:
5018                                  * rxtx_flag */
5019         uint32_t isr1;          /* isr status register LMPM_NIC_ISR1:
5020                                  * host_flag */
5021         uint32_t isr2;          /* isr status register LMPM_NIC_ISR2:
5022                                  * enc_flag */
5023         uint32_t isr3;          /* isr status register LMPM_NIC_ISR3:
5024                                  * time_flag */
5025         uint32_t isr4;          /* isr status register LMPM_NIC_ISR4:
5026                                  * wico interrupt */
5027         uint32_t last_cmd_id;   /* last HCMD id handled by the firmware */
5028         uint32_t wait_event;            /* wait event() caller address */
5029         uint32_t l2p_control;   /* L2pControlField */
5030         uint32_t l2p_duration;  /* L2pDurationField */
5031         uint32_t l2p_mhvalid;   /* L2pMhValidBits */
5032         uint32_t l2p_addr_match;        /* L2pAddrMatchStat */
5033         uint32_t lmpm_pmg_sel;  /* indicates which clocks are turned on
5034                                  * (LMPM_PMG_SEL) */
5035         uint32_t u_timestamp;   /* date and time of the
5036                                  * compilation */
5037         uint32_t flow_handler;  /* FH read/write pointers, RX credit */
5038 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5039
5040 /*
5041  * UMAC error struct - relevant starting from family 8000 chip.
5042  * Note: This structure is read from the device with IO accesses,
5043  * and the reading already does the endian conversion. As it is
5044  * read with u32-sized accesses, any members with a different size
5045  * need to be ordered correctly though!
5046  */
5047 struct iwm_umac_error_event_table {
5048         uint32_t valid;         /* (nonzero) valid, (0) log is empty */
5049         uint32_t error_id;      /* type of error */
5050         uint32_t blink1;        /* branch link */
5051         uint32_t blink2;        /* branch link */
5052         uint32_t ilink1;        /* interrupt link */
5053         uint32_t ilink2;        /* interrupt link */
5054         uint32_t data1;         /* error-specific data */
5055         uint32_t data2;         /* error-specific data */
5056         uint32_t data3;         /* error-specific data */
5057         uint32_t umac_major;
5058         uint32_t umac_minor;
5059         uint32_t frame_pointer; /* core register 27 */
5060         uint32_t stack_pointer; /* core register 28 */
5061         uint32_t cmd_header;    /* latest host cmd sent to UMAC */
5062         uint32_t nic_isr_pref;  /* ISR status register */
5063 } __packed;
5064
5065 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
5066 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5067
5068 #ifdef IWM_DEBUG
5069 struct {
5070         const char *name;
5071         uint8_t num;
5072 } advanced_lookup[] = {
5073         { "NMI_INTERRUPT_WDG", 0x34 },
5074         { "SYSASSERT", 0x35 },
5075         { "UCODE_VERSION_MISMATCH", 0x37 },
5076         { "BAD_COMMAND", 0x38 },
5077         { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
5078         { "FATAL_ERROR", 0x3D },
5079         { "NMI_TRM_HW_ERR", 0x46 },
5080         { "NMI_INTERRUPT_TRM", 0x4C },
5081         { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
5082         { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
5083         { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
5084         { "NMI_INTERRUPT_HOST", 0x66 },
5085         { "NMI_INTERRUPT_ACTION_PT", 0x7C },
5086         { "NMI_INTERRUPT_UNKNOWN", 0x84 },
5087         { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
5088         { "ADVANCED_SYSASSERT", 0 },
5089 };
5090
5091 static const char *
5092 iwm_desc_lookup(uint32_t num)
5093 {
5094         int i;
5095
5096         for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5097                 if (advanced_lookup[i].num == num)
5098                         return advanced_lookup[i].name;
5099
5100         /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5101         return advanced_lookup[i].name;
5102 }
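/*
 * e.g. iwm_desc_lookup(0x34) returns "NMI_INTERRUPT_WDG"; any id not in the
 * table falls through to the final "ADVANCED_SYSASSERT" entry.
 */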
5103
5104 static void
5105 iwm_nic_umac_error(struct iwm_softc *sc)
5106 {
5107         struct iwm_umac_error_event_table table;
5108         uint32_t base;
5109
5110         base = sc->sc_uc.uc_umac_error_event_table;
5111
5112         if (base < 0x800000) {
5113                 device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5114                     base);
5115                 return;
5116         }
5117
5118         if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5119                 device_printf(sc->sc_dev, "reading errlog failed\n");
5120                 return;
5121         }
5122
5123         if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5124                 device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5125                 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5126                     sc->sc_flags, table.valid);
5127         }
5128
5129         device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5130                 iwm_desc_lookup(table.error_id));
5131         device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5132         device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5133         device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5134             table.ilink1);
5135         device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5136             table.ilink2);
5137         device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5138         device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5139         device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5140         device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5141         device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5142         device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5143             table.frame_pointer);
5144         device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5145             table.stack_pointer);
5146         device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5147         device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5148             table.nic_isr_pref);
5149 }
5150
5151 /*
5152  * Support for dumping the error log seemed like a good idea ...
5153  * but it's mostly hex junk and the only sensible thing is the
5154  * hw/ucode revision (which we know anyway).  Since it's here,
5155  * I'll just leave it in, just in case e.g. the Intel guys want to
5156  * help us decipher some "ADVANCED_SYSASSERT" later.
5157  */
5158 static void
5159 iwm_nic_error(struct iwm_softc *sc)
5160 {
5161         struct iwm_error_event_table table;
5162         uint32_t base;
5163
5164         device_printf(sc->sc_dev, "dumping device error log\n");
5165         base = sc->sc_uc.uc_error_event_table;
5166         if (base < 0x800000) {
5167                 device_printf(sc->sc_dev,
5168                     "Invalid error log pointer 0x%08x\n", base);
5169                 return;
5170         }
5171
5172         if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5173                 device_printf(sc->sc_dev, "reading errlog failed\n");
5174                 return;
5175         }
5176
5177         if (!table.valid) {
5178                 device_printf(sc->sc_dev, "errlog not found, skipping\n");
5179                 return;
5180         }
5181
5182         if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5183                 device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5184                 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5185                     sc->sc_flags, table.valid);
5186         }
5187
5188         device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5189             iwm_desc_lookup(table.error_id));
5190         device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5191             table.trm_hw_status0);
5192         device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5193             table.trm_hw_status1);
5194         device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5195         device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5196         device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5197         device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5198         device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5199         device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5200         device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5201         device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5202         device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5203         device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5204         device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5205         device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5206             table.fw_rev_type);
5207         device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5208         device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5209         device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5210         device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5211         device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5212         device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5213         device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5214         device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5215         device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5216         device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5217         device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5218         device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5219         device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5220         device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5221         device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5222         device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5223         device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5224         device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5225         device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5226
5227         if (sc->sc_uc.uc_umac_error_event_table)
5228                 iwm_nic_umac_error(sc);
5229 }
5230 #endif
5231
5232 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
5233
5234 /*
5235  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5236  * Basic structure from if_iwn
5237  */
5238 static void
5239 iwm_notif_intr(struct iwm_softc *sc)
5240 {
5241         struct ieee80211com *ic = &sc->sc_ic;
5242         uint16_t hw;
5243
5244         bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5245             BUS_DMASYNC_POSTREAD);
5246
5247         hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5248
5249         /*
5250          * Process responses
5251          */
5252         while (sc->rxq.cur != hw) {
5253                 struct iwm_rx_ring *ring = &sc->rxq;
5254                 struct iwm_rx_data *data = &ring->data[ring->cur];
5255                 struct iwm_rx_packet *pkt;
5256                 struct iwm_cmd_response *cresp;
5257                 int qid, idx, code;
5258
5259                 bus_dmamap_sync(ring->data_dmat, data->map,
5260                     BUS_DMASYNC_POSTREAD);
5261                 pkt = mtod(data->m, struct iwm_rx_packet *);
5262
5263                 qid = pkt->hdr.qid & ~0x80;
5264                 idx = pkt->hdr.idx;
5265
5266                 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5267                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5268                     "rx packet qid=%d idx=%d type=%x %d %d\n",
5269                     pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, ring->cur, hw);
5270
5271                 /*
5272                  * randomly get these from the firmware, no idea why.
5273                  * they at least seem harmless, so just ignore them for now
5274                  */
5275                 if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
5276                     || pkt->len_n_flags == htole32(0x55550000))) {
5277                         ADVANCE_RXQ(sc);
5278                         continue;
5279                 }
5280
5281                 iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5282
5283                 switch (code) {
5284                 case IWM_REPLY_RX_PHY_CMD:
5285                         iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
5286                         break;
5287
5288                 case IWM_REPLY_RX_MPDU_CMD:
5289                         iwm_mvm_rx_rx_mpdu(sc, pkt, data);
5290                         break;
5291
5292                 case IWM_TX_CMD:
5293                         iwm_mvm_rx_tx_cmd(sc, pkt, data);
5294                         break;
5295
5296                 case IWM_MISSED_BEACONS_NOTIFICATION: {
5297                         struct iwm_missed_beacons_notif *resp;
5298                         int missed;
5299
5300                         /* XXX look at mac_id to determine interface ID */
5301                         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5302
5303                         resp = (void *)pkt->data;
5304                         missed = le32toh(resp->consec_missed_beacons);
5305
5306                         IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5307                             "%s: MISSED_BEACON: mac_id=%d, "
5308                             "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5309                             "num_rx=%d\n",
5310                             __func__,
5311                             le32toh(resp->mac_id),
5312                             le32toh(resp->consec_missed_beacons_since_last_rx),
5313                             le32toh(resp->consec_missed_beacons),
5314                             le32toh(resp->num_expected_beacons),
5315                             le32toh(resp->num_recvd_beacons));
5316
5317                         /* Be paranoid */
5318                         if (vap == NULL)
5319                                 break;
5320
5321                         /* XXX no net80211 locking? */
5322                         if (vap->iv_state == IEEE80211_S_RUN &&
5323                             (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5324                                 if (missed > vap->iv_bmissthreshold) {
5325                                         /* XXX bad locking; turn into task */
5326                                         IWM_UNLOCK(sc);
5327                                         ieee80211_beacon_miss(ic);
5328                                         IWM_LOCK(sc);
5329                                 }
5330                         }
5331
5332                         break; }
5333
5334                 case IWM_MFUART_LOAD_NOTIFICATION:
5335                         break;
5336
5337                 case IWM_MVM_ALIVE: {
5338                         struct iwm_mvm_alive_resp_v1 *resp1;
5339                         struct iwm_mvm_alive_resp_v2 *resp2;
5340                         struct iwm_mvm_alive_resp_v3 *resp3;
5341
5342                         if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
5343                                 resp1 = (void *)pkt->data;
5344                                 sc->sc_uc.uc_error_event_table
5345                                     = le32toh(resp1->error_event_table_ptr);
5346                                 sc->sc_uc.uc_log_event_table
5347                                     = le32toh(resp1->log_event_table_ptr);
5348                                 sc->sched_base = le32toh(resp1->scd_base_ptr);
5349                                 if (resp1->status == IWM_ALIVE_STATUS_OK)
5350                                         sc->sc_uc.uc_ok = 1;
5351                                 else
5352                                         sc->sc_uc.uc_ok = 0;
5353                         }
5354
5355                         if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
5356                                 resp2 = (void *)pkt->data;
5357                                 sc->sc_uc.uc_error_event_table
5358                                     = le32toh(resp2->error_event_table_ptr);
5359                                 sc->sc_uc.uc_log_event_table
5360                                     = le32toh(resp2->log_event_table_ptr);
5361                                 sc->sched_base = le32toh(resp2->scd_base_ptr);
5362                                 sc->sc_uc.uc_umac_error_event_table
5363                                     = le32toh(resp2->error_info_addr);
5364                                 if (resp2->status == IWM_ALIVE_STATUS_OK)
5365                                         sc->sc_uc.uc_ok = 1;
5366                                 else
5367                                         sc->sc_uc.uc_ok = 0;
5368                         }
5369
5370                         if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
5371                                 resp3 = (void *)pkt->data;
5372                                 sc->sc_uc.uc_error_event_table
5373                                     = le32toh(resp3->error_event_table_ptr);
5374                                 sc->sc_uc.uc_log_event_table
5375                                     = le32toh(resp3->log_event_table_ptr);
5376                                 sc->sched_base = le32toh(resp3->scd_base_ptr);
5377                                 sc->sc_uc.uc_umac_error_event_table
5378                                     = le32toh(resp3->error_info_addr);
5379                                 if (resp3->status == IWM_ALIVE_STATUS_OK)
5380                                         sc->sc_uc.uc_ok = 1;
5381                                 else
5382                                         sc->sc_uc.uc_ok = 0;
5383                         }
5384
5385                         sc->sc_uc.uc_intr = 1;
5386                         wakeup(&sc->sc_uc);
5387                         break; }
5388
5389                 case IWM_CALIB_RES_NOTIF_PHY_DB:
5390                         iwm_phy_db_set_section(sc->sc_phy_db, pkt);
5391                         break;
5392
5393                 case IWM_STATISTICS_NOTIFICATION: {
5394                         struct iwm_notif_statistics *stats;
5395                         stats = (void *)pkt->data;
5396                         memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5397                         sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
5398                         break; }
5399
5400                 case IWM_NVM_ACCESS_CMD:
5401                 case IWM_MCC_UPDATE_CMD:
5402                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
5403                                 memcpy(sc->sc_cmd_resp,
5404                                     pkt, sizeof(sc->sc_cmd_resp));
5405                         }
5406                         break;
5407
5408                 case IWM_MCC_CHUB_UPDATE_CMD: {
5409                         struct iwm_mcc_chub_notif *notif;
5410                         notif = (void *)pkt->data;
5411
5412                         sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5413                         sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5414                         sc->sc_fw_mcc[2] = '\0';
5415                         IWM_DPRINTF(sc, IWM_DEBUG_RESET,
5416                             "fw source %d sent CC '%s'\n",
5417                             notif->source_id, sc->sc_fw_mcc);
5418                         break; }
5419
5420                 case IWM_DTS_MEASUREMENT_NOTIFICATION:
5421                         break;
5422
5423                 case IWM_PHY_CONFIGURATION_CMD:
5424                 case IWM_TX_ANT_CONFIGURATION_CMD:
5425                 case IWM_ADD_STA:
5426                 case IWM_MAC_CONTEXT_CMD:
5427                 case IWM_REPLY_SF_CFG_CMD:
5428                 case IWM_POWER_TABLE_CMD:
5429                 case IWM_PHY_CONTEXT_CMD:
5430                 case IWM_BINDING_CONTEXT_CMD:
5431                 case IWM_TIME_EVENT_CMD:
5432                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5433                 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5434                 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5435                 case IWM_REPLY_BEACON_FILTERING_CMD:
5436                 case IWM_MAC_PM_POWER_TABLE:
5437                 case IWM_TIME_QUOTA_CMD:
5438                 case IWM_REMOVE_STA:
5439                 case IWM_TXPATH_FLUSH:
5440                 case IWM_LQ_CMD:
5441                 case IWM_BT_CONFIG:
5442                 case IWM_REPLY_THERMAL_MNG_BACKOFF:
5443                         cresp = (void *)pkt->data;
5444                         if (sc->sc_wantresp == ((qid << 16) | idx)) {
5445                                 memcpy(sc->sc_cmd_resp,
5446                                     pkt, sizeof(*pkt)+sizeof(*cresp));
5447                         }
5448                         break;
5449
5450                 /* ignore */
5451                 case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
5452                         break;
5453
5454                 case IWM_INIT_COMPLETE_NOTIF:
5455                         sc->sc_init_complete = 1;
5456                         wakeup(&sc->sc_init_complete);
5457                         break;
5458
5459                 case IWM_SCAN_OFFLOAD_COMPLETE: {
5460                         struct iwm_periodic_scan_complete *notif;
5461                         notif = (void *)pkt->data;
5462                         break;
5463                 }
5464
5465                 case IWM_SCAN_ITERATION_COMPLETE: {
5466                         struct iwm_lmac_scan_complete_notif *notif;
5467                         notif = (void *)pkt->data;
5468                         ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5469                         break;
5470                 }
5471  
5472                 case IWM_SCAN_COMPLETE_UMAC: {
5473                         struct iwm_umac_scan_complete *notif;
5474                         notif = (void *)pkt->data;
5475
5476                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
5477                             "UMAC scan complete, status=0x%x\n",
5478                             notif->status);
5479 #if 0   /* XXX This would be a duplicate scan end call */
5480                         taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
5481 #endif
5482                         break;
5483                 }
5484
5485                 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5486                         struct iwm_umac_scan_iter_complete_notif *notif;
5487                         notif = (void *)pkt->data;
5488
5489                         IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5490                             "complete, status=0x%x, %d channels scanned\n",
5491                             notif->status, notif->scanned_channels);
5492                         ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5493                         break;
5494                 }
5495
5496                 case IWM_REPLY_ERROR: {
5497                         struct iwm_error_resp *resp;
5498                         resp = (void *)pkt->data;
5499
5500                         device_printf(sc->sc_dev,
5501                             "firmware error 0x%x, cmd 0x%x\n",
5502                             le32toh(resp->error_type),
5503                             resp->cmd_id);
5504                         break;
5505                 }
5506
5507                 case IWM_TIME_EVENT_NOTIFICATION: {
5508                         struct iwm_time_event_notif *notif;
5509                         notif = (void *)pkt->data;
5510
5511                         IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5512                             "TE notif status = 0x%x action = 0x%x\n",
5513                             notif->status, notif->action);
5514                         break;
5515                 }
5516
5517                 case IWM_MCAST_FILTER_CMD:
5518                         break;
5519
5520                 case IWM_SCD_QUEUE_CFG: {
5521                         struct iwm_scd_txq_cfg_rsp *rsp;
5522                         rsp = (void *)pkt->data;
5523
5524                         IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5525                             "queue cfg token=0x%x sta_id=%d "
5526                             "tid=%d scd_queue=%d\n",
5527                             rsp->token, rsp->sta_id, rsp->tid,
5528                             rsp->scd_queue);
5529                         break;
5530                 }
5531
5532                 default:
5533                         device_printf(sc->sc_dev,
5534                             "frame %d/%d %x UNHANDLED (this should "
5535                             "not happen)\n", qid, idx,
5536                             pkt->len_n_flags);
5537                         break;
5538                 }
5539
5540                 /*
5541                  * Why test bit 0x80?  From the Linux driver:
5542                  *
5543                  * uCode sets bit 15 of the sequence field when it
5544                  * originates a response/notification itself, i.e. when
5545                  * the packet is not a direct reply to a command sent by
5546                  * the driver.  For example, uCode issues IWM_REPLY_RX
5547                  * when it hands a received frame to the driver; that is
5548                  * not a direct response to any driver command.
5549                  *
5550                  * Bit 15 vs. bit 7: the Linux driver treats the sequence
5551                  * as one 16-bit field, whereas "qid" here is only its
5552                  * upper byte, so bit 15 of the sequence is bit 7 of qid.
5553                  */
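                     /*
                      * Worked example (values are hypothetical): a reply to a
                      * driver command might carry sequence 0x0142 -> qid 0x01,
                      * bit 7 clear -> iwm_cmd_done() completes the command;
                      * an unsolicited notification might carry sequence
                      * 0x8000 -> qid 0x80, bit 7 set -> nothing to complete.
                      */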
5554                 if (!(pkt->hdr.qid & (1 << 7))) {
5555                         iwm_cmd_done(sc, pkt);
5556                 }
5557
5558                 ADVANCE_RXQ(sc);
5559         }
5560
5561         IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
5562             IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
5563
5564         /*
5565          * Tell the firmware what we have processed.
5566          * The write pointer must be a multiple of 8 (a hardware
5567          * requirement, as in the Linux driver), so round it down.
5568          */
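             /*
              * Example with illustrative values: if the next slot to process
              * is 43, hw becomes 42 and we write 42 & ~7 = 40; slots 41-42
              * are handed back to the hardware by a later write, once the
              * pointer crosses the next multiple of 8.
              */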
5569         hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5570         IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
5571 }
5572
5573 static void
5574 iwm_intr(void *arg)
5575 {
5576         struct iwm_softc *sc = arg;
5577         int handled = 0;
5578         int r1, r2, rv = 0;
5579         int isperiodic = 0;
5580
5581         IWM_LOCK(sc);
5582         IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5583
5584         if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5585                 uint32_t *ict = sc->ict_dma.vaddr;
5586                 int tmp;
5587
5588                 tmp = htole32(ict[sc->ict_cur]);
5589                 if (!tmp)
5590                         goto out_ena;
5591
5592                 /*
5593                  * ok, there was something.  keep plowing until we have all.
5594                  */
5595                 r1 = r2 = 0;
5596                 while (tmp) {
5597                         r1 |= tmp;
5598                         ict[sc->ict_cur] = 0;
5599                         sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5600                         tmp = htole32(ict[sc->ict_cur]);
5601                 }
5602
5603                 /* An all-ones read is spurious; treat it as "no interrupt". */
5604                 if (r1 == 0xffffffff)
5605                         r1 = 0;
5606
5607                 /* H/W bug w/a (per iwlwifi): recreate the RX bit (15) from bits 18/19. */
5608                 if (r1 & 0xc0000)
5609                         r1 |= 0x8000;
5610                 r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5611         } else {
5612                 r1 = IWM_READ(sc, IWM_CSR_INT);
5613                 /* Hardware has gone away (bus reads return all-ones / 0xa5a5a5a0). */
5614                 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5615                         goto out;
5616                 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5617         }
5618         if (r1 == 0 && r2 == 0) {
5619                 goto out_ena;
5620         }
5621
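             /*
              * Ack every possible interrupt bit, not only the unmasked ones:
              * per iwlwifi, some interrupts can still fire while masked, so
              * ACKing them all keeps interrupt coalescing working.
              */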
5622         IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5623
5624         /* ignored */
5625         handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
5626
5627         if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5628                 int i;
5629                 struct ieee80211com *ic = &sc->sc_ic;
5630                 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5631
5632 #ifdef IWM_DEBUG
5633                 iwm_nic_error(sc);
5634 #endif
5635                 /* Dump driver status (TX and RX rings) while we're here. */
5636                 device_printf(sc->sc_dev, "driver status:\n");
5637                 for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5638                         struct iwm_tx_ring *ring = &sc->txq[i];
5639                         device_printf(sc->sc_dev,
5640                             "  tx ring %2d: qid=%-2d cur=%-3d "
5641                             "queued=%-3d\n",
5642                             i, ring->qid, ring->cur, ring->queued);
5643                 }
5644                 device_printf(sc->sc_dev,
5645                     "  rx ring: cur=%d\n", sc->rxq.cur);
5646                 device_printf(sc->sc_dev,
5647                     "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5648
5649                 /* Don't stop the device; just do a VAP restart */
5650                 IWM_UNLOCK(sc);
5651
5652                 if (vap == NULL) {
5653                         printf("%s: null vap\n", __func__);
5654                         return;
5655                 }
5656
5657                 device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5658                     "restarting\n", __func__, vap->iv_state);
5659
5660                 /* XXX TODO: turn this into a callout/taskqueue */
5661                 ieee80211_restart_all(ic);
5662                 return;
5663         }
5664
5665         if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5666                 handled |= IWM_CSR_INT_BIT_HW_ERR;
5667                 device_printf(sc->sc_dev, "hardware error, stopping device\n");
5668                 iwm_stop(sc);
5669                 rv = 1;
5670                 goto out;
5671         }
5672
5673         /* firmware chunk loaded */
5674         if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5675                 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5676                 handled |= IWM_CSR_INT_BIT_FH_TX;
5677                 sc->sc_fw_chunk_done = 1;
5678                 wakeup(&sc->sc_fw);
5679         }
5680
5681         if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5682                 handled |= IWM_CSR_INT_BIT_RF_KILL;
5683                 if (iwm_check_rfkill(sc)) {
5684                         device_printf(sc->sc_dev,
5685                             "%s: rfkill switch, disabling interface\n",
5686                             __func__);
5687                         iwm_stop(sc);
5688                 }
5689         }
5690
5691         /*
5692          * Like the Linux driver, use the periodic RX interrupt to catch
5693          * entries whose ring-index update races with the RX interrupt.
5694          */
5695         if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5696                 handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5697                 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5698                 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5699                         IWM_WRITE_1(sc,
5700                             IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5701                 isperiodic = 1;
5702         }
5703
5704         if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5705                 handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5706                 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5707
5708                 iwm_notif_intr(sc);
5709
5710                 /* enable periodic interrupt, see above */
5711                 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5712                         IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5713                             IWM_CSR_INT_PERIODIC_ENA);
5714         }
5715
5716         if (__predict_false(r1 & ~handled))
5717                 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5718                     "%s: unhandled interrupts: %x\n", __func__, r1);
5719         rv = 1;
5720
5721  out_ena:
5722         iwm_restore_interrupts(sc);
5723  out:
5724         IWM_UNLOCK(sc);
5725         return;
5726 }
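
     /*
      * Illustrative sketch only, not used by the driver: how the ICT value
      * accumulated in iwm_intr() maps onto the IWM_CSR_INT bit layout.  The
      * helper name is hypothetical; the transformation mirrors the code and
      * the iwlwifi comments for the same workaround, i.e. the ICT branch
      * above is equivalent to r1 = iwm_ict_to_inta(<accumulated value>).
      */
     static inline uint32_t
     iwm_ict_to_inta(uint32_t val)
     {

             /* An all-ones read is spurious; report "no interrupt". */
             if (val == 0xffffffff)
                     return (0);

             /*
              * Hardware bug workaround: with interrupt coalescing the RX bit
              * (bit 15 of the packed value) may be dropped, but bits 18/19
              * stay set, so recreate bit 15 from them.
              */
             if (val & 0xc0000)
                     val |= 0x8000;

             /* The low byte stays put; the high byte moves to bits 24-31. */
             return ((0xff & val) | ((0xff00 & val) << 16));
     }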
5727
5728 /*
5729  * Autoconf glue-sniffing
5730  */
5731 #define PCI_VENDOR_INTEL                0x8086
5732 #define PCI_PRODUCT_INTEL_WL_3160_1     0x08b3
5733 #define PCI_PRODUCT_INTEL_WL_3160_2     0x08b4
5734 #define PCI_PRODUCT_INTEL_WL_3165_1     0x3165
5735 #define PCI_PRODUCT_INTEL_WL_3165_2     0x3166
5736 #define PCI_PRODUCT_INTEL_WL_7260_1     0x08b1
5737 #define PCI_PRODUCT_INTEL_WL_7260_2     0x08b2
5738 #define PCI_PRODUCT_INTEL_WL_7265_1     0x095a
5739 #define PCI_PRODUCT_INTEL_WL_7265_2     0x095b
5740 #define PCI_PRODUCT_INTEL_WL_8260_1     0x24f3
5741 #define PCI_PRODUCT_INTEL_WL_8260_2     0x24f4
5742
5743 static const struct iwm_devices {
5744         uint16_t        device;
5745         const char      *name;
5746 } iwm_devices[] = {
5747         { PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
5748         { PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
5749         { PCI_PRODUCT_INTEL_WL_3165_1, "Intel Dual Band Wireless AC 3165" },
5750         { PCI_PRODUCT_INTEL_WL_3165_2, "Intel Dual Band Wireless AC 3165" },
5751         { PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
5752         { PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
5753         { PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
5754         { PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
5755         { PCI_PRODUCT_INTEL_WL_8260_1, "Intel Dual Band Wireless AC 8260" },
5756         { PCI_PRODUCT_INTEL_WL_8260_2, "Intel Dual Band Wireless AC 8260" },
5757 };
5758
5759 static int
5760 iwm_probe(device_t dev)
5761 {
5762         int i;
5763
5764         for (i = 0; i < nitems(iwm_devices); i++) {
5765                 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5766                     pci_get_device(dev) == iwm_devices[i].device) {
5767                         device_set_desc(dev, iwm_devices[i].name);
5768                         return (BUS_PROBE_DEFAULT);
5769                 }
5770         }
5771
5772         return (ENXIO);
5773 }
5774
5775 static int
5776 iwm_dev_check(device_t dev)
5777 {
5778         struct iwm_softc *sc;
5779
5780         sc = device_get_softc(dev);
5781
5782         switch (pci_get_device(dev)) {
5783         case PCI_PRODUCT_INTEL_WL_3160_1:
5784         case PCI_PRODUCT_INTEL_WL_3160_2:
5785                 sc->cfg = &iwm3160_cfg;
5786                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5787                 return (0);
5788         case PCI_PRODUCT_INTEL_WL_3165_1:
5789         case PCI_PRODUCT_INTEL_WL_3165_2:
5790                 sc->cfg = &iwm3165_cfg;
5791                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5792                 return (0);
5793         case PCI_PRODUCT_INTEL_WL_7260_1:
5794         case PCI_PRODUCT_INTEL_WL_7260_2:
5795                 sc->cfg = &iwm7260_cfg;
5796                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5797                 return (0);
5798         case PCI_PRODUCT_INTEL_WL_7265_1:
5799         case PCI_PRODUCT_INTEL_WL_7265_2:
5800                 sc->cfg = &iwm7265_cfg;
5801                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
5802                 return (0);
5803         case PCI_PRODUCT_INTEL_WL_8260_1:
5804         case PCI_PRODUCT_INTEL_WL_8260_2:
5805                 sc->cfg = &iwm8260_cfg;
5806                 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
5807                 return (0);
5808         default:
5809                 device_printf(dev, "unknown adapter type\n");
5810                 return ENXIO;
5811         }
5812 }
5813
5814 static int
5815 iwm_pci_attach(device_t dev)
5816 {
5817         struct iwm_softc *sc;
5818         int count, error, rid;
5819         uint16_t reg;
5820
5821         sc = device_get_softc(dev);
5822
5823         /* Clear device-specific "PCI retry timeout" register (41h). */
5824         reg = pci_read_config(dev, 0x40, sizeof(reg));
5825         pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
5826
5827         /* Enable bus-mastering and hardware bug workaround. */
5828         pci_enable_busmaster(dev);
5829         reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5830         /* if !MSI */
5831         if (reg & PCIM_STATUS_INTxSTATE) {
5832                 reg &= ~PCIM_STATUS_INTxSTATE;
5833         }
5834         pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5835
5836         rid = PCIR_BAR(0);
5837         sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5838             RF_ACTIVE);
5839         if (sc->sc_mem == NULL) {
5840                 device_printf(sc->sc_dev, "can't map mem space\n");
5841                 return (ENXIO);
5842         }
5843         sc->sc_st = rman_get_bustag(sc->sc_mem);
5844         sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5845
5846         /* Install interrupt handler. */
5847         count = 1;
5848         rid = 0;
5849         if (pci_alloc_msi(dev, &count) == 0)
5850                 rid = 1;
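             /*
              * rid 0 is the legacy INTx resource (which must be shareable);
              * rid 1 is the first MSI vector when pci_alloc_msi() succeeds.
              */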
5851         sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5852             (rid != 0 ? 0 : RF_SHAREABLE));
5853         if (sc->sc_irq == NULL) {
5854                 device_printf(dev, "can't map interrupt\n");
5855                 return (ENXIO);
5856         }
5857         error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5858             NULL, iwm_intr, sc, &sc->sc_ih);
5859         if (error != 0 || sc->sc_ih == NULL) {
5860                 device_printf(dev, "can't establish interrupt\n");
5861                 return (ENXIO);
5862         }
5863         sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5864
5865         return (0);
5866 }
5867
5868 static void
5869 iwm_pci_detach(device_t dev)
5870 {
5871         struct iwm_softc *sc = device_get_softc(dev);
5872
5873         if (sc->sc_irq != NULL) {
5874                 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5875                 bus_release_resource(dev, SYS_RES_IRQ,
5876                     rman_get_rid(sc->sc_irq), sc->sc_irq);
5877                 pci_release_msi(dev);
5878         }
5879         if (sc->sc_mem != NULL)
5880                 bus_release_resource(dev, SYS_RES_MEMORY,
5881                     rman_get_rid(sc->sc_mem), sc->sc_mem);
5882 }
5883
5884
5885
5886 static int
5887 iwm_attach(device_t dev)
5888 {
5889         struct iwm_softc *sc = device_get_softc(dev);
5890         struct ieee80211com *ic = &sc->sc_ic;
5891         int error;
5892         int txq_i, i;
5893
5894         sc->sc_dev = dev;
5895         sc->sc_attached = 1;
5896         IWM_LOCK_INIT(sc);
5897         mbufq_init(&sc->sc_snd, ifqmaxlen);
5898         callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5899         callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
5900         TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5901
5902         sc->sc_notif_wait = iwm_notification_wait_init(sc);
5903         if (sc->sc_notif_wait == NULL) {
5904                 device_printf(dev, "failed to init notification wait struct\n");
5905                 goto fail;
5906         }
5907
5908         /* Init phy db */
5909         sc->sc_phy_db = iwm_phy_db_init(sc);
5910         if (!sc->sc_phy_db) {
5911                 device_printf(dev, "Cannot init phy_db\n");
5912                 goto fail;
5913         }
5914
5915         /* PCI attach */
5916         error = iwm_pci_attach(dev);
5917         if (error != 0)
5918                 goto fail;
5919
5920         sc->sc_wantresp = -1;
5921
5922         /* Check device type */
5923         error = iwm_dev_check(dev);
5924         if (error != 0)
5925                 goto fail;
5926
5927         sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
5928         /*
5929          * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
5930          * changed, and the revision step now also includes bits 0-1 (there is
5931          * no more "dash" value). To keep hw_rev backwards compatible, store
5932          * it in the old format.
5933          */
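             /*
              * That is, move the step out of bits 0-1 (new format) into bits
              * 2-3, where older parts reported it (assuming the
              * IWM_CSR_HW_REV_STEP macro extracts bits 2-3 of its argument).
              */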
5934         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
5935                 sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
5936                                 (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
5937
5938         if (iwm_prepare_card_hw(sc) != 0) {
5939                 device_printf(dev, "could not initialize hardware\n");
5940                 goto fail;
5941         }
5942
5943         if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
5944                 int ret;
5945                 uint32_t hw_step;
5946
5947                 /*
5948                  * In order to recognize C step the driver should read the
5949                  * chip version id located at the AUX bus MISC address.
5950                  */
5951                 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
5952                             IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
5953                 DELAY(2);
5954
5955                 ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
5956                                    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5957                                    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
5958                                    25000);
5959                 if (!ret) {
5960                         device_printf(sc->sc_dev,
5961                             "Failed to wake up the nic\n");
5962                         goto fail;
5963                 }
5964
5965                 if (iwm_nic_lock(sc)) {
5966                         hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
5967                         hw_step |= IWM_ENABLE_WFPM;
5968                         iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
5969                         hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
5970                         hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
5971                         if (hw_step == 0x3)
5972                                 sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
5973                                                 (IWM_SILICON_C_STEP << 2);
5974                         iwm_nic_unlock(sc);
5975                 } else {
5976                         device_printf(sc->sc_dev, "Failed to lock the nic\n");
5977                         goto fail;
5978                 }
5979         }
5980
5981         /* special-case 7265D, it has the same PCI IDs. */
5982         if (sc->cfg == &iwm7265_cfg &&
5983             (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
5984                 sc->cfg = &iwm7265d_cfg;
5985         }
5986
5987         /* Allocate DMA memory for firmware transfers. */
5988         if ((error = iwm_alloc_fwmem(sc)) != 0) {
5989                 device_printf(dev, "could not allocate memory for firmware\n");
5990                 goto fail;
5991         }
5992
5993         /* Allocate "Keep Warm" page. */
5994         if ((error = iwm_alloc_kw(sc)) != 0) {
5995                 device_printf(dev, "could not allocate keep warm page\n");
5996                 goto fail;
5997         }
5998
5999         /* We use ICT interrupts */
6000         if ((error = iwm_alloc_ict(sc)) != 0) {
6001                 device_printf(dev, "could not allocate ICT table\n");
6002                 goto fail;
6003         }
6004
6005         /* Allocate TX scheduler "rings". */
6006         if ((error = iwm_alloc_sched(sc)) != 0) {
6007                 device_printf(dev, "could not allocate TX scheduler rings\n");
6008                 goto fail;
6009         }
6010
6011         /* Allocate TX rings */
6012         for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
6013                 if ((error = iwm_alloc_tx_ring(sc,
6014                     &sc->txq[txq_i], txq_i)) != 0) {
6015                         device_printf(dev,
6016                             "could not allocate TX ring %d\n",
6017                             txq_i);
6018                         goto fail;
6019                 }
6020         }
6021
6022         /* Allocate RX ring. */
6023         if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
6024                 device_printf(dev, "could not allocate RX ring\n");
6025                 goto fail;
6026         }
6027
6028         /* Clear pending interrupts. */
6029         IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
6030
6031         ic->ic_softc = sc;
6032         ic->ic_name = device_get_nameunit(sc->sc_dev);
6033         ic->ic_phytype = IEEE80211_T_OFDM;      /* not only, but not used */
6034         ic->ic_opmode = IEEE80211_M_STA;        /* default to BSS mode */
6035
6036         /* Set device capabilities. */
6037         ic->ic_caps =
6038             IEEE80211_C_STA |
6039             IEEE80211_C_WPA |           /* WPA/RSN */
6040             IEEE80211_C_WME |
6041             IEEE80211_C_SHSLOT |        /* short slot time supported */
6042             IEEE80211_C_SHPREAMBLE      /* short preamble supported */
6043 //          IEEE80211_C_BGSCAN          /* capable of bg scanning */
6044             ;
6045         /* Advertise full-offload scanning */
6046         ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
6047         for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
6048                 sc->sc_phyctxt[i].id = i;
6049                 sc->sc_phyctxt[i].color = 0;
6050                 sc->sc_phyctxt[i].ref = 0;
6051                 sc->sc_phyctxt[i].channel = NULL;
6052         }
6053
6054         /* Default noise floor */
6055         sc->sc_noise = -96;
6056
6057         /* Max RSSI */
6058         sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
6059
6060         sc->sc_preinit_hook.ich_func = iwm_preinit;
6061         sc->sc_preinit_hook.ich_arg = sc;
6062         if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6063                 device_printf(dev, "config_intrhook_establish failed\n");
6064                 goto fail;
6065         }
6066
6067 #ifdef IWM_DEBUG
6068         SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6069             SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6070             CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6071 #endif
6072
6073         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6074             "<-%s\n", __func__);
6075
6076         return 0;
6077
6078         /* Free allocated memory if something failed during attachment. */
6079 fail:
6080         iwm_detach_local(sc, 0);
6081
6082         return ENXIO;
6083 }
6084
6085 static int
6086 iwm_is_valid_ether_addr(uint8_t *addr)
6087 {
6088         char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6089
6090         if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6091                 return (FALSE);
6092
6093         return (TRUE);
6094 }
6095
6096 static int
6097 iwm_update_edca(struct ieee80211com *ic)
6098 {
6099         struct iwm_softc *sc = ic->ic_softc;
6100
6101         device_printf(sc->sc_dev, "%s: called\n", __func__);
6102         return (0);
6103 }
6104
6105 static void
6106 iwm_preinit(void *arg)
6107 {
6108         struct iwm_softc *sc = arg;
6109         device_t dev = sc->sc_dev;
6110         struct ieee80211com *ic = &sc->sc_ic;
6111         int error;
6112
6113         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6114             "->%s\n", __func__);
6115
6116         IWM_LOCK(sc);
6117         if ((error = iwm_start_hw(sc)) != 0) {
6118                 device_printf(dev, "could not initialize hardware\n");
6119                 IWM_UNLOCK(sc);
6120                 goto fail;
6121         }
6122
6123         error = iwm_run_init_mvm_ucode(sc, 1);
6124         iwm_stop_device(sc);
6125         if (error) {
6126                 IWM_UNLOCK(sc);
6127                 goto fail;
6128         }
6129         device_printf(dev,
6130             "hw rev 0x%x, fw ver %s, address %s\n",
6131             sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6132             sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6133
6134         /* not all hardware can do 5GHz band */
6135         if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6136                 memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6137                     sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6138         IWM_UNLOCK(sc);
6139
6140         iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6141             ic->ic_channels);
6142
6143         /*
6144          * At this point we've committed - if we fail to do setup,
6145          * we now also have to tear down the net80211 state.
6146          */
6147         ieee80211_ifattach(ic);
6148         ic->ic_vap_create = iwm_vap_create;
6149         ic->ic_vap_delete = iwm_vap_delete;
6150         ic->ic_raw_xmit = iwm_raw_xmit;
6151         ic->ic_node_alloc = iwm_node_alloc;
6152         ic->ic_scan_start = iwm_scan_start;
6153         ic->ic_scan_end = iwm_scan_end;
6154         ic->ic_update_mcast = iwm_update_mcast;
6155         ic->ic_getradiocaps = iwm_init_channel_map;
6156         ic->ic_set_channel = iwm_set_channel;
6157         ic->ic_scan_curchan = iwm_scan_curchan;
6158         ic->ic_scan_mindwell = iwm_scan_mindwell;
6159         ic->ic_wme.wme_update = iwm_update_edca;
6160         ic->ic_parent = iwm_parent;
6161         ic->ic_transmit = iwm_transmit;
6162         iwm_radiotap_attach(sc);
6163         if (bootverbose)
6164                 ieee80211_announce(ic);
6165
6166         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6167             "<-%s\n", __func__);
6168         config_intrhook_disestablish(&sc->sc_preinit_hook);
6169
6170         return;
6171 fail:
6172         config_intrhook_disestablish(&sc->sc_preinit_hook);
6173         iwm_detach_local(sc, 0);
6174 }
6175
6176 /*
6177  * Attach the interface to 802.11 radiotap.
6178  */
6179 static void
6180 iwm_radiotap_attach(struct iwm_softc *sc)
6181 {
6182         struct ieee80211com *ic = &sc->sc_ic;
6183
6184         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6185             "->%s begin\n", __func__);
6186         ieee80211_radiotap_attach(ic,
6187             &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6188                 IWM_TX_RADIOTAP_PRESENT,
6189             &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6190                 IWM_RX_RADIOTAP_PRESENT);
6191         IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6192             "->%s end\n", __func__);
6193 }
6194
6195 static struct ieee80211vap *
6196 iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6197     enum ieee80211_opmode opmode, int flags,
6198     const uint8_t bssid[IEEE80211_ADDR_LEN],
6199     const uint8_t mac[IEEE80211_ADDR_LEN])
6200 {
6201         struct iwm_vap *ivp;
6202         struct ieee80211vap *vap;
6203
6204         if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6205                 return NULL;
6206         ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6207         vap = &ivp->iv_vap;
6208         ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6209         vap->iv_bmissthreshold = 10;            /* override default */
6210         /* Override with driver methods. */
6211         ivp->iv_newstate = vap->iv_newstate;
6212         vap->iv_newstate = iwm_newstate;
6213
6214         ieee80211_ratectl_init(vap);
6215         /* Complete setup. */
6216         ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6217             mac);
6218         ic->ic_opmode = opmode;
6219
6220         return vap;
6221 }
6222
6223 static void
6224 iwm_vap_delete(struct ieee80211vap *vap)
6225 {
6226         struct iwm_vap *ivp = IWM_VAP(vap);
6227
6228         ieee80211_ratectl_deinit(vap);
6229         ieee80211_vap_detach(vap);
6230         free(ivp, M_80211_VAP);
6231 }
6232
6233 static void
6234 iwm_scan_start(struct ieee80211com *ic)
6235 {
6236         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6237         struct iwm_softc *sc = ic->ic_softc;
6238         int error;
6239
6240         IWM_LOCK(sc);
6241         if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6242                 error = iwm_mvm_umac_scan(sc);
6243         else
6244                 error = iwm_mvm_lmac_scan(sc);
6245         if (error != 0) {
6246                 device_printf(sc->sc_dev, "could not initiate scan\n");
6247                 IWM_UNLOCK(sc);
6248                 ieee80211_cancel_scan(vap);
6249         } else {
6250                 iwm_led_blink_start(sc);
6251                 IWM_UNLOCK(sc);
6252         }
6253 }
6254
6255 static void
6256 iwm_scan_end(struct ieee80211com *ic)
6257 {
6258         struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6259         struct iwm_softc *sc = ic->ic_softc;
6260
6261         IWM_LOCK(sc);
6262         iwm_led_blink_stop(sc);
6263         if (vap->iv_state == IEEE80211_S_RUN)
6264                 iwm_mvm_led_enable(sc);
6265         IWM_UNLOCK(sc);
6266 }
6267
6268 static void
6269 iwm_update_mcast(struct ieee80211com *ic)
6270 {
6271 }
6272
6273 static void
6274 iwm_set_channel(struct ieee80211com *ic)
6275 {
6276 }
6277
6278 static void
6279 iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6280 {
6281 }
6282
6283 static void
6284 iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6285 {
6286         return;
6287 }
6288
6289 void
6290 iwm_init_task(void *arg1)
6291 {
6292         struct iwm_softc *sc = arg1;
6293
6294         IWM_LOCK(sc);
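             /*
              * IWM_FLAG_BUSY acts as a sleep-based busy latch: wait for any
              * other thread that is (re)initializing or stopping, then claim
              * it for ourselves.
              */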
6295         while (sc->sc_flags & IWM_FLAG_BUSY)
6296                 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6297         sc->sc_flags |= IWM_FLAG_BUSY;
6298         iwm_stop(sc);
6299         if (sc->sc_ic.ic_nrunning > 0)
6300                 iwm_init(sc);
6301         sc->sc_flags &= ~IWM_FLAG_BUSY;
6302         wakeup(&sc->sc_flags);
6303         IWM_UNLOCK(sc);
6304 }
6305
6306 static int
6307 iwm_resume(device_t dev)
6308 {
6309         struct iwm_softc *sc = device_get_softc(dev);
6310         int do_reinit = 0;
6311         uint16_t reg;
6312
6313         /* Clear device-specific "PCI retry timeout" register (41h). */
6314         reg = pci_read_config(dev, 0x40, sizeof(reg));
6315         pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
6316         iwm_init_task(device_get_softc(dev));
6317
6318         IWM_LOCK(sc);
6319         if (sc->sc_flags & IWM_FLAG_SCANNING) {
6320                 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6321                 do_reinit = 1;
6322         }
6323         IWM_UNLOCK(sc);
6324
6325         if (do_reinit)
6326                 ieee80211_resume_all(&sc->sc_ic);
6327
6328         return 0;
6329 }
6330
6331 static int
6332 iwm_suspend(device_t dev)
6333 {
6334         int do_stop = 0;
6335         struct iwm_softc *sc = device_get_softc(dev);
6336
6337         do_stop = (sc->sc_ic.ic_nrunning > 0);
6338
6339         ieee80211_suspend_all(&sc->sc_ic);
6340
6341         if (do_stop) {
6342                 IWM_LOCK(sc);
6343                 iwm_stop(sc);
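                     /*
                      * Reuse IWM_FLAG_SCANNING as a "resume must restart
                      * net80211" marker; iwm_resume() checks and clears it
                      * before calling ieee80211_resume_all().
                      */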
6344                 sc->sc_flags |= IWM_FLAG_SCANNING;
6345                 IWM_UNLOCK(sc);
6346         }
6347
6348         return (0);
6349 }
6350
6351 static int
6352 iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6353 {
6354         struct iwm_fw_info *fw = &sc->sc_fw;
6355         device_t dev = sc->sc_dev;
6356         int i;
6357
6358         if (!sc->sc_attached)
6359                 return 0;
6360         sc->sc_attached = 0;
6361
6362         if (do_net80211)
6363                 ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6364
6365         callout_drain(&sc->sc_led_blink_to);
6366         callout_drain(&sc->sc_watchdog_to);
6367         iwm_stop_device(sc);
6368         if (do_net80211) {
6369                 ieee80211_ifdetach(&sc->sc_ic);
6370         }
6371
6372         iwm_phy_db_free(sc->sc_phy_db);
6373         sc->sc_phy_db = NULL;
6374
6375         iwm_free_nvm_data(sc->nvm_data);
6376
6377         /* Free descriptor rings */
6378         iwm_free_rx_ring(sc, &sc->rxq);
6379         for (i = 0; i < nitems(sc->txq); i++)
6380                 iwm_free_tx_ring(sc, &sc->txq[i]);
6381
6382         /* Free firmware */
6383         if (fw->fw_fp != NULL)
6384                 iwm_fw_info_free(fw);
6385
6386         /* Free scheduler */
6387         iwm_dma_contig_free(&sc->sched_dma);
6388         iwm_dma_contig_free(&sc->ict_dma);
6389         iwm_dma_contig_free(&sc->kw_dma);
6390         iwm_dma_contig_free(&sc->fw_dma);
6391
6392         /* Finished with the hardware - detach things */
6393         iwm_pci_detach(dev);
6394
6395         if (sc->sc_notif_wait != NULL) {
6396                 iwm_notification_wait_free(sc->sc_notif_wait);
6397                 sc->sc_notif_wait = NULL;
6398         }
6399
6400         mbufq_drain(&sc->sc_snd);
6401         IWM_LOCK_DESTROY(sc);
6402
6403         return (0);
6404 }
6405
6406 static int
6407 iwm_detach(device_t dev)
6408 {
6409         struct iwm_softc *sc = device_get_softc(dev);
6410
6411         return (iwm_detach_local(sc, 1));
6412 }
6413
6414 static device_method_t iwm_pci_methods[] = {
6415         /* Device interface */
6416         DEVMETHOD(device_probe,         iwm_probe),
6417         DEVMETHOD(device_attach,        iwm_attach),
6418         DEVMETHOD(device_detach,        iwm_detach),
6419         DEVMETHOD(device_suspend,       iwm_suspend),
6420         DEVMETHOD(device_resume,        iwm_resume),
6421
6422         DEVMETHOD_END
6423 };
6424
6425 static driver_t iwm_pci_driver = {
6426         "iwm",
6427         iwm_pci_methods,
6428         sizeof (struct iwm_softc)
6429 };
6430
6431 static devclass_t iwm_devclass;
6432
6433 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6434 MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6435 MODULE_DEPEND(iwm, pci, 1, 1, 1);
6436 MODULE_DEPEND(iwm, wlan, 1, 1, 1);